1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright (c) 2016 - 2018 Cavium Inc.
10 static inline int qede_alloc_rx_buffer(struct qede_rx_queue *rxq)
12 struct rte_mbuf *new_mb = NULL;
13 struct eth_rx_bd *rx_bd;
15 uint16_t idx = rxq->sw_rx_prod & NUM_RX_BDS(rxq);
17 new_mb = rte_mbuf_raw_alloc(rxq->mb_pool);
18 if (unlikely(!new_mb)) {
20 "Failed to allocate rx buffer "
21 "sw_rx_prod %u sw_rx_cons %u mp entries %u free %u",
22 idx, rxq->sw_rx_cons & NUM_RX_BDS(rxq),
23 rte_mempool_avail_count(rxq->mb_pool),
24 rte_mempool_in_use_count(rxq->mb_pool));
27 rxq->sw_rx_ring[idx].mbuf = new_mb;
28 rxq->sw_rx_ring[idx].page_offset = 0;
29 mapping = rte_mbuf_data_iova_default(new_mb);
30 /* Advance PROD and get BD pointer */
31 rx_bd = (struct eth_rx_bd *)ecore_chain_produce(&rxq->rx_bd_ring);
32 rx_bd->addr.hi = rte_cpu_to_le_32(U64_HI(mapping));
33 rx_bd->addr.lo = rte_cpu_to_le_32(U64_LO(mapping));
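/*
 * Illustrative sketch (not part of the driver): the software producer
 * index above is wrapped with a bitwise AND instead of a modulo, which
 * is valid only because the ring size is enforced to be a power of two
 * (see the rte_is_power_of_2() check in qede_rx_queue_setup). The helper
 * name below is hypothetical; (ring_size - 1) plays the role of the
 * NUM_RX_BDS() mask.
 */
static inline uint16_t example_ring_slot(uint16_t free_running_ctr,
					 uint16_t ring_size_pow2)
{
	/* the counter is never wrapped; only the derived index is */
	return free_running_ctr & (ring_size_pow2 - 1);
}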
38 #define QEDE_MAX_BULK_ALLOC_COUNT 512
40 static inline int qede_alloc_rx_bulk_mbufs(struct qede_rx_queue *rxq, int count)
42 void *obj_p[QEDE_MAX_BULK_ALLOC_COUNT] __rte_cache_aligned;
43 struct rte_mbuf *mbuf = NULL;
44 struct eth_rx_bd *rx_bd;
49 idx = rxq->sw_rx_prod & NUM_RX_BDS(rxq);
51 if (count > QEDE_MAX_BULK_ALLOC_COUNT)
52 count = QEDE_MAX_BULK_ALLOC_COUNT;
54 ret = rte_mempool_get_bulk(rxq->mb_pool, obj_p, count);
57 "Failed to allocate %d rx buffers "
58 "sw_rx_prod %u sw_rx_cons %u mp entries %u free %u",
59 count, idx, rxq->sw_rx_cons & NUM_RX_BDS(rxq),
60 rte_mempool_avail_count(rxq->mb_pool),
61 rte_mempool_in_use_count(rxq->mb_pool));
65 for (i = 0; i < count; i++) {
67 if (likely(i < count - 1))
68 rte_prefetch0(obj_p[i + 1]);
70 idx = rxq->sw_rx_prod & NUM_RX_BDS(rxq);
71 rxq->sw_rx_ring[idx].mbuf = mbuf;
72 rxq->sw_rx_ring[idx].page_offset = 0;
73 mapping = rte_mbuf_data_iova_default(mbuf);
74 rx_bd = (struct eth_rx_bd *)
75 ecore_chain_produce(&rxq->rx_bd_ring);
76 rx_bd->addr.hi = rte_cpu_to_le_32(U64_HI(mapping));
77 rx_bd->addr.lo = rte_cpu_to_le_32(U64_LO(mapping));
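/*
 * Note on the bulk path above: rte_mempool_get_bulk() is all-or-nothing.
 * It either returns 0 and stores exactly 'count' objects in obj_p, or it
 * returns a negative value and takes nothing from the pool, so no partial
 * cleanup is needed in the error branch. A hedged usage sketch (the burst
 * size is illustrative):
 */
#if 0	/* example only, not compiled into the driver */
	void *objs[32];

	if (rte_mempool_get_bulk(mp, objs, 32) == 0) {
		/* ... all 32 objects are valid here ... */
		rte_mempool_put_bulk(mp, objs, 32);
	}
	/* else: the pool had fewer than 32 free entries; retry smaller */
#endif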
84 /* Criteria for calculating Rx buffer size -
85 * 1) rx_buf_size should not exceed the size of mbuf
86 * 2) In scattered_rx mode - minimum rx_buf_size should be
87 * (MTU + Maximum L2 Header Size + 2) / ETH_RX_MAX_BUFF_PER_PKT
88 * 3) In regular mode - minimum rx_buf_size should be
89 * (MTU + Maximum L2 Header Size + 2)
90 * In the above cases, +2 corresponds to 2 bytes of padding in front of the L2 header.
92 * 4) rx_buf_size should be cacheline-size aligned. So considering
93 * criterion 1, we align the size down (floor) rather than up (ceil),
94 * so that rx_buf_size never exceeds the mbuf size.
97 qede_calc_rx_buf_size(struct rte_eth_dev *dev, uint16_t mbufsz,
98 uint16_t max_frame_size)
100 struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
101 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
104 if (dev->data->scattered_rx) {
105 /* per HW limitation, only ETH_RX_MAX_BUFF_PER_PKT
106 * buffers can be used for a single packet, so we need to make
107 * sure the mbuf size is sufficient for this.
109 if ((mbufsz * ETH_RX_MAX_BUFF_PER_PKT) <
110 (max_frame_size + QEDE_ETH_OVERHEAD)) {
111 DP_ERR(edev, "mbuf %d size is not enough to hold max fragments (%d) for max rx packet length (%d)\n",
112 mbufsz, ETH_RX_MAX_BUFF_PER_PKT, max_frame_size);
116 rx_buf_size = RTE_MAX(mbufsz,
117 (max_frame_size + QEDE_ETH_OVERHEAD) /
118 ETH_RX_MAX_BUFF_PER_PKT);
120 rx_buf_size = max_frame_size + QEDE_ETH_OVERHEAD;
123 /* Align to cache-line size if needed */
124 return QEDE_FLOOR_TO_CACHE_LINE_SIZE(rx_buf_size);
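/*
 * Worked example of the calculation above, assuming a 64-byte cache line
 * (values are illustrative): floor alignment of 2047 gives 1984, whereas
 * ceiling alignment would give 2048 and could exceed the mbuf data room.
 * The helper below is a hypothetical restatement of the
 * QEDE_FLOOR_TO_CACHE_LINE_SIZE() macro used in the return statement.
 */
static inline uint16_t example_floor_to_cache_line(uint16_t size)
{
	/* clear the low bits so the result is a cache-line multiple */
	return size & (uint16_t)~(RTE_CACHE_LINE_SIZE - 1);
}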
127 static struct qede_rx_queue *
128 qede_alloc_rx_queue_mem(struct rte_eth_dev *dev,
131 unsigned int socket_id,
132 struct rte_mempool *mp,
135 struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
136 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
137 struct qede_rx_queue *rxq;
141 /* First allocate the rx queue data structure */
142 rxq = rte_zmalloc_socket("qede_rx_queue", sizeof(struct qede_rx_queue),
143 RTE_CACHE_LINE_SIZE, socket_id);
146 DP_ERR(edev, "Unable to allocate memory for rxq on socket %u",
153 rxq->nb_rx_desc = nb_desc;
154 rxq->queue_id = queue_idx;
155 rxq->port_id = dev->data->port_id;
158 rxq->rx_buf_size = bufsz;
160 DP_INFO(edev, "mtu %u mbufsz %u bd_max_bytes %u scatter_mode %d\n",
161 qdev->mtu, bufsz, rxq->rx_buf_size, dev->data->scattered_rx);
163 /* Allocate the parallel driver ring for Rx buffers */
164 size = sizeof(*rxq->sw_rx_ring) * rxq->nb_rx_desc;
165 rxq->sw_rx_ring = rte_zmalloc_socket("sw_rx_ring", size,
166 RTE_CACHE_LINE_SIZE, socket_id);
167 if (!rxq->sw_rx_ring) {
168 DP_ERR(edev, "Memory allocation failed for sw_rx_ring on"
169 " socket %u\n", socket_id);
174 /* Allocate FW Rx ring */
175 rc = qdev->ops->common->chain_alloc(edev,
176 ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
177 ECORE_CHAIN_MODE_NEXT_PTR,
178 ECORE_CHAIN_CNT_TYPE_U16,
180 sizeof(struct eth_rx_bd),
184 if (rc != ECORE_SUCCESS) {
185 DP_ERR(edev, "Memory allocation failed for RX BD ring"
186 " on socket %u\n", socket_id);
187 rte_free(rxq->sw_rx_ring);
192 /* Allocate FW completion ring */
193 rc = qdev->ops->common->chain_alloc(edev,
194 ECORE_CHAIN_USE_TO_CONSUME,
195 ECORE_CHAIN_MODE_PBL,
196 ECORE_CHAIN_CNT_TYPE_U16,
198 sizeof(union eth_rx_cqe),
202 if (rc != ECORE_SUCCESS) {
203 DP_ERR(edev, "Memory allocation failed for RX CQE ring"
204 " on socket %u\n", socket_id);
205 qdev->ops->common->chain_free(edev, &rxq->rx_bd_ring);
206 rte_free(rxq->sw_rx_ring);
215 qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qid,
216 uint16_t nb_desc, unsigned int socket_id,
217 __rte_unused const struct rte_eth_rxconf *rx_conf,
218 struct rte_mempool *mp)
220 struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
221 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
222 struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
223 struct qede_rx_queue *rxq;
224 uint16_t max_rx_pkt_len;
228 PMD_INIT_FUNC_TRACE(edev);
230 /* Note: Ring size/align is controlled by struct rte_eth_desc_lim */
231 if (!rte_is_power_of_2(nb_desc)) {
232 DP_ERR(edev, "Ring size %u is not a power of 2\n",
237 /* Free memory prior to re-allocation if needed... */
238 if (dev->data->rx_queues[qid] != NULL) {
239 qede_rx_queue_release(dev->data->rx_queues[qid]);
240 dev->data->rx_queues[qid] = NULL;
243 max_rx_pkt_len = (uint16_t)rxmode->max_rx_pkt_len;
245 /* Fix up RX buffer size */
246 bufsz = (uint16_t)rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
247 /* Cache-align the mbuf size to simplify the rx_buf_size calculation */
248 bufsz = QEDE_FLOOR_TO_CACHE_LINE_SIZE(bufsz);
249 if ((rxmode->offloads & DEV_RX_OFFLOAD_SCATTER) ||
250 (max_rx_pkt_len + QEDE_ETH_OVERHEAD) > bufsz) {
251 if (!dev->data->scattered_rx) {
252 DP_INFO(edev, "Forcing scatter-gather mode\n");
253 dev->data->scattered_rx = 1;
257 rc = qede_calc_rx_buf_size(dev, bufsz, max_rx_pkt_len);
263 rxq = qede_alloc_rx_queue_mem(dev, qid, nb_desc,
264 socket_id, mp, bufsz);
268 dev->data->rx_queues[qid] = rxq;
269 qdev->fp_array[qid].rxq = rxq;
271 DP_INFO(edev, "rxq %d num_desc %u rx_buf_size=%u socket %u\n",
272 qid, nb_desc, rxq->rx_buf_size, socket_id);
278 qede_rx_queue_reset(__rte_unused struct qede_dev *qdev,
279 struct qede_rx_queue *rxq)
281 DP_INFO(&qdev->edev, "Reset RX queue %u\n", rxq->queue_id);
282 ecore_chain_reset(&rxq->rx_bd_ring);
283 ecore_chain_reset(&rxq->rx_comp_ring);
286 *rxq->hw_cons_ptr = 0;
289 static void qede_rx_queue_release_mbufs(struct qede_rx_queue *rxq)
293 if (rxq->sw_rx_ring) {
294 for (i = 0; i < rxq->nb_rx_desc; i++) {
295 if (rxq->sw_rx_ring[i].mbuf) {
296 rte_pktmbuf_free(rxq->sw_rx_ring[i].mbuf);
297 rxq->sw_rx_ring[i].mbuf = NULL;
303 static void _qede_rx_queue_release(struct qede_dev *qdev,
304 struct ecore_dev *edev,
305 struct qede_rx_queue *rxq)
307 qede_rx_queue_release_mbufs(rxq);
308 qdev->ops->common->chain_free(edev, &rxq->rx_bd_ring);
309 qdev->ops->common->chain_free(edev, &rxq->rx_comp_ring);
310 rte_free(rxq->sw_rx_ring);
314 void qede_rx_queue_release(void *rx_queue)
316 struct qede_rx_queue *rxq = rx_queue;
317 struct qede_dev *qdev;
318 struct ecore_dev *edev;
322 edev = QEDE_INIT_EDEV(qdev);
323 PMD_INIT_FUNC_TRACE(edev);
324 _qede_rx_queue_release(qdev, edev, rxq);
328 /* Stops a given RX queue in the HW */
329 static int qede_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
331 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
332 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
333 struct ecore_hwfn *p_hwfn;
334 struct qede_rx_queue *rxq;
338 if (rx_queue_id < qdev->num_rx_queues) {
339 rxq = qdev->fp_array[rx_queue_id].rxq;
340 hwfn_index = rx_queue_id % edev->num_hwfns;
341 p_hwfn = &edev->hwfns[hwfn_index];
342 rc = ecore_eth_rx_queue_stop(p_hwfn, rxq->handle,
344 if (rc != ECORE_SUCCESS) {
345 DP_ERR(edev, "RX queue %u stop failed\n", rx_queue_id);
348 qede_rx_queue_release_mbufs(rxq);
349 qede_rx_queue_reset(qdev, rxq);
350 eth_dev->data->rx_queue_state[rx_queue_id] =
351 RTE_ETH_QUEUE_STATE_STOPPED;
352 DP_INFO(edev, "RX queue %u stopped\n", rx_queue_id);
354 DP_ERR(edev, "RX queue %u is not in range\n", rx_queue_id);
361 static struct qede_tx_queue *
362 qede_alloc_tx_queue_mem(struct rte_eth_dev *dev,
365 unsigned int socket_id,
366 const struct rte_eth_txconf *tx_conf)
368 struct qede_dev *qdev = dev->data->dev_private;
369 struct ecore_dev *edev = &qdev->edev;
370 struct qede_tx_queue *txq;
373 txq = rte_zmalloc_socket("qede_tx_queue", sizeof(struct qede_tx_queue),
374 RTE_CACHE_LINE_SIZE, socket_id);
378 "Unable to allocate memory for txq on socket %u",
383 txq->nb_tx_desc = nb_desc;
385 txq->port_id = dev->data->port_id;
387 rc = qdev->ops->common->chain_alloc(edev,
388 ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
389 ECORE_CHAIN_MODE_PBL,
390 ECORE_CHAIN_CNT_TYPE_U16,
392 sizeof(union eth_tx_bd_types),
395 if (rc != ECORE_SUCCESS) {
397 "Unable to allocate memory for txbd ring on socket %u",
399 qede_tx_queue_release(txq);
403 /* Allocate software ring */
404 txq->sw_tx_ring = rte_zmalloc_socket("txq->sw_tx_ring",
405 (sizeof(struct qede_tx_entry) *
407 RTE_CACHE_LINE_SIZE, socket_id);
409 if (!txq->sw_tx_ring) {
411 "Unable to allocate memory for txbd ring on socket %u",
413 qdev->ops->common->chain_free(edev, &txq->tx_pbl);
414 qede_tx_queue_release(txq);
418 txq->queue_id = queue_idx;
420 txq->nb_tx_avail = txq->nb_tx_desc;
422 txq->tx_free_thresh =
423 tx_conf->tx_free_thresh ? tx_conf->tx_free_thresh :
424 (txq->nb_tx_desc - QEDE_DEFAULT_TX_FREE_THRESH);
427 "txq %u num_desc %u tx_free_thresh %u socket %u\n",
428 queue_idx, nb_desc, txq->tx_free_thresh, socket_id);
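/*
 * Hedged usage sketch for the threshold logic above: with nb_desc = 512
 * and tx_conf->tx_free_thresh = 0 the driver default of
 * 512 - QEDE_DEFAULT_TX_FREE_THRESH applies; a non-zero value passed via
 * rte_eth_tx_queue_setup() overrides it. Port/queue ids are illustrative.
 */
#if 0	/* example only */
	struct rte_eth_txconf txconf = { .tx_free_thresh = 64 };

	rte_eth_tx_queue_setup(0 /* port */, 0 /* queue */, 512,
			       rte_socket_id(), &txconf);
#endif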
433 qede_tx_queue_setup(struct rte_eth_dev *dev,
436 unsigned int socket_id,
437 const struct rte_eth_txconf *tx_conf)
439 struct qede_dev *qdev = dev->data->dev_private;
440 struct ecore_dev *edev = &qdev->edev;
441 struct qede_tx_queue *txq;
443 PMD_INIT_FUNC_TRACE(edev);
445 if (!rte_is_power_of_2(nb_desc)) {
446 DP_ERR(edev, "Ring size %u is not a power of 2\n",
451 /* Free memory prior to re-allocation if needed... */
452 if (dev->data->tx_queues[queue_idx] != NULL) {
453 qede_tx_queue_release(dev->data->tx_queues[queue_idx]);
454 dev->data->tx_queues[queue_idx] = NULL;
457 txq = qede_alloc_tx_queue_mem(dev, queue_idx, nb_desc,
462 dev->data->tx_queues[queue_idx] = txq;
463 qdev->fp_array[queue_idx].txq = txq;
469 qede_tx_queue_reset(__rte_unused struct qede_dev *qdev,
470 struct qede_tx_queue *txq)
472 DP_INFO(&qdev->edev, "Reset TX queue %u\n", txq->queue_id);
473 ecore_chain_reset(&txq->tx_pbl);
476 *txq->hw_cons_ptr = 0;
479 static void qede_tx_queue_release_mbufs(struct qede_tx_queue *txq)
483 if (txq->sw_tx_ring) {
484 for (i = 0; i < txq->nb_tx_desc; i++) {
485 if (txq->sw_tx_ring[i].mbuf) {
486 rte_pktmbuf_free(txq->sw_tx_ring[i].mbuf);
487 txq->sw_tx_ring[i].mbuf = NULL;
493 static void _qede_tx_queue_release(struct qede_dev *qdev,
494 struct ecore_dev *edev,
495 struct qede_tx_queue *txq)
497 qede_tx_queue_release_mbufs(txq);
498 qdev->ops->common->chain_free(edev, &txq->tx_pbl);
499 rte_free(txq->sw_tx_ring);
503 void qede_tx_queue_release(void *tx_queue)
505 struct qede_tx_queue *txq = tx_queue;
506 struct qede_dev *qdev;
507 struct ecore_dev *edev;
511 edev = QEDE_INIT_EDEV(qdev);
512 PMD_INIT_FUNC_TRACE(edev);
513 _qede_tx_queue_release(qdev, edev, txq);
517 /* This function allocates fast-path status block memory */
519 qede_alloc_mem_sb(struct qede_dev *qdev, struct ecore_sb_info *sb_info,
522 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
523 struct status_block_e4 *sb_virt;
527 sb_virt = OSAL_DMA_ALLOC_COHERENT(edev, &sb_phys,
528 sizeof(struct status_block_e4));
530 DP_ERR(edev, "Status block allocation failed\n");
533 rc = qdev->ops->common->sb_init(edev, sb_info, sb_virt,
536 DP_ERR(edev, "Status block initialization failed\n");
537 OSAL_DMA_FREE_COHERENT(edev, sb_virt, sb_phys,
538 sizeof(struct status_block_e4));
545 int qede_alloc_fp_resc(struct qede_dev *qdev)
547 struct ecore_dev *edev = &qdev->edev;
548 struct qede_fastpath *fp;
553 ecore_vf_get_num_sbs(ECORE_LEADING_HWFN(edev), &num_sbs);
555 num_sbs = ecore_cxt_get_proto_cid_count
556 (ECORE_LEADING_HWFN(edev), PROTOCOLID_ETH, NULL);
559 DP_ERR(edev, "No status blocks available\n");
563 qdev->fp_array = rte_calloc("fp", QEDE_RXTX_MAX(qdev),
564 sizeof(*qdev->fp_array), RTE_CACHE_LINE_SIZE);
566 if (!qdev->fp_array) {
567 DP_ERR(edev, "fp array allocation failed\n");
571 memset((void *)qdev->fp_array, 0, QEDE_RXTX_MAX(qdev) *
572 sizeof(*qdev->fp_array));
574 for (sb_idx = 0; sb_idx < QEDE_RXTX_MAX(qdev); sb_idx++) {
575 fp = &qdev->fp_array[sb_idx];
578 fp->sb_info = rte_calloc("sb", 1, sizeof(struct ecore_sb_info),
579 RTE_CACHE_LINE_SIZE);
581 DP_ERR(edev, "FP sb_info allocation failed\n");
584 if (qede_alloc_mem_sb(qdev, fp->sb_info, sb_idx)) {
585 DP_ERR(edev, "FP status block allocation failed\n");
588 DP_INFO(edev, "sb_info idx 0x%x initialized\n",
589 fp->sb_info->igu_sb_id);
595 void qede_dealloc_fp_resc(struct rte_eth_dev *eth_dev)
597 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
598 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
599 struct qede_fastpath *fp;
603 PMD_INIT_FUNC_TRACE(edev);
605 for (sb_idx = 0; sb_idx < QEDE_RXTX_MAX(qdev); sb_idx++) {
606 fp = &qdev->fp_array[sb_idx];
609 DP_INFO(edev, "Free sb_info index 0x%x\n",
610 fp->sb_info->igu_sb_id);
612 OSAL_DMA_FREE_COHERENT(edev, fp->sb_info->sb_virt,
613 fp->sb_info->sb_phys,
614 sizeof(struct status_block_e4));
615 rte_free(fp->sb_info);
620 /* Free packet buffers and ring memories */
621 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
622 if (eth_dev->data->rx_queues[i]) {
623 qede_rx_queue_release(eth_dev->data->rx_queues[i]);
624 eth_dev->data->rx_queues[i] = NULL;
628 for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
629 if (eth_dev->data->tx_queues[i]) {
630 qede_tx_queue_release(eth_dev->data->tx_queues[i]);
631 eth_dev->data->tx_queues[i] = NULL;
636 rte_free(qdev->fp_array);
637 qdev->fp_array = NULL;
641 qede_update_rx_prod(__rte_unused struct qede_dev *edev,
642 struct qede_rx_queue *rxq)
644 uint16_t bd_prod = ecore_chain_get_prod_idx(&rxq->rx_bd_ring);
645 uint16_t cqe_prod = ecore_chain_get_prod_idx(&rxq->rx_comp_ring);
646 struct eth_rx_prod_data rx_prods = { 0 };
648 /* Update producers */
649 rx_prods.bd_prod = rte_cpu_to_le_16(bd_prod);
650 rx_prods.cqe_prod = rte_cpu_to_le_16(cqe_prod);
652 /* Make sure that the BD and SGE data is updated before updating the
653 * producers since FW might read the BD/SGE right after the producer
658 internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods),
659 (uint32_t *)&rx_prods);
661 /* mmiowb is needed to synchronize doorbell writes from more than one
662 * processor. It guarantees that the write arrives at the device before
663 * the napi lock is released and another qede_poll is called (possibly
664 * on another CPU). Without this barrier, the next doorbell can bypass
665 * this doorbell. This is applicable to IA64/Altix systems.
669 PMD_RX_LOG(DEBUG, rxq, "bd_prod %u cqe_prod %u", bd_prod, cqe_prod);
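/*
 * Hedged sketch of the ordering pattern described above (the names are
 * illustrative, not the driver's): descriptor memory must be globally
 * visible before the device is told about it, hence a write barrier
 * between the BD stores and the producer update.
 */
#if 0	/* example only */
	bd->addr = dma_addr;	/* 1. fill the descriptor          */
	rte_wmb();		/* 2. order it before the producer */
	*prod_reg = new_prod;	/* 3. now the device may fetch it  */
#endif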
672 /* Starts a given RX queue in HW */
674 qede_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
676 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
677 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
678 struct ecore_queue_start_common_params params;
679 struct ecore_rxq_start_ret_params ret_params;
680 struct qede_rx_queue *rxq;
681 struct qede_fastpath *fp;
682 struct ecore_hwfn *p_hwfn;
683 dma_addr_t p_phys_table;
689 if (rx_queue_id < eth_dev->data->nb_rx_queues) {
690 fp = &qdev->fp_array[rx_queue_id];
691 rxq = eth_dev->data->rx_queues[rx_queue_id];
692 /* Allocate buffers for the Rx ring */
693 for (j = 0; j < rxq->nb_rx_desc; j++) {
694 rc = qede_alloc_rx_buffer(rxq);
696 DP_ERR(edev, "RX buffer allocation failed"
697 " for rxq = %u\n", rx_queue_id);
701 /* disable interrupts */
702 ecore_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0);
704 memset(&params, 0, sizeof(params));
705 params.queue_id = rx_queue_id / edev->num_hwfns;
707 params.stats_id = params.vport_id;
708 params.p_sb = fp->sb_info;
709 DP_INFO(edev, "rxq %u igu_sb_id 0x%x\n",
710 fp->rxq->queue_id, fp->sb_info->igu_sb_id);
711 params.sb_idx = RX_PI;
712 hwfn_index = rx_queue_id % edev->num_hwfns;
713 p_hwfn = &edev->hwfns[hwfn_index];
714 p_phys_table = ecore_chain_get_pbl_phys(&fp->rxq->rx_comp_ring);
715 page_cnt = ecore_chain_get_page_cnt(&fp->rxq->rx_comp_ring);
716 memset(&ret_params, 0, sizeof(ret_params));
717 rc = ecore_eth_rx_queue_start(p_hwfn,
718 p_hwfn->hw_info.opaque_fid,
719 &params, fp->rxq->rx_buf_size,
720 fp->rxq->rx_bd_ring.p_phys_addr,
721 p_phys_table, page_cnt,
724 DP_ERR(edev, "RX queue %u could not be started, rc = %d\n",
728 /* Update with the returned parameters */
729 fp->rxq->hw_rxq_prod_addr = ret_params.p_prod;
730 fp->rxq->handle = ret_params.p_handle;
732 fp->rxq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[RX_PI];
733 qede_update_rx_prod(qdev, fp->rxq);
734 eth_dev->data->rx_queue_state[rx_queue_id] =
735 RTE_ETH_QUEUE_STATE_STARTED;
736 DP_INFO(edev, "RX queue %u started\n", rx_queue_id);
738 DP_ERR(edev, "RX queue %u is not in range\n", rx_queue_id);
746 qede_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
748 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
749 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
750 struct ecore_queue_start_common_params params;
751 struct ecore_txq_start_ret_params ret_params;
752 struct ecore_hwfn *p_hwfn;
753 dma_addr_t p_phys_table;
754 struct qede_tx_queue *txq;
755 struct qede_fastpath *fp;
760 if (tx_queue_id < eth_dev->data->nb_tx_queues) {
761 txq = eth_dev->data->tx_queues[tx_queue_id];
762 fp = &qdev->fp_array[tx_queue_id];
763 memset(&params, 0, sizeof(params));
764 params.queue_id = tx_queue_id / edev->num_hwfns;
766 params.stats_id = params.vport_id;
767 params.p_sb = fp->sb_info;
768 DP_INFO(edev, "txq %u igu_sb_id 0x%x\n",
769 fp->txq->queue_id, fp->sb_info->igu_sb_id);
770 params.sb_idx = TX_PI(0); /* tc = 0 */
771 p_phys_table = ecore_chain_get_pbl_phys(&txq->tx_pbl);
772 page_cnt = ecore_chain_get_page_cnt(&txq->tx_pbl);
773 hwfn_index = tx_queue_id % edev->num_hwfns;
774 p_hwfn = &edev->hwfns[hwfn_index];
775 if (qdev->dev_info.is_legacy)
776 fp->txq->is_legacy = true;
777 rc = ecore_eth_tx_queue_start(p_hwfn,
778 p_hwfn->hw_info.opaque_fid,
780 p_phys_table, page_cnt,
782 if (rc != ECORE_SUCCESS) {
783 DP_ERR(edev, "TX queue %u couldn't be started, rc=%d\n",
787 txq->doorbell_addr = ret_params.p_doorbell;
788 txq->handle = ret_params.p_handle;
790 txq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[TX_PI(0)];
791 SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_DEST,
793 SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD,
795 SET_FIELD(txq->tx_db.data.params,
796 ETH_DB_DATA_AGG_VAL_SEL,
797 DQ_XCM_ETH_TX_BD_PROD_CMD);
798 txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
799 eth_dev->data->tx_queue_state[tx_queue_id] =
800 RTE_ETH_QUEUE_STATE_STARTED;
801 DP_INFO(edev, "TX queue %u started\n", tx_queue_id);
803 DP_ERR(edev, "TX queue %u is not in range\n", tx_queue_id);
811 qede_free_tx_pkt(struct qede_tx_queue *txq)
813 struct rte_mbuf *mbuf;
818 mbuf = txq->sw_tx_ring[idx].mbuf;
820 nb_segs = mbuf->nb_segs;
821 PMD_TX_LOG(DEBUG, txq, "nb_segs to free %u\n", nb_segs);
823 /* It's like consuming rxbuf in recv() */
824 ecore_chain_consume(&txq->tx_pbl);
828 rte_pktmbuf_free(mbuf);
829 txq->sw_tx_ring[idx].mbuf = NULL;
831 PMD_TX_LOG(DEBUG, txq, "Freed tx packet\n");
833 ecore_chain_consume(&txq->tx_pbl);
839 qede_process_tx_compl(__rte_unused struct ecore_dev *edev,
840 struct qede_tx_queue *txq)
843 #ifdef RTE_LIBRTE_QEDE_DEBUG_TX
847 rte_compiler_barrier();
848 hw_bd_cons = rte_le_to_cpu_16(*txq->hw_cons_ptr);
849 #ifdef RTE_LIBRTE_QEDE_DEBUG_TX
850 sw_tx_cons = ecore_chain_get_cons_idx(&txq->tx_pbl);
851 PMD_TX_LOG(DEBUG, txq, "Tx Completions = %u\n",
852 abs(hw_bd_cons - sw_tx_cons));
854 while (hw_bd_cons != ecore_chain_get_cons_idx(&txq->tx_pbl))
855 qede_free_tx_pkt(txq);
858 static int qede_drain_txq(struct qede_dev *qdev,
859 struct qede_tx_queue *txq, bool allow_drain)
861 struct ecore_dev *edev = &qdev->edev;
864 while (txq->sw_tx_cons != txq->sw_tx_prod) {
865 qede_process_tx_compl(edev, txq);
868 DP_ERR(edev, "Tx queue[%u] is stuck, "
869 "requesting MCP to drain\n",
871 rc = qdev->ops->common->drain(edev);
874 return qede_drain_txq(qdev, txq, false);
876 DP_ERR(edev, "Timeout waiting for tx queue[%d]: "
877 "PROD=%d, CONS=%d\n",
878 txq->queue_id, txq->sw_tx_prod,
884 rte_compiler_barrier();
887 /* FW finished processing, wait for HW to transmit all tx packets */
893 /* Stops a given TX queue in the HW */
894 static int qede_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
896 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
897 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
898 struct ecore_hwfn *p_hwfn;
899 struct qede_tx_queue *txq;
903 if (tx_queue_id < eth_dev->data->nb_tx_queues) {
904 txq = eth_dev->data->tx_queues[tx_queue_id];
906 if (qede_drain_txq(qdev, txq, true))
907 return -1; /* For the lack of retcodes */
909 hwfn_index = tx_queue_id % edev->num_hwfns;
910 p_hwfn = &edev->hwfns[hwfn_index];
911 rc = ecore_eth_tx_queue_stop(p_hwfn, txq->handle);
912 if (rc != ECORE_SUCCESS) {
913 DP_ERR(edev, "TX queue %u stop failed\n", tx_queue_id);
916 qede_tx_queue_release_mbufs(txq);
917 qede_tx_queue_reset(qdev, txq);
918 eth_dev->data->tx_queue_state[tx_queue_id] =
919 RTE_ETH_QUEUE_STATE_STOPPED;
920 DP_INFO(edev, "TX queue %u stopped\n", tx_queue_id);
922 DP_ERR(edev, "TX queue %u is not in range\n", tx_queue_id);
929 int qede_start_queues(struct rte_eth_dev *eth_dev)
931 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
936 rc = qede_rx_queue_start(eth_dev, id);
937 if (rc != ECORE_SUCCESS)
942 rc = qede_tx_queue_start(eth_dev, id);
943 if (rc != ECORE_SUCCESS)
950 void qede_stop_queues(struct rte_eth_dev *eth_dev)
952 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
955 /* Stopping RX/TX queues */
957 qede_tx_queue_stop(eth_dev, id);
961 qede_rx_queue_stop(eth_dev, id);
965 static inline bool qede_tunn_exist(uint16_t flag)
967 return !!((PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK <<
968 PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT) & flag);
971 static inline uint8_t qede_check_tunn_csum_l3(uint16_t flag)
973 return !!((PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK <<
974 PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT) & flag);
978 * qede_check_tunn_csum_l4:
980 * Returns 1 if the L4 csum was validated AND the validation failed; 0 otherwise.
983 static inline uint8_t qede_check_tunn_csum_l4(uint16_t flag)
985 if ((PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK <<
986 PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT) & flag)
987 return !!((PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK <<
988 PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT) & flag);
993 static inline uint8_t qede_check_notunn_csum_l4(uint16_t flag)
995 if ((PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
996 PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT) & flag)
997 return !!((PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
998 PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT) & flag);
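/*
 * All of the parsing-flag helpers above share one idiom: place the field
 * mask at its bit position with (MASK << SHIFT) and AND it against the
 * CQE flags. A self-contained sketch with hypothetical mask/shift values:
 */
static inline uint8_t example_flag_is_set(uint16_t flags)
{
	const uint16_t mask = 0x1, shift = 3;	/* illustrative field */

	/* !! collapses any non-zero result to exactly 1 */
	return !!((mask << shift) & flags);
}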
1003 /* Returns outer L2, L3 and L4 packet_type for tunneled packets */
1004 static inline uint32_t qede_rx_cqe_to_pkt_type_outer(struct rte_mbuf *m)
1006 uint32_t packet_type = RTE_PTYPE_UNKNOWN;
1007 struct rte_ether_hdr *eth_hdr;
1008 struct rte_ipv4_hdr *ipv4_hdr;
1009 struct rte_ipv6_hdr *ipv6_hdr;
1010 struct rte_vlan_hdr *vlan_hdr;
1012 bool vlan_tagged = 0;
1015 eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
1016 len = sizeof(struct rte_ether_hdr);
1017 ethertype = rte_cpu_to_be_16(eth_hdr->ether_type);
1019 /* Note: Valid only if VLAN stripping is disabled */
1020 if (ethertype == RTE_ETHER_TYPE_VLAN) {
1022 vlan_hdr = (struct rte_vlan_hdr *)(eth_hdr + 1);
1023 len += sizeof(struct rte_vlan_hdr);
1024 ethertype = rte_cpu_to_be_16(vlan_hdr->eth_proto);
1027 if (ethertype == RTE_ETHER_TYPE_IPV4) {
1028 packet_type |= RTE_PTYPE_L3_IPV4;
1029 ipv4_hdr = rte_pktmbuf_mtod_offset(m,
1030 struct rte_ipv4_hdr *, len);
1031 if (ipv4_hdr->next_proto_id == IPPROTO_TCP)
1032 packet_type |= RTE_PTYPE_L4_TCP;
1033 else if (ipv4_hdr->next_proto_id == IPPROTO_UDP)
1034 packet_type |= RTE_PTYPE_L4_UDP;
1035 } else if (ethertype == RTE_ETHER_TYPE_IPV6) {
1036 packet_type |= RTE_PTYPE_L3_IPV6;
1037 ipv6_hdr = rte_pktmbuf_mtod_offset(m,
1038 struct rte_ipv6_hdr *, len);
1039 if (ipv6_hdr->proto == IPPROTO_TCP)
1040 packet_type |= RTE_PTYPE_L4_TCP;
1041 else if (ipv6_hdr->proto == IPPROTO_UDP)
1042 packet_type |= RTE_PTYPE_L4_UDP;
1046 packet_type |= RTE_PTYPE_L2_ETHER_VLAN;
1048 packet_type |= RTE_PTYPE_L2_ETHER;
1053 static inline uint32_t qede_rx_cqe_to_pkt_type_inner(uint16_t flags)
1058 static const uint32_t
1059 ptype_lkup_tbl[QEDE_PKT_TYPE_MAX] __rte_cache_aligned = {
1060 [QEDE_PKT_TYPE_IPV4] = RTE_PTYPE_INNER_L3_IPV4 |
1061 RTE_PTYPE_INNER_L2_ETHER,
1062 [QEDE_PKT_TYPE_IPV6] = RTE_PTYPE_INNER_L3_IPV6 |
1063 RTE_PTYPE_INNER_L2_ETHER,
1064 [QEDE_PKT_TYPE_IPV4_TCP] = RTE_PTYPE_INNER_L3_IPV4 |
1065 RTE_PTYPE_INNER_L4_TCP |
1066 RTE_PTYPE_INNER_L2_ETHER,
1067 [QEDE_PKT_TYPE_IPV6_TCP] = RTE_PTYPE_INNER_L3_IPV6 |
1068 RTE_PTYPE_INNER_L4_TCP |
1069 RTE_PTYPE_INNER_L2_ETHER,
1070 [QEDE_PKT_TYPE_IPV4_UDP] = RTE_PTYPE_INNER_L3_IPV4 |
1071 RTE_PTYPE_INNER_L4_UDP |
1072 RTE_PTYPE_INNER_L2_ETHER,
1073 [QEDE_PKT_TYPE_IPV6_UDP] = RTE_PTYPE_INNER_L3_IPV6 |
1074 RTE_PTYPE_INNER_L4_UDP |
1075 RTE_PTYPE_INNER_L2_ETHER,
1076 /* Frags with no VLAN */
1077 [QEDE_PKT_TYPE_IPV4_FRAG] = RTE_PTYPE_INNER_L3_IPV4 |
1078 RTE_PTYPE_INNER_L4_FRAG |
1079 RTE_PTYPE_INNER_L2_ETHER,
1080 [QEDE_PKT_TYPE_IPV6_FRAG] = RTE_PTYPE_INNER_L3_IPV6 |
1081 RTE_PTYPE_INNER_L4_FRAG |
1082 RTE_PTYPE_INNER_L2_ETHER,
1084 [QEDE_PKT_TYPE_IPV4_VLAN] = RTE_PTYPE_INNER_L3_IPV4 |
1085 RTE_PTYPE_INNER_L2_ETHER_VLAN,
1086 [QEDE_PKT_TYPE_IPV6_VLAN] = RTE_PTYPE_INNER_L3_IPV6 |
1087 RTE_PTYPE_INNER_L2_ETHER_VLAN,
1088 [QEDE_PKT_TYPE_IPV4_TCP_VLAN] = RTE_PTYPE_INNER_L3_IPV4 |
1089 RTE_PTYPE_INNER_L4_TCP |
1090 RTE_PTYPE_INNER_L2_ETHER_VLAN,
1091 [QEDE_PKT_TYPE_IPV6_TCP_VLAN] = RTE_PTYPE_INNER_L3_IPV6 |
1092 RTE_PTYPE_INNER_L4_TCP |
1093 RTE_PTYPE_INNER_L2_ETHER_VLAN,
1094 [QEDE_PKT_TYPE_IPV4_UDP_VLAN] = RTE_PTYPE_INNER_L3_IPV4 |
1095 RTE_PTYPE_INNER_L4_UDP |
1096 RTE_PTYPE_INNER_L2_ETHER_VLAN,
1097 [QEDE_PKT_TYPE_IPV6_UDP_VLAN] = RTE_PTYPE_INNER_L3_IPV6 |
1098 RTE_PTYPE_INNER_L4_UDP |
1099 RTE_PTYPE_INNER_L2_ETHER_VLAN,
1100 /* Frags with VLAN */
1101 [QEDE_PKT_TYPE_IPV4_VLAN_FRAG] = RTE_PTYPE_INNER_L3_IPV4 |
1102 RTE_PTYPE_INNER_L4_FRAG |
1103 RTE_PTYPE_INNER_L2_ETHER_VLAN,
1104 [QEDE_PKT_TYPE_IPV6_VLAN_FRAG] = RTE_PTYPE_INNER_L3_IPV6 |
1105 RTE_PTYPE_INNER_L4_FRAG |
1106 RTE_PTYPE_INNER_L2_ETHER_VLAN,
1109 /* Bits (0..3) provide the L3/L4 protocol type */
1110 /* Bits (4,5) provide the frag and VLAN info */
1111 val = ((PARSING_AND_ERR_FLAGS_L3TYPE_MASK <<
1112 PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) |
1113 (PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK <<
1114 PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT) |
1115 (PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK <<
1116 PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT) |
1117 (PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK <<
1118 PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT)) & flags;
1120 if (val < QEDE_PKT_TYPE_MAX)
1121 return ptype_lkup_tbl[val];
1123 return RTE_PTYPE_UNKNOWN;
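/*
 * Worked sketch of the table lookup above (toy values, not the real
 * masks): the masked parse flags concatenate the L3/L4 protocol type in
 * bits 0..3 with the frag/VLAN bits in 4..5, and the result indexes the
 * cache-aligned lookup table directly; anything outside the table maps
 * to RTE_PTYPE_UNKNOWN.
 */
static inline uint32_t example_masked_lookup(uint16_t flags)
{
	static const uint32_t tbl[4] = { 0u, 10u, 20u, 30u }; /* toy table */
	uint16_t val = flags & 0x7;	/* 3-bit field; table covers 0..3 */

	return (val < 4) ? tbl[val] : 0u; /* 0u standing in for UNKNOWN */
}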
1126 static inline uint32_t qede_rx_cqe_to_pkt_type(uint16_t flags)
1131 static const uint32_t
1132 ptype_lkup_tbl[QEDE_PKT_TYPE_MAX] __rte_cache_aligned = {
1133 [QEDE_PKT_TYPE_IPV4] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L2_ETHER,
1134 [QEDE_PKT_TYPE_IPV6] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L2_ETHER,
1135 [QEDE_PKT_TYPE_IPV4_TCP] = RTE_PTYPE_L3_IPV4 |
1138 [QEDE_PKT_TYPE_IPV6_TCP] = RTE_PTYPE_L3_IPV6 |
1141 [QEDE_PKT_TYPE_IPV4_UDP] = RTE_PTYPE_L3_IPV4 |
1144 [QEDE_PKT_TYPE_IPV6_UDP] = RTE_PTYPE_L3_IPV6 |
1147 /* Frags with no VLAN */
1148 [QEDE_PKT_TYPE_IPV4_FRAG] = RTE_PTYPE_L3_IPV4 |
1151 [QEDE_PKT_TYPE_IPV6_FRAG] = RTE_PTYPE_L3_IPV6 |
1155 [QEDE_PKT_TYPE_IPV4_VLAN] = RTE_PTYPE_L3_IPV4 |
1156 RTE_PTYPE_L2_ETHER_VLAN,
1157 [QEDE_PKT_TYPE_IPV6_VLAN] = RTE_PTYPE_L3_IPV6 |
1158 RTE_PTYPE_L2_ETHER_VLAN,
1159 [QEDE_PKT_TYPE_IPV4_TCP_VLAN] = RTE_PTYPE_L3_IPV4 |
1161 RTE_PTYPE_L2_ETHER_VLAN,
1162 [QEDE_PKT_TYPE_IPV6_TCP_VLAN] = RTE_PTYPE_L3_IPV6 |
1164 RTE_PTYPE_L2_ETHER_VLAN,
1165 [QEDE_PKT_TYPE_IPV4_UDP_VLAN] = RTE_PTYPE_L3_IPV4 |
1167 RTE_PTYPE_L2_ETHER_VLAN,
1168 [QEDE_PKT_TYPE_IPV6_UDP_VLAN] = RTE_PTYPE_L3_IPV6 |
1170 RTE_PTYPE_L2_ETHER_VLAN,
1171 /* Frags with VLAN */
1172 [QEDE_PKT_TYPE_IPV4_VLAN_FRAG] = RTE_PTYPE_L3_IPV4 |
1174 RTE_PTYPE_L2_ETHER_VLAN,
1175 [QEDE_PKT_TYPE_IPV6_VLAN_FRAG] = RTE_PTYPE_L3_IPV6 |
1177 RTE_PTYPE_L2_ETHER_VLAN,
1180 /* Bits (0..3) provide the L3/L4 protocol type */
1181 /* Bits (4,5) provide the frag and VLAN info */
1182 val = ((PARSING_AND_ERR_FLAGS_L3TYPE_MASK <<
1183 PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) |
1184 (PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK <<
1185 PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT) |
1186 (PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK <<
1187 PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT) |
1188 (PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK <<
1189 PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT)) & flags;
1191 if (val < QEDE_PKT_TYPE_MAX)
1192 return ptype_lkup_tbl[val];
1194 return RTE_PTYPE_UNKNOWN;
1197 static inline uint8_t
1198 qede_check_notunn_csum_l3(struct rte_mbuf *m, uint16_t flag)
1200 struct rte_ipv4_hdr *ip;
1205 val = ((PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
1206 PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT) & flag);
1208 if (unlikely(val)) {
1209 m->packet_type = qede_rx_cqe_to_pkt_type(flag);
1210 if (RTE_ETH_IS_IPV4_HDR(m->packet_type)) {
1211 ip = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
1212 sizeof(struct rte_ether_hdr));
1213 pkt_csum = ip->hdr_checksum;
1214 ip->hdr_checksum = 0;
1215 calc_csum = rte_ipv4_cksum(ip);
1216 ip->hdr_checksum = pkt_csum;
1217 return (calc_csum != pkt_csum);
1218 } else if (RTE_ETH_IS_IPV6_HDR(m->packet_type)) {
1225 static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq)
1227 ecore_chain_consume(&rxq->rx_bd_ring);
1232 qede_reuse_page(__rte_unused struct qede_dev *qdev,
1233 struct qede_rx_queue *rxq, struct qede_rx_entry *curr_cons)
1235 struct eth_rx_bd *rx_bd_prod = ecore_chain_produce(&rxq->rx_bd_ring);
1236 uint16_t idx = rxq->sw_rx_prod & NUM_RX_BDS(rxq);
1237 struct qede_rx_entry *curr_prod;
1238 dma_addr_t new_mapping;
1240 curr_prod = &rxq->sw_rx_ring[idx];
1241 *curr_prod = *curr_cons;
1243 new_mapping = rte_mbuf_data_iova_default(curr_prod->mbuf) +
1244 curr_prod->page_offset;
1246 rx_bd_prod->addr.hi = rte_cpu_to_le_32(U64_HI(new_mapping));
1247 rx_bd_prod->addr.lo = rte_cpu_to_le_32(U64_LO(new_mapping));
1253 qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq,
1254 struct qede_dev *qdev, uint8_t count)
1256 struct qede_rx_entry *curr_cons;
1258 for (; count > 0; count--) {
1259 curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS(rxq)];
1260 qede_reuse_page(qdev, rxq, curr_cons);
1261 qede_rx_bd_ring_consume(rxq);
1266 qede_rx_process_tpa_cmn_cont_end_cqe(__rte_unused struct qede_dev *qdev,
1267 struct qede_rx_queue *rxq,
1268 uint8_t agg_index, uint16_t len)
1270 struct qede_agg_info *tpa_info;
1271 struct rte_mbuf *curr_frag; /* Pointer to currently filled TPA seg */
1274 /* Under certain conditions it is possible that the FW may not
1275 * consume an additional or new BD, so the decision to consume the
1276 * BD must be made based on len_list[0].
1278 if (rte_le_to_cpu_16(len)) {
1279 tpa_info = &rxq->tpa_info[agg_index];
1280 cons_idx = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
1281 curr_frag = rxq->sw_rx_ring[cons_idx].mbuf;
1283 curr_frag->nb_segs = 1;
1284 curr_frag->pkt_len = rte_le_to_cpu_16(len);
1285 curr_frag->data_len = curr_frag->pkt_len;
1286 tpa_info->tpa_tail->next = curr_frag;
1287 tpa_info->tpa_tail = curr_frag;
1288 qede_rx_bd_ring_consume(rxq);
1289 if (unlikely(qede_alloc_rx_buffer(rxq) != 0)) {
1290 PMD_RX_LOG(ERR, rxq, "mbuf allocation failed\n");
1291 rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
1292 rxq->rx_alloc_errors++;
1298 qede_rx_process_tpa_cont_cqe(struct qede_dev *qdev,
1299 struct qede_rx_queue *rxq,
1300 struct eth_fast_path_rx_tpa_cont_cqe *cqe)
1302 PMD_RX_LOG(INFO, rxq, "TPA cont[%d] - len [%d]\n",
1303 cqe->tpa_agg_index, rte_le_to_cpu_16(cqe->len_list[0]));
1304 /* Only len_list[0] will have a value */
1305 qede_rx_process_tpa_cmn_cont_end_cqe(qdev, rxq, cqe->tpa_agg_index,
1310 qede_rx_process_tpa_end_cqe(struct qede_dev *qdev,
1311 struct qede_rx_queue *rxq,
1312 struct eth_fast_path_rx_tpa_end_cqe *cqe)
1314 struct rte_mbuf *rx_mb; /* Pointer to head of the chained agg */
1316 qede_rx_process_tpa_cmn_cont_end_cqe(qdev, rxq, cqe->tpa_agg_index,
1318 /* Update total length and frags based on end TPA */
1319 rx_mb = rxq->tpa_info[cqe->tpa_agg_index].tpa_head;
1320 /* TODO: Add Sanity Checks */
1321 rx_mb->nb_segs = cqe->num_of_bds;
1322 rx_mb->pkt_len = cqe->total_packet_len;
1324 PMD_RX_LOG(INFO, rxq, "TPA End[%d] reason %d cqe_len %d nb_segs %d"
1325 " pkt_len %d\n", cqe->tpa_agg_index, cqe->end_reason,
1326 rte_le_to_cpu_16(cqe->len_list[0]), rx_mb->nb_segs,
1330 static inline uint32_t qede_rx_cqe_to_tunn_pkt_type(uint16_t flags)
1335 static const uint32_t
1336 ptype_tunn_lkup_tbl[QEDE_PKT_TYPE_TUNN_MAX_TYPE] __rte_cache_aligned = {
1337 [QEDE_PKT_TYPE_UNKNOWN] = RTE_PTYPE_UNKNOWN,
1338 [QEDE_PKT_TYPE_TUNN_GENEVE] = RTE_PTYPE_TUNNEL_GENEVE,
1339 [QEDE_PKT_TYPE_TUNN_GRE] = RTE_PTYPE_TUNNEL_GRE,
1340 [QEDE_PKT_TYPE_TUNN_VXLAN] = RTE_PTYPE_TUNNEL_VXLAN,
1341 [QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_GENEVE] =
1342 RTE_PTYPE_TUNNEL_GENEVE,
1343 [QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_GRE] =
1344 RTE_PTYPE_TUNNEL_GRE,
1345 [QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_VXLAN] =
1346 RTE_PTYPE_TUNNEL_VXLAN,
1347 [QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_GENEVE] =
1348 RTE_PTYPE_TUNNEL_GENEVE,
1349 [QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_GRE] =
1350 RTE_PTYPE_TUNNEL_GRE,
1351 [QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_VXLAN] =
1352 RTE_PTYPE_TUNNEL_VXLAN,
1353 [QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_GENEVE] =
1354 RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L3_IPV4,
1355 [QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_GRE] =
1356 RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L3_IPV4,
1357 [QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_VXLAN] =
1358 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L3_IPV4,
1359 [QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_GENEVE] =
1360 RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L3_IPV4,
1361 [QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_GRE] =
1362 RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L3_IPV4,
1363 [QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_VXLAN] =
1364 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L3_IPV4,
1365 [QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_GENEVE] =
1366 RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L3_IPV6,
1367 [QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_GRE] =
1368 RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L3_IPV6,
1369 [QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_VXLAN] =
1370 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L3_IPV6,
1371 [QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_GENEVE] =
1372 RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L3_IPV6,
1373 [QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_GRE] =
1374 RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L3_IPV6,
1375 [QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_VXLAN] =
1376 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L3_IPV6,
1379 /* Cover bits[4-0] to include tunn_type and next protocol */
1380 val = ((ETH_TUNNEL_PARSING_FLAGS_TYPE_MASK <<
1381 ETH_TUNNEL_PARSING_FLAGS_TYPE_SHIFT) |
1382 (ETH_TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_MASK <<
1383 ETH_TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_SHIFT)) & flags;
1385 if (val < QEDE_PKT_TYPE_TUNN_MAX_TYPE)
1386 return ptype_tunn_lkup_tbl[val];
1388 return RTE_PTYPE_UNKNOWN;
1392 qede_process_sg_pkts(void *p_rxq, struct rte_mbuf *rx_mb,
1393 uint8_t num_segs, uint16_t pkt_len)
1395 struct qede_rx_queue *rxq = p_rxq;
1396 struct qede_dev *qdev = rxq->qdev;
1397 register struct rte_mbuf *seg1 = NULL;
1398 register struct rte_mbuf *seg2 = NULL;
1399 uint16_t sw_rx_index;
1404 cur_size = pkt_len > rxq->rx_buf_size ? rxq->rx_buf_size :
1406 if (unlikely(!cur_size)) {
1407 PMD_RX_LOG(ERR, rxq, "Length is 0 while %u BDs"
1408 " left for mapping jumbo\n", num_segs);
1409 qede_recycle_rx_bd_ring(rxq, qdev, num_segs);
1412 sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
1413 seg2 = rxq->sw_rx_ring[sw_rx_index].mbuf;
1414 qede_rx_bd_ring_consume(rxq);
1415 pkt_len -= cur_size;
1416 seg2->data_len = cur_size;
1426 #ifdef RTE_LIBRTE_QEDE_DEBUG_RX
1428 print_rx_bd_info(struct rte_mbuf *m, struct qede_rx_queue *rxq,
1431 PMD_RX_LOG(INFO, rxq,
1432 "len 0x%04x bf 0x%04x hash_val 0x%x"
1433 " ol_flags 0x%04lx l2=%s l3=%s l4=%s tunn=%s"
1434 " inner_l2=%s inner_l3=%s inner_l4=%s\n",
1435 m->data_len, bitfield, m->hash.rss,
1436 (unsigned long)m->ol_flags,
1437 rte_get_ptype_l2_name(m->packet_type),
1438 rte_get_ptype_l3_name(m->packet_type),
1439 rte_get_ptype_l4_name(m->packet_type),
1440 rte_get_ptype_tunnel_name(m->packet_type),
1441 rte_get_ptype_inner_l2_name(m->packet_type),
1442 rte_get_ptype_inner_l3_name(m->packet_type),
1443 rte_get_ptype_inner_l4_name(m->packet_type));
1448 qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1450 struct qede_rx_queue *rxq = p_rxq;
1451 struct qede_dev *qdev = rxq->qdev;
1452 struct ecore_dev *edev = &qdev->edev;
1453 uint16_t hw_comp_cons, sw_comp_cons, sw_rx_index;
1454 uint16_t rx_pkt = 0;
1455 union eth_rx_cqe *cqe;
1456 struct eth_fast_path_rx_reg_cqe *fp_cqe = NULL;
1457 register struct rte_mbuf *rx_mb = NULL;
1458 register struct rte_mbuf *seg1 = NULL;
1459 enum eth_rx_cqe_type cqe_type;
1460 uint16_t pkt_len = 0; /* Sum of all BD segments */
1461 uint16_t len; /* Length of first BD */
1462 uint8_t num_segs = 1;
1463 uint16_t preload_idx;
1464 uint16_t parse_flag;
1465 #ifdef RTE_LIBRTE_QEDE_DEBUG_RX
1466 uint8_t bitfield_val;
1468 uint8_t tunn_parse_flag;
1469 struct eth_fast_path_rx_tpa_start_cqe *cqe_start_tpa;
1471 uint32_t packet_type;
1474 uint8_t offset, tpa_agg_idx, flags;
1475 struct qede_agg_info *tpa_info = NULL;
1477 int rx_alloc_count = 0;
1480 /* Allocate buffers that we used in previous loop */
1481 if (rxq->rx_alloc_count) {
1482 if (unlikely(qede_alloc_rx_bulk_mbufs(rxq,
1483 rxq->rx_alloc_count))) {
1484 struct rte_eth_dev *dev;
1486 PMD_RX_LOG(ERR, rxq,
1487 "New buffer allocation failed,"
1488 "dropping incoming packetn");
1489 dev = &rte_eth_devices[rxq->port_id];
1490 dev->data->rx_mbuf_alloc_failed +=
1491 rxq->rx_alloc_count;
1492 rxq->rx_alloc_errors += rxq->rx_alloc_count;
1495 qede_update_rx_prod(qdev, rxq);
1496 rxq->rx_alloc_count = 0;
1499 hw_comp_cons = rte_le_to_cpu_16(*rxq->hw_cons_ptr);
1500 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
1504 if (hw_comp_cons == sw_comp_cons)
1507 while (sw_comp_cons != hw_comp_cons) {
1509 packet_type = RTE_PTYPE_UNKNOWN;
1511 tpa_start_flg = false;
1514 /* Get the CQE from the completion ring */
1516 (union eth_rx_cqe *)ecore_chain_consume(&rxq->rx_comp_ring);
1517 cqe_type = cqe->fast_path_regular.type;
1518 PMD_RX_LOG(INFO, rxq, "Rx CQE type %d\n", cqe_type);
1521 case ETH_RX_CQE_TYPE_REGULAR:
1522 fp_cqe = &cqe->fast_path_regular;
1524 case ETH_RX_CQE_TYPE_TPA_START:
1525 cqe_start_tpa = &cqe->fast_path_tpa_start;
1526 tpa_info = &rxq->tpa_info[cqe_start_tpa->tpa_agg_index];
1527 tpa_start_flg = true;
1528 /* Mark it as LRO packet */
1529 ol_flags |= PKT_RX_LRO;
1530 /* In split mode, seg_len is the same as len_on_first_bd
1531 * and ext_bd_len_list will be empty since there are
1532 * no additional buffers
1534 PMD_RX_LOG(INFO, rxq,
1535 "TPA start[%d] - len_on_first_bd %d header %d"
1536 " [bd_list[0] %d], [seg_len %d]\n",
1537 cqe_start_tpa->tpa_agg_index,
1538 rte_le_to_cpu_16(cqe_start_tpa->len_on_first_bd),
1539 cqe_start_tpa->header_len,
1540 rte_le_to_cpu_16(cqe_start_tpa->ext_bd_len_list[0]),
1541 rte_le_to_cpu_16(cqe_start_tpa->seg_len));
1544 case ETH_RX_CQE_TYPE_TPA_CONT:
1545 qede_rx_process_tpa_cont_cqe(qdev, rxq,
1546 &cqe->fast_path_tpa_cont);
1548 case ETH_RX_CQE_TYPE_TPA_END:
1549 qede_rx_process_tpa_end_cqe(qdev, rxq,
1550 &cqe->fast_path_tpa_end);
1551 tpa_agg_idx = cqe->fast_path_tpa_end.tpa_agg_index;
1552 tpa_info = &rxq->tpa_info[tpa_agg_idx];
1553 rx_mb = rxq->tpa_info[tpa_agg_idx].tpa_head;
1555 case ETH_RX_CQE_TYPE_SLOW_PATH:
1556 PMD_RX_LOG(INFO, rxq, "Got unexpected slowpath CQE\n");
1557 ecore_eth_cqe_completion(
1558 &edev->hwfns[rxq->queue_id % edev->num_hwfns],
1559 (struct eth_slow_path_rx_cqe *)cqe);
1565 /* Get the data from the SW ring */
1566 sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
1567 rx_mb = rxq->sw_rx_ring[sw_rx_index].mbuf;
1568 assert(rx_mb != NULL);
1570 /* Handle regular CQE or TPA start CQE */
1571 if (!tpa_start_flg) {
1572 parse_flag = rte_le_to_cpu_16(fp_cqe->pars_flags.flags);
1573 offset = fp_cqe->placement_offset;
1574 len = rte_le_to_cpu_16(fp_cqe->len_on_first_bd);
1575 pkt_len = rte_le_to_cpu_16(fp_cqe->pkt_len);
1576 vlan_tci = rte_le_to_cpu_16(fp_cqe->vlan_tag);
1577 rss_hash = rte_le_to_cpu_32(fp_cqe->rss_hash);
1578 #ifdef RTE_LIBRTE_QEDE_DEBUG_RX
1579 bitfield_val = fp_cqe->bitfields;
1583 rte_le_to_cpu_16(cqe_start_tpa->pars_flags.flags);
1584 offset = cqe_start_tpa->placement_offset;
1585 /* seg_len = len_on_first_bd */
1586 len = rte_le_to_cpu_16(cqe_start_tpa->len_on_first_bd);
1587 vlan_tci = rte_le_to_cpu_16(cqe_start_tpa->vlan_tag);
1588 #ifdef RTE_LIBRTE_QEDE_DEBUG_RX
1589 bitfield_val = cqe_start_tpa->bitfields;
1591 rss_hash = rte_le_to_cpu_32(cqe_start_tpa->rss_hash);
1593 if (qede_tunn_exist(parse_flag)) {
1594 PMD_RX_LOG(INFO, rxq, "Rx tunneled packet\n");
1595 if (unlikely(qede_check_tunn_csum_l4(parse_flag))) {
1596 PMD_RX_LOG(ERR, rxq,
1597 "L4 csum failed, flags = 0x%x\n",
1599 rxq->rx_hw_errors++;
1600 ol_flags |= PKT_RX_L4_CKSUM_BAD;
1602 ol_flags |= PKT_RX_L4_CKSUM_GOOD;
1605 if (unlikely(qede_check_tunn_csum_l3(parse_flag))) {
1606 PMD_RX_LOG(ERR, rxq,
1607 "Outer L3 csum failed, flags = 0x%x\n",
1609 rxq->rx_hw_errors++;
1610 ol_flags |= PKT_RX_EIP_CKSUM_BAD;
1612 ol_flags |= PKT_RX_IP_CKSUM_GOOD;
1616 flags = cqe_start_tpa->tunnel_pars_flags.flags;
1618 flags = fp_cqe->tunnel_pars_flags.flags;
1619 tunn_parse_flag = flags;
1623 qede_rx_cqe_to_tunn_pkt_type(tunn_parse_flag);
1627 qede_rx_cqe_to_pkt_type_inner(parse_flag);
1629 /* Outer L3/L4 types are not available in the CQE */
1630 packet_type |= qede_rx_cqe_to_pkt_type_outer(rx_mb);
1632 /* Outer L3/L4 types are not available in the CQE.
1633 * Need to add offset to parse correctly,
1635 rx_mb->data_off = offset + RTE_PKTMBUF_HEADROOM;
1636 packet_type |= qede_rx_cqe_to_pkt_type_outer(rx_mb);
1638 packet_type |= qede_rx_cqe_to_pkt_type(parse_flag);
1641 /* Common handling for non-tunnel packets and for inner
1642 * headers in the case of tunnel.
1644 if (unlikely(qede_check_notunn_csum_l4(parse_flag))) {
1645 PMD_RX_LOG(ERR, rxq,
1646 "L4 csum failed, flags = 0x%x\n",
1648 rxq->rx_hw_errors++;
1649 ol_flags |= PKT_RX_L4_CKSUM_BAD;
1651 ol_flags |= PKT_RX_L4_CKSUM_GOOD;
1653 if (unlikely(qede_check_notunn_csum_l3(rx_mb, parse_flag))) {
1654 PMD_RX_LOG(ERR, rxq, "IP csum failed, flags = 0x%x\n",
1656 rxq->rx_hw_errors++;
1657 ol_flags |= PKT_RX_IP_CKSUM_BAD;
1659 ol_flags |= PKT_RX_IP_CKSUM_GOOD;
1662 if (CQE_HAS_VLAN(parse_flag) ||
1663 CQE_HAS_OUTER_VLAN(parse_flag)) {
1664 /* Note: FW doesn't indicate Q-in-Q packet */
1665 ol_flags |= PKT_RX_VLAN;
1666 if (qdev->vlan_strip_flg) {
1667 ol_flags |= PKT_RX_VLAN_STRIPPED;
1668 rx_mb->vlan_tci = vlan_tci;
1673 if (qdev->rss_enable) {
1674 ol_flags |= PKT_RX_RSS_HASH;
1675 rx_mb->hash.rss = rss_hash;
1679 qede_rx_bd_ring_consume(rxq);
1681 if (!tpa_start_flg && fp_cqe->bd_num > 1) {
1682 PMD_RX_LOG(DEBUG, rxq, "Jumbo-over-BD packet: %02x BDs"
1683 " len on first: %04x Total Len: %04x",
1684 fp_cqe->bd_num, len, pkt_len);
1685 num_segs = fp_cqe->bd_num - 1;
1687 if (qede_process_sg_pkts(p_rxq, seg1, num_segs,
1691 rx_alloc_count += num_segs;
1692 rxq->rx_segs += num_segs;
1694 rxq->rx_segs++; /* for the first segment */
1696 /* Prefetch next mbuf while processing current one. */
1697 preload_idx = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
1698 rte_prefetch0(rxq->sw_rx_ring[preload_idx].mbuf);
1700 /* Update rest of the MBUF fields */
1701 rx_mb->data_off = offset + RTE_PKTMBUF_HEADROOM;
1702 rx_mb->port = rxq->port_id;
1703 rx_mb->ol_flags = ol_flags;
1704 rx_mb->data_len = len;
1705 rx_mb->packet_type = packet_type;
1706 #ifdef RTE_LIBRTE_QEDE_DEBUG_RX
1707 print_rx_bd_info(rx_mb, rxq, bitfield_val);
1709 if (!tpa_start_flg) {
1710 rx_mb->nb_segs = fp_cqe->bd_num;
1711 rx_mb->pkt_len = pkt_len;
1713 /* store ref to the updated mbuf */
1714 tpa_info->tpa_head = rx_mb;
1715 tpa_info->tpa_tail = tpa_info->tpa_head;
1717 rte_prefetch1(rte_pktmbuf_mtod(rx_mb, void *));
1719 if (!tpa_start_flg) {
1720 rx_pkts[rx_pkt] = rx_mb;
1724 ecore_chain_recycle_consumed(&rxq->rx_comp_ring);
1725 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
1726 if (rx_pkt == nb_pkts) {
1727 PMD_RX_LOG(DEBUG, rxq,
1728 "Budget reached nb_pkts=%u received=%u",
1734 /* Request the number of buffers to be allocated in the next loop */
1735 rxq->rx_alloc_count = rx_alloc_count;
1737 rxq->rcv_pkts += rx_pkt;
1739 PMD_RX_LOG(DEBUG, rxq, "rx_pkts=%u core=%d", rx_pkt, rte_lcore_id());
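/*
 * Hedged usage sketch for the receive path above: an application reaches
 * qede_recv_pkts() through rte_eth_rx_burst(). The port/queue ids and
 * burst size are illustrative.
 */
#if 0	/* example only */
	struct rte_mbuf *pkts[32];
	uint16_t nb, k;

	nb = rte_eth_rx_burst(0 /* port */, 0 /* queue */, pkts, 32);
	for (k = 0; k < nb; k++)
		rte_pktmbuf_free(pkts[k]);	/* consume, then free */
#endif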
1745 /* Populate scatter gather buffer descriptor fields */
1746 static inline uint16_t
1747 qede_encode_sg_bd(struct qede_tx_queue *p_txq, struct rte_mbuf *m_seg,
1748 struct eth_tx_2nd_bd **bd2, struct eth_tx_3rd_bd **bd3,
1751 struct qede_tx_queue *txq = p_txq;
1752 struct eth_tx_bd *tx_bd = NULL;
1754 uint16_t nb_segs = 0;
1756 /* Check for scattered buffers */
1758 if (start_seg == 0) {
1760 *bd2 = (struct eth_tx_2nd_bd *)
1761 ecore_chain_produce(&txq->tx_pbl);
1762 memset(*bd2, 0, sizeof(struct eth_tx_2nd_bd));
1765 mapping = rte_mbuf_data_iova(m_seg);
1766 QEDE_BD_SET_ADDR_LEN(*bd2, mapping, m_seg->data_len);
1767 PMD_TX_LOG(DEBUG, txq, "BD2 len %04x", m_seg->data_len);
1768 } else if (start_seg == 1) {
1770 *bd3 = (struct eth_tx_3rd_bd *)
1771 ecore_chain_produce(&txq->tx_pbl);
1772 memset(*bd3, 0, sizeof(struct eth_tx_3rd_bd));
1775 mapping = rte_mbuf_data_iova(m_seg);
1776 QEDE_BD_SET_ADDR_LEN(*bd3, mapping, m_seg->data_len);
1777 PMD_TX_LOG(DEBUG, txq, "BD3 len %04x", m_seg->data_len);
1779 tx_bd = (struct eth_tx_bd *)
1780 ecore_chain_produce(&txq->tx_pbl);
1781 memset(tx_bd, 0, sizeof(*tx_bd));
1783 mapping = rte_mbuf_data_iova(m_seg);
1784 QEDE_BD_SET_ADDR_LEN(tx_bd, mapping, m_seg->data_len);
1785 PMD_TX_LOG(DEBUG, txq, "BD len %04x", m_seg->data_len);
1788 m_seg = m_seg->next;
1791 /* Return total scattered buffers */
1795 #ifdef RTE_LIBRTE_QEDE_DEBUG_TX
1797 print_tx_bd_info(struct qede_tx_queue *txq,
1798 struct eth_tx_1st_bd *bd1,
1799 struct eth_tx_2nd_bd *bd2,
1800 struct eth_tx_3rd_bd *bd3,
1801 uint64_t tx_ol_flags)
1803 char ol_buf[256] = { 0 }; /* for verbose prints */
1806 PMD_TX_LOG(INFO, txq,
1807 "BD1: nbytes=0x%04x nbds=0x%04x bd_flags=0x%04x bf=0x%04x",
1808 rte_cpu_to_le_16(bd1->nbytes), bd1->data.nbds,
1809 bd1->data.bd_flags.bitfields,
1810 rte_cpu_to_le_16(bd1->data.bitfields));
1812 PMD_TX_LOG(INFO, txq,
1813 "BD2: nbytes=0x%04x bf1=0x%04x bf2=0x%04x tunn_ip=0x%04x\n",
1814 rte_cpu_to_le_16(bd2->nbytes), bd2->data.bitfields1,
1815 bd2->data.bitfields2, bd2->data.tunn_ip_size);
1817 PMD_TX_LOG(INFO, txq,
1818 "BD3: nbytes=0x%04x bf=0x%04x MSS=0x%04x "
1819 "tunn_l4_hdr_start_offset_w=0x%04x tunn_hdr_size=0x%04x\n",
1820 rte_cpu_to_le_16(bd3->nbytes),
1821 rte_cpu_to_le_16(bd3->data.bitfields),
1822 rte_cpu_to_le_16(bd3->data.lso_mss),
1823 bd3->data.tunn_l4_hdr_start_offset_w,
1824 bd3->data.tunn_hdr_size_w);
1826 rte_get_tx_ol_flag_list(tx_ol_flags, ol_buf, sizeof(ol_buf));
1827 PMD_TX_LOG(INFO, txq, "TX offloads = %s\n", ol_buf);
1831 /* TX prepare to check that packets meet TX conditions */
1833 #ifdef RTE_LIBRTE_QEDE_DEBUG_TX
1834 qede_xmit_prep_pkts(void *p_txq, struct rte_mbuf **tx_pkts,
1837 struct qede_tx_queue *txq = p_txq;
1839 qede_xmit_prep_pkts(__rte_unused void *p_txq, struct rte_mbuf **tx_pkts,
1846 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
1850 for (i = 0; i < nb_pkts; i++) {
1852 ol_flags = m->ol_flags;
1853 if (ol_flags & PKT_TX_TCP_SEG) {
1854 if (m->nb_segs >= ETH_TX_MAX_BDS_PER_LSO_PACKET) {
1858 /* TBD: confirm it is ~9700B for both? */
1859 if (m->tso_segsz > ETH_TX_MAX_NON_LSO_PKT_LEN) {
1864 if (m->nb_segs >= ETH_TX_MAX_BDS_PER_NON_LSO_PACKET) {
1869 if (ol_flags & QEDE_TX_OFFLOAD_NOTSUP_MASK) {
1870 /* We support only limited tunnel protocols */
1871 if (ol_flags & PKT_TX_TUNNEL_MASK) {
1874 temp = ol_flags & PKT_TX_TUNNEL_MASK;
1875 if (temp == PKT_TX_TUNNEL_VXLAN ||
1876 temp == PKT_TX_TUNNEL_GENEVE ||
1877 temp == PKT_TX_TUNNEL_MPLSINUDP ||
1878 temp == PKT_TX_TUNNEL_GRE)
1882 rte_errno = ENOTSUP;
1886 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
1887 ret = rte_validate_tx_offload(m);
1895 #ifdef RTE_LIBRTE_QEDE_DEBUG_TX
1896 if (unlikely(i != nb_pkts))
1897 PMD_TX_LOG(ERR, txq, "TX prepare failed for %u\n",
1903 #define MPLSINUDP_HDR_SIZE (12)
1905 #ifdef RTE_LIBRTE_QEDE_DEBUG_TX
1907 qede_mpls_tunn_tx_sanity_check(struct rte_mbuf *mbuf,
1908 struct qede_tx_queue *txq)
1910 if (((mbuf->outer_l2_len + mbuf->outer_l3_len) / 2) > 0xff)
1911 PMD_TX_LOG(ERR, txq, "tunn_l4_hdr_start_offset overflow\n");
1912 if (((mbuf->outer_l2_len + mbuf->outer_l3_len +
1913 MPLSINUDP_HDR_SIZE) / 2) > 0xff)
1914 PMD_TX_LOG(ERR, txq, "tunn_hdr_size overflow\n");
1915 if (((mbuf->l2_len - MPLSINUDP_HDR_SIZE) / 2) >
1916 ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK)
1917 PMD_TX_LOG(ERR, txq, "inner_l2_hdr_size overflow\n");
1918 if (((mbuf->l2_len - MPLSINUDP_HDR_SIZE + mbuf->l3_len) / 2) >
1919 ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK)
1920 PMD_TX_LOG(ERR, txq, "inner_l4_hdr_offset overflow\n");
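/*
 * Worked example of the two-byte-word arithmetic checked above (lengths
 * are illustrative): with outer_l2_len = 14, outer_l3_len = 20 and the
 * fixed 12-byte MPLSINUDP_HDR_SIZE, the outer L4 header starts at
 * (14 + 20) / 2 = 17 words and the tunnel header spans
 * (14 + 20 + 12) / 2 = 23 words. The checks only warn when such a value
 * no longer fits the corresponding BD bit-field.
 */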
1925 qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
1927 struct qede_tx_queue *txq = p_txq;
1928 struct qede_dev *qdev = txq->qdev;
1929 struct ecore_dev *edev = &qdev->edev;
1930 struct rte_mbuf *mbuf;
1931 struct rte_mbuf *m_seg = NULL;
1932 uint16_t nb_tx_pkts;
1936 uint16_t nb_pkt_sent = 0;
1940 __rte_unused bool tunn_flg;
1941 bool tunn_ipv6_ext_flg;
1942 struct eth_tx_1st_bd *bd1;
1943 struct eth_tx_2nd_bd *bd2;
1944 struct eth_tx_3rd_bd *bd3;
1945 uint64_t tx_ol_flags;
1949 uint8_t bd1_bd_flags_bf;
1958 uint8_t tunn_l4_hdr_start_offset;
1959 uint8_t tunn_hdr_size;
1960 uint8_t inner_l2_hdr_size;
1961 uint16_t inner_l4_hdr_offset;
1963 if (unlikely(txq->nb_tx_avail < txq->tx_free_thresh)) {
1964 PMD_TX_LOG(DEBUG, txq, "send=%u avail=%u free_thresh=%u",
1965 nb_pkts, txq->nb_tx_avail, txq->tx_free_thresh);
1966 qede_process_tx_compl(edev, txq);
1969 nb_tx_pkts = nb_pkts;
1970 bd_prod = rte_cpu_to_le_16(ecore_chain_get_prod_idx(&txq->tx_pbl));
1971 while (nb_tx_pkts--) {
1972 /* Init flags/values */
1982 bd1_bd_flags_bf = 0;
1987 mplsoudp_flg = false;
1988 tunn_ipv6_ext_flg = false;
1990 tunn_l4_hdr_start_offset = 0;
1995 /* Check minimum TX BD availability against the number of free BDs */
1996 if (unlikely(txq->nb_tx_avail < mbuf->nb_segs))
1999 tx_ol_flags = mbuf->ol_flags;
2000 bd1_bd_flags_bf |= 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
2002 /* TX prepare would have already checked supported tunnel Tx
2003 * offloads. Don't rely on pkt_type marked by Rx, instead use
2004 * tx_ol_flags to decide.
2006 tunn_flg = !!(tx_ol_flags & PKT_TX_TUNNEL_MASK);
2009 /* Check against max which is Tunnel IPv6 + ext */
2010 if (unlikely(txq->nb_tx_avail <
2011 ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT))
2014 /* First indicate its a tunnel pkt */
2015 bd1_bf |= ETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK <<
2016 ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
2017 /* Legacy FW had flipped behavior in regard to this bit
2018 * i.e. it needed to be set to prevent the FW from touching
2019 * encapsulated packets when it didn't need to.
2021 if (unlikely(txq->is_legacy)) {
2023 ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
2026 /* Outer IP checksum offload */
2027 if (tx_ol_flags & (PKT_TX_OUTER_IP_CKSUM |
2028 PKT_TX_OUTER_IPV4)) {
2030 ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_MASK <<
2031 ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT;
2035 * Currently, only inner checksum offload in MPLS-in-UDP
2036 * tunnel with one MPLS label is supported. Both outer
2037 * and inner layer lengths need to be provided in
2040 if ((tx_ol_flags & PKT_TX_TUNNEL_MASK) ==
2041 PKT_TX_TUNNEL_MPLSINUDP) {
2042 mplsoudp_flg = true;
2043 #ifdef RTE_LIBRTE_QEDE_DEBUG_TX
2044 qede_mpls_tunn_tx_sanity_check(mbuf, txq);
2046 /* Outer L4 offset in two byte words */
2047 tunn_l4_hdr_start_offset =
2048 (mbuf->outer_l2_len + mbuf->outer_l3_len) / 2;
2049 /* Tunnel header size in two byte words */
2050 tunn_hdr_size = (mbuf->outer_l2_len +
2051 mbuf->outer_l3_len +
2052 MPLSINUDP_HDR_SIZE) / 2;
2053 /* Inner L2 header size in two byte words */
2054 inner_l2_hdr_size = (mbuf->l2_len -
2055 MPLSINUDP_HDR_SIZE) / 2;
2056 /* Inner L4 header offset from the beginning
2057 * of the inner packet in two byte words
2059 inner_l4_hdr_offset = (mbuf->l2_len -
2060 MPLSINUDP_HDR_SIZE + mbuf->l3_len) / 2;
2062 /* Inner L2 size and address type */
2063 bd2_bf1 |= (inner_l2_hdr_size &
2064 ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK) <<
2065 ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_SHIFT;
2066 bd2_bf1 |= (UNICAST_ADDRESS &
2067 ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_MASK) <<
2068 ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_SHIFT;
2069 /* Treated as IPv6+Ext */
2071 1 << ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_SHIFT;
2073 /* Mark inner IPv6 if present */
2074 if (tx_ol_flags & PKT_TX_IPV6)
2076 1 << ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_SHIFT;
2078 /* Inner L4 offsets */
2079 if ((tx_ol_flags & (PKT_TX_IPV4 | PKT_TX_IPV6)) &&
2080 (tx_ol_flags & (PKT_TX_UDP_CKSUM |
2081 PKT_TX_TCP_CKSUM))) {
2082 /* Determines if BD3 is needed */
2083 tunn_ipv6_ext_flg = true;
2084 if ((tx_ol_flags & PKT_TX_L4_MASK) ==
2087 1 << ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT;
2090 /* TODO: other pseudo checksum modes are not supported */
2094 ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH <<
2095 ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT;
2096 bd2_bf2 |= (inner_l4_hdr_offset &
2097 ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK) <<
2098 ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT;
2100 } /* End MPLSoUDP */
2101 } /* End Tunnel handling */
2103 if (tx_ol_flags & PKT_TX_TCP_SEG) {
2105 if (unlikely(txq->nb_tx_avail <
2106 ETH_TX_MIN_BDS_PER_LSO_PKT))
2108 /* For LSO, packet header and payload must reside on
2109 * buffers pointed by different BDs. Using BD1 for HDR
2110 * and BD2 onwards for data.
2112 hdr_size = mbuf->l2_len + mbuf->l3_len + mbuf->l4_len;
2114 hdr_size += mbuf->outer_l2_len +
2117 bd1_bd_flags_bf |= 1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT;
2119 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
2120 /* PKT_TX_TCP_SEG implies PKT_TX_TCP_CKSUM */
2122 1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
2123 mss = rte_cpu_to_le_16(mbuf->tso_segsz);
2124 /* Using one header BD */
2125 bd3_bf |= rte_cpu_to_le_16(1 <<
2126 ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
2128 if (unlikely(txq->nb_tx_avail <
2129 ETH_TX_MIN_BDS_PER_NON_LSO_PKT))
2132 (mbuf->pkt_len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK)
2133 << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
2136 /* Descriptor based VLAN insertion */
2137 if (tx_ol_flags & PKT_TX_VLAN_PKT) {
2138 vlan = rte_cpu_to_le_16(mbuf->vlan_tci);
2140 1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT;
2143 /* Offload the IP checksum in the hardware */
2144 if (tx_ol_flags & PKT_TX_IP_CKSUM) {
2146 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
2147 /* There's no DPDK flag to request outer-L4 csum
2148 * offload. But in the case of a tunnel, if inner L3 or L4
2149 * csum offload is requested, then we need to force
2150 * recalculation of the tunnel header L4 csum as well.
2152 if (tunn_flg && ((tx_ol_flags & PKT_TX_TUNNEL_MASK) !=
2153 PKT_TX_TUNNEL_GRE)) {
2155 ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK <<
2156 ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT;
2160 /* L4 checksum offload (tcp or udp) */
2161 if ((tx_ol_flags & (PKT_TX_IPV4 | PKT_TX_IPV6)) &&
2162 (tx_ol_flags & (PKT_TX_UDP_CKSUM | PKT_TX_TCP_CKSUM))) {
2164 1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
2165 /* There's no DPDK flag to request outer-L4 csum
2166 * offload. But in the case of a tunnel, if inner L3 or L4
2167 * csum offload is requested, then we need to force
2168 * recalculation of the tunnel header L4 csum as well.
2172 ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK <<
2173 ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT;
2177 /* Fill the entry in the SW ring and the BDs in the FW ring */
2179 txq->sw_tx_ring[idx].mbuf = mbuf;
2182 bd1 = (struct eth_tx_1st_bd *)ecore_chain_produce(&txq->tx_pbl);
2183 memset(bd1, 0, sizeof(struct eth_tx_1st_bd));
2186 /* Map MBUF linear data for DMA and set in the BD1 */
2187 QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_iova(mbuf),
2189 bd1->data.bitfields = rte_cpu_to_le_16(bd1_bf);
2190 bd1->data.bd_flags.bitfields = bd1_bd_flags_bf;
2191 bd1->data.vlan = vlan;
2193 if (lso_flg || mplsoudp_flg) {
2194 bd2 = (struct eth_tx_2nd_bd *)ecore_chain_produce
2196 memset(bd2, 0, sizeof(struct eth_tx_2nd_bd));
2200 QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_iova(mbuf),
2203 QEDE_BD_SET_ADDR_LEN(bd2, (hdr_size +
2204 rte_mbuf_data_iova(mbuf)),
2205 mbuf->data_len - hdr_size);
2206 bd2->data.bitfields1 = rte_cpu_to_le_16(bd2_bf1);
2208 bd2->data.bitfields2 =
2209 rte_cpu_to_le_16(bd2_bf2);
2211 bd2->data.tunn_ip_size =
2212 rte_cpu_to_le_16(mbuf->outer_l3_len);
2215 if (lso_flg || (mplsoudp_flg && tunn_ipv6_ext_flg)) {
2216 bd3 = (struct eth_tx_3rd_bd *)
2217 ecore_chain_produce(&txq->tx_pbl);
2218 memset(bd3, 0, sizeof(struct eth_tx_3rd_bd));
2220 bd3->data.bitfields = rte_cpu_to_le_16(bd3_bf);
2222 bd3->data.lso_mss = mss;
2224 bd3->data.tunn_l4_hdr_start_offset_w =
2225 tunn_l4_hdr_start_offset;
2226 bd3->data.tunn_hdr_size_w =
2232 /* Handle fragmented MBUF */
2235 /* Encode scatter gather buffer descriptors if required */
2236 nb_frags = qede_encode_sg_bd(txq, m_seg, &bd2, &bd3, nbds - 1);
2237 bd1->data.nbds = nbds + nb_frags;
2239 txq->nb_tx_avail -= bd1->data.nbds;
2242 rte_cpu_to_le_16(ecore_chain_get_prod_idx(&txq->tx_pbl));
2243 #ifdef RTE_LIBRTE_QEDE_DEBUG_TX
2244 print_tx_bd_info(txq, bd1, bd2, bd3, tx_ol_flags);
2250 /* Write value of prod idx into bd_prod */
2251 txq->tx_db.data.bd_prod = bd_prod;
2253 rte_compiler_barrier();
2254 DIRECT_REG_WR_RELAXED(edev, txq->doorbell_addr, txq->tx_db.raw);
2257 /* Check again for Tx completions */
2258 qede_process_tx_compl(edev, txq);
2260 PMD_TX_LOG(DEBUG, txq, "to_send=%u sent=%u bd_prod=%u core=%d",
2261 nb_pkts, nb_pkt_sent, TX_PROD(txq), rte_lcore_id());
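/*
 * Hedged usage sketch for the transmit path above: an application reaches
 * qede_xmit_prep_pkts() and qede_xmit_pkts() through rte_eth_tx_prepare()
 * and rte_eth_tx_burst(). Port/queue ids are illustrative.
 */
#if 0	/* example only */
	uint16_t nb_prep, nb_sent, k;

	nb_prep = rte_eth_tx_prepare(0 /* port */, 0 /* queue */, pkts, nb);
	nb_sent = rte_eth_tx_burst(0, 0, pkts, nb_prep);

	/* mbufs the ring did not accept remain owned by the caller */
	for (k = nb_sent; k < nb; k++)
		rte_pktmbuf_free(pkts[k]);
#endif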
2267 qede_rxtx_pkts_dummy(__rte_unused void *p_rxq,
2268 __rte_unused struct rte_mbuf **pkts,
2269 __rte_unused uint16_t nb_pkts)
2275 /* This function does a dry-run walk over the completion queue
2276 * to calculate the number of BDs used by the HW.
2277 * At the end, it restores the state of the completion queue.
2280 qede_parse_fp_cqe(struct qede_rx_queue *rxq)
2282 uint16_t hw_comp_cons, sw_comp_cons, bd_count = 0;
2283 union eth_rx_cqe *cqe, *orig_cqe = NULL;
2285 hw_comp_cons = rte_le_to_cpu_16(*rxq->hw_cons_ptr);
2286 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
2288 if (hw_comp_cons == sw_comp_cons)
2291 /* Get the CQE from the completion ring */
2292 cqe = (union eth_rx_cqe *)ecore_chain_consume(&rxq->rx_comp_ring);
2295 while (sw_comp_cons != hw_comp_cons) {
2296 switch (cqe->fast_path_regular.type) {
2297 case ETH_RX_CQE_TYPE_REGULAR:
2298 bd_count += cqe->fast_path_regular.bd_num;
2300 case ETH_RX_CQE_TYPE_TPA_END:
2301 bd_count += cqe->fast_path_tpa_end.num_of_bds;
2308 (union eth_rx_cqe *)ecore_chain_consume(&rxq->rx_comp_ring);
2309 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
2312 /* revert comp_ring to original state */
2313 ecore_chain_set_cons(&rxq->rx_comp_ring, sw_comp_cons, orig_cqe);
2319 qede_rx_descriptor_status(void *p_rxq, uint16_t offset)
2321 uint16_t hw_bd_cons, sw_bd_cons, sw_bd_prod;
2322 uint16_t produced, consumed;
2323 struct qede_rx_queue *rxq = p_rxq;
2325 if (offset > rxq->nb_rx_desc)
2328 sw_bd_cons = ecore_chain_get_cons_idx(&rxq->rx_bd_ring);
2329 sw_bd_prod = ecore_chain_get_prod_idx(&rxq->rx_bd_ring);
2331 /* find BDs used by HW from completion queue elements */
2332 hw_bd_cons = sw_bd_cons + qede_parse_fp_cqe(rxq);
2334 if (hw_bd_cons < sw_bd_cons)
2335 /* wraparound case */
2336 consumed = (0xffff - sw_bd_cons) + hw_bd_cons;
2338 consumed = hw_bd_cons - sw_bd_cons;
2340 if (offset <= consumed)
2341 return RTE_ETH_RX_DESC_DONE;
2343 if (sw_bd_prod < sw_bd_cons)
2344 /* wraparound case */
2345 produced = (0xffff - sw_bd_cons) + sw_bd_prod;
2347 produced = sw_bd_prod - sw_bd_cons;
2349 if (offset <= produced)
2350 return RTE_ETH_RX_DESC_AVAIL;
2352 return RTE_ETH_RX_DESC_UNAVAIL;
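/*
 * Worked example of the uint16_t wraparound handling above (numbers are
 * illustrative): with sw_bd_cons = 0xfff0 and hw_bd_cons = 0x0010 the
 * hardware consumer has wrapped past 0xffff, so
 *
 *	consumed = (0xffff - 0xfff0) + 0x0010 = 0x001f
 *
 * and any offset <= 0x1f reports RTE_ETH_RX_DESC_DONE; the produced
 * count just above the final return is computed the same way.
 */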