2 * Copyright (c) 2016 QLogic Corporation.
6 * See LICENSE.qede_pmd for copyright and licensing details.
11 static bool gro_disable = 1; /* mod_param */
13 #define QEDE_FASTPATH_TX (1 << 0)
14 #define QEDE_FASTPATH_RX (1 << 1)
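/*
 * Note: fp->type is a bitmask built from these flags; a fastpath can serve
 * Rx, Tx or both, and callers test it with e.g. (fp->type & QEDE_FASTPATH_RX)
 * before touching fp->rxq.
 */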
16 static inline int qede_alloc_rx_buffer(struct qede_rx_queue *rxq)
18 struct rte_mbuf *new_mb = NULL;
19 struct eth_rx_bd *rx_bd;
21 uint16_t idx = rxq->sw_rx_prod & NUM_RX_BDS(rxq);
23 new_mb = rte_mbuf_raw_alloc(rxq->mb_pool);
24 if (unlikely(!new_mb)) {
26 "Failed to allocate rx buffer "
27 "sw_rx_prod %u sw_rx_cons %u mp entries %u free %u",
28 idx, rxq->sw_rx_cons & NUM_RX_BDS(rxq),
29 rte_mempool_avail_count(rxq->mb_pool),
30 rte_mempool_in_use_count(rxq->mb_pool));
33 rxq->sw_rx_ring[idx].mbuf = new_mb;
34 rxq->sw_rx_ring[idx].page_offset = 0;
35 mapping = rte_mbuf_data_dma_addr_default(new_mb);
36 /* Advance PROD and get BD pointer */
37 rx_bd = (struct eth_rx_bd *)ecore_chain_produce(&rxq->rx_bd_ring);
38 rx_bd->addr.hi = rte_cpu_to_le_32(U64_HI(mapping));
39 rx_bd->addr.lo = rte_cpu_to_le_32(U64_LO(mapping));
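/*
 * The 64-bit IOVA of the mbuf data is split into two little-endian 32-bit
 * words because the hardware Rx BD stores the address as separate hi/lo
 * fields; U64_HI()/U64_LO() extract the halves before the byte-order
 * conversion.
 */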
44 static void qede_rx_queue_release_mbufs(struct qede_rx_queue *rxq)
48 if (rxq->sw_rx_ring != NULL) {
49 for (i = 0; i < rxq->nb_rx_desc; i++) {
50 if (rxq->sw_rx_ring[i].mbuf != NULL) {
51 rte_pktmbuf_free(rxq->sw_rx_ring[i].mbuf);
52 rxq->sw_rx_ring[i].mbuf = NULL;
58 void qede_rx_queue_release(void *rx_queue)
60 struct qede_rx_queue *rxq = rx_queue;
63 qede_rx_queue_release_mbufs(rxq);
64 rte_free(rxq->sw_rx_ring);
65 rxq->sw_rx_ring = NULL;
71 static void qede_tx_queue_release_mbufs(struct qede_tx_queue *txq)
75 PMD_TX_LOG(DEBUG, txq, "releasing %u mbufs\n", txq->nb_tx_desc);
77 if (txq->sw_tx_ring) {
78 for (i = 0; i < txq->nb_tx_desc; i++) {
79 if (txq->sw_tx_ring[i].mbuf) {
80 rte_pktmbuf_free(txq->sw_tx_ring[i].mbuf);
81 txq->sw_tx_ring[i].mbuf = NULL;
88 qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
89 uint16_t nb_desc, unsigned int socket_id,
90 const struct rte_eth_rxconf *rx_conf,
91 struct rte_mempool *mp)
93 struct qede_dev *qdev = dev->data->dev_private;
94 struct ecore_dev *edev = &qdev->edev;
95 struct rte_eth_dev_data *eth_data = dev->data;
96 struct qede_rx_queue *rxq;
97 uint16_t pkt_len = (uint16_t)dev->data->dev_conf.rxmode.max_rx_pkt_len;
103 PMD_INIT_FUNC_TRACE(edev);
105 /* Note: Ring size/align is controlled by struct rte_eth_desc_lim */
106 if (!rte_is_power_of_2(nb_desc)) {
107 DP_ERR(edev, "Ring size %u is not power of 2\n",
112 /* Free memory prior to re-allocation if needed... */
113 if (dev->data->rx_queues[queue_idx] != NULL) {
114 qede_rx_queue_release(dev->data->rx_queues[queue_idx]);
115 dev->data->rx_queues[queue_idx] = NULL;
118 /* First allocate the rx queue data structure */
119 rxq = rte_zmalloc_socket("qede_rx_queue", sizeof(struct qede_rx_queue),
120 RTE_CACHE_LINE_SIZE, socket_id);
123 DP_ERR(edev, "Unable to allocate memory for rxq on socket %u",
130 rxq->nb_rx_desc = nb_desc;
131 rxq->queue_id = queue_idx;
132 rxq->port_id = dev->data->port_id;
135 data_size = (uint16_t)rte_pktmbuf_data_room_size(mp) -
136 RTE_PKTMBUF_HEADROOM;
139 rxq->rx_buf_size = data_size;
141 DP_INFO(edev, "MTU = %u ; RX buffer = %u\n",
142 qdev->mtu, rxq->rx_buf_size);
144 if (pkt_len > ETHER_MAX_LEN) {
145 dev->data->dev_conf.rxmode.jumbo_frame = 1;
146 DP_NOTICE(edev, false, "jumbo frame enabled\n");
148 dev->data->dev_conf.rxmode.jumbo_frame = 0;
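/*
 * Illustrative sketch (application side, not part of the driver): the
 * jumbo_frame flag above is derived from the max Rx packet length the
 * application configured, roughly like this:
 *
 *   struct rte_eth_conf port_conf = { 0 };
 *   port_conf.rxmode.max_rx_pkt_len = 9000;  (> ETHER_MAX_LEN, so jumbo)
 *   rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &port_conf);
 *
 * Field names follow the rte_eth_conf layout of this DPDK release.
 */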
151 /* Allocate the parallel driver ring for Rx buffers */
152 size = sizeof(*rxq->sw_rx_ring) * rxq->nb_rx_desc;
153 rxq->sw_rx_ring = rte_zmalloc_socket("sw_rx_ring", size,
154 RTE_CACHE_LINE_SIZE, socket_id);
155 if (!rxq->sw_rx_ring) {
156 DP_NOTICE(edev, false,
157 "Unable to alloc memory for sw_rx_ring on socket %u\n",
164 /* Allocate FW Rx ring */
165 rc = qdev->ops->common->chain_alloc(edev,
166 ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
167 ECORE_CHAIN_MODE_NEXT_PTR,
168 ECORE_CHAIN_CNT_TYPE_U16,
170 sizeof(struct eth_rx_bd),
173 if (rc != ECORE_SUCCESS) {
174 DP_NOTICE(edev, false,
175 "Unable to alloc memory for rxbd ring on socket %u\n",
177 rte_free(rxq->sw_rx_ring);
178 rxq->sw_rx_ring = NULL;
184 /* Allocate FW completion ring */
185 rc = qdev->ops->common->chain_alloc(edev,
186 ECORE_CHAIN_USE_TO_CONSUME,
187 ECORE_CHAIN_MODE_PBL,
188 ECORE_CHAIN_CNT_TYPE_U16,
190 sizeof(union eth_rx_cqe),
193 if (rc != ECORE_SUCCESS) {
194 DP_NOTICE(edev, false,
195 "Unable to alloc memory for cqe ring on socket %u\n",
197 /* TBD: Freeing RX BD ring */
198 rte_free(rxq->sw_rx_ring);
199 rxq->sw_rx_ring = NULL;
204 /* Allocate buffers for the Rx ring */
205 for (i = 0; i < rxq->nb_rx_desc; i++) {
206 rc = qede_alloc_rx_buffer(rxq);
208 DP_NOTICE(edev, false,
209 "RX buffer allocation failed at idx=%d\n", i);
214 dev->data->rx_queues[queue_idx] = rxq;
216 DP_INFO(edev, "rxq %d num_desc %u rx_buf_size=%u socket %u\n",
217 queue_idx, nb_desc, rxq->rx_buf_size, socket_id);
221 qede_rx_queue_release(rxq);
225 void qede_tx_queue_release(void *tx_queue)
227 struct qede_tx_queue *txq = tx_queue;
230 qede_tx_queue_release_mbufs(txq);
231 if (txq->sw_tx_ring) {
232 rte_free(txq->sw_tx_ring);
233 txq->sw_tx_ring = NULL;
241 qede_tx_queue_setup(struct rte_eth_dev *dev,
244 unsigned int socket_id,
245 const struct rte_eth_txconf *tx_conf)
247 struct qede_dev *qdev = dev->data->dev_private;
248 struct ecore_dev *edev = &qdev->edev;
249 struct qede_tx_queue *txq;
252 PMD_INIT_FUNC_TRACE(edev);
254 if (!rte_is_power_of_2(nb_desc)) {
255 DP_ERR(edev, "Ring size %u is not power of 2\n",
260 /* Free memory prior to re-allocation if needed... */
261 if (dev->data->tx_queues[queue_idx] != NULL) {
262 qede_tx_queue_release(dev->data->tx_queues[queue_idx]);
263 dev->data->tx_queues[queue_idx] = NULL;
266 txq = rte_zmalloc_socket("qede_tx_queue", sizeof(struct qede_tx_queue),
267 RTE_CACHE_LINE_SIZE, socket_id);
271 "Unable to allocate memory for txq on socket %u",
276 txq->nb_tx_desc = nb_desc;
278 txq->port_id = dev->data->port_id;
280 rc = qdev->ops->common->chain_alloc(edev,
281 ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
282 ECORE_CHAIN_MODE_PBL,
283 ECORE_CHAIN_CNT_TYPE_U16,
285 sizeof(union eth_tx_bd_types),
287 if (rc != ECORE_SUCCESS) {
289 "Unable to allocate memory for txbd ring on socket %u",
291 qede_tx_queue_release(txq);
295 /* Allocate software ring */
296 txq->sw_tx_ring = rte_zmalloc_socket("txq->sw_tx_ring",
297 (sizeof(struct qede_tx_entry) *
299 RTE_CACHE_LINE_SIZE, socket_id);
301 if (!txq->sw_tx_ring) {
303 "Unable to allocate memory for txbd ring on socket %u",
305 qede_tx_queue_release(txq);
309 txq->queue_id = queue_idx;
311 txq->nb_tx_avail = txq->nb_tx_desc;
313 txq->tx_free_thresh =
314 tx_conf->tx_free_thresh ? tx_conf->tx_free_thresh :
315 (txq->nb_tx_desc - QEDE_DEFAULT_TX_FREE_THRESH);
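/*
 * Worked example (assuming QEDE_DEFAULT_TX_FREE_THRESH is a small constant,
 * e.g. 32): with nb_desc = 512 and tx_conf->tx_free_thresh = 0, the default
 * becomes 512 - 32 = 480, so qede_xmit_pkts() starts polling for Tx
 * completions once fewer than 480 descriptors remain available, i.e. once
 * roughly 32 descriptors are in flight.
 */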
317 dev->data->tx_queues[queue_idx] = txq;
320 "txq %u num_desc %u tx_free_thresh %u socket %u\n",
321 queue_idx, nb_desc, txq->tx_free_thresh, socket_id);
326 /* This function inits fp content and resets the SB, RXQ and TXQ arrays */
327 static void qede_init_fp(struct rte_eth_dev *eth_dev)
329 struct qede_fastpath *fp;
330 uint8_t i, rss_id, index, tc;
331 struct qede_dev *qdev = eth_dev->data->dev_private;
332 int fp_rx = qdev->fp_num_rx, rxq = 0, txq = 0;
334 memset((void *)qdev->fp_array, 0, (QEDE_QUEUE_CNT(qdev) *
335 sizeof(*qdev->fp_array)));
336 memset((void *)qdev->sb_array, 0, (QEDE_QUEUE_CNT(qdev) *
337 sizeof(*qdev->sb_array)));
339 fp = &qdev->fp_array[i];
341 fp->type = QEDE_FASTPATH_RX;
344 fp->type = QEDE_FASTPATH_TX;
349 fp = &qdev->fp_array[i];
353 /* Point rxq to the generic rte queues that were created
354 * during queue setup.
356 if (fp->type & QEDE_FASTPATH_RX) {
357 fp->rxq = eth_dev->data->rx_queues[i];
358 fp->rxq->queue_id = rxq++;
360 fp->sb_info = &qdev->sb_array[i];
362 if (fp->type & QEDE_FASTPATH_TX) {
363 for (tc = 0; tc < qdev->num_tc; tc++) {
364 index = tc * QEDE_TSS_CNT(qdev) + txq;
365 fp->txqs[tc] = eth_dev->data->tx_queues[index];
366 fp->txqs[tc]->queue_id = index;
370 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", "qdev", i);
373 qdev->gro_disable = gro_disable;
376 void qede_free_fp_arrays(struct qede_dev *qdev)
378 /* It assumes qede_free_mem_load() has been called before this */
379 if (qdev->fp_array != NULL) {
380 rte_free(qdev->fp_array);
381 qdev->fp_array = NULL;
384 if (qdev->sb_array != NULL) {
385 rte_free(qdev->sb_array);
386 qdev->sb_array = NULL;
390 int qede_alloc_fp_array(struct qede_dev *qdev)
392 struct qede_fastpath *fp;
393 struct ecore_dev *edev = &qdev->edev;
396 qdev->fp_array = rte_calloc("fp", QEDE_QUEUE_CNT(qdev),
397 sizeof(*qdev->fp_array),
398 RTE_CACHE_LINE_SIZE);
400 if (!qdev->fp_array) {
401 DP_ERR(edev, "fp array allocation failed\n");
405 qdev->sb_array = rte_calloc("sb", QEDE_QUEUE_CNT(qdev),
406 sizeof(*qdev->sb_array),
407 RTE_CACHE_LINE_SIZE);
409 if (!qdev->sb_array) {
410 DP_ERR(edev, "sb array allocation failed\n");
411 rte_free(qdev->fp_array);
418 /* This function allocates fast-path status block memory */
420 qede_alloc_mem_sb(struct qede_dev *qdev, struct ecore_sb_info *sb_info,
423 struct ecore_dev *edev = &qdev->edev;
424 struct status_block *sb_virt;
428 sb_virt = OSAL_DMA_ALLOC_COHERENT(edev, &sb_phys, sizeof(*sb_virt));
431 DP_ERR(edev, "Status block allocation failed\n");
435 rc = qdev->ops->common->sb_init(edev, sb_info,
436 sb_virt, sb_phys, sb_id,
437 QED_SB_TYPE_L2_QUEUE);
439 DP_ERR(edev, "Status block initialization failed\n");
440 /* TBD: No dma_free_coherent possible */
448 qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq)
450 uint16_t bd_prod = ecore_chain_get_prod_idx(&rxq->rx_bd_ring);
451 uint16_t cqe_prod = ecore_chain_get_prod_idx(&rxq->rx_comp_ring);
452 struct eth_rx_prod_data rx_prods = { 0 };
454 /* Update producers */
455 rx_prods.bd_prod = rte_cpu_to_le_16(bd_prod);
456 rx_prods.cqe_prod = rte_cpu_to_le_16(cqe_prod);
458 /* Make sure that the BD and SGE data is updated before updating the
459 * producers since FW might read the BD/SGE right after the producer
* is updated. A write barrier enforces that ordering.
*/
rte_wmb();

464 internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods),
465 (uint32_t *)&rx_prods);
467 /* mmiowb is needed to synchronize doorbell writes from more than one
468 * processor. It guarantees that the write arrives to the device before
469 * the napi lock is released and another qede_poll is called (possibly
470 * on another CPU). Without this barrier, the next doorbell can bypass
471 * this doorbell. This is applicable to IA64/Altix systems.
475 PMD_RX_LOG(DEBUG, rxq, "bd_prod %u cqe_prod %u\n", bd_prod, cqe_prod);
478 static inline uint32_t
479 qede_rxfh_indir_default(uint32_t index, uint32_t n_rx_rings)
481 return index % n_rx_rings;
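/*
 * Example: with n_rx_rings = 4 the default indirection table becomes
 * 0,1,2,3,0,1,2,3,... for all ECORE_RSS_IND_TABLE_SIZE entries, i.e. a plain
 * round-robin spread of RSS hash buckets across the Rx queues.
 */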
484 static void qede_prandom_bytes(uint32_t *buff, size_t bytes)
488 srand((unsigned int)time(NULL));
490 for (i = 0; i < ECORE_RSS_KEY_SIZE; i++)
495 qede_config_rss(struct rte_eth_dev *eth_dev,
496 struct qed_update_vport_rss_params *rss_params)
498 struct rte_eth_rss_conf rss_conf;
499 enum rte_eth_rx_mq_mode mode = eth_dev->data->dev_conf.rxmode.mq_mode;
500 struct qede_dev *qdev = eth_dev->data->dev_private;
501 struct ecore_dev *edev = &qdev->edev;
507 rss_conf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
508 key = (uint32_t *)rss_conf.rss_key;
509 hf = rss_conf.rss_hf;
510 PMD_INIT_FUNC_TRACE(edev);
512 /* Check if RSS conditions are met.
513 * Note: Even though it is meaningless to enable RSS with a single queue,
514 * it can still be used to produce an RSS hash, so that check is skipped.
517 if (!(mode & ETH_MQ_RX_RSS)) {
518 DP_INFO(edev, "RSS flag is not set\n");
522 DP_INFO(edev, "RSS flag is set\n");
524 if (rss_conf.rss_hf == 0)
525 DP_NOTICE(edev, false, "RSS hash function = 0, disables RSS\n");
527 if (rss_conf.rss_key != NULL)
528 memcpy(qdev->rss_params.rss_key, rss_conf.rss_key,
529 rss_conf.rss_key_len);
531 memset(rss_params, 0, sizeof(*rss_params));
533 for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++)
534 rss_params->rss_ind_table[i] = qede_rxfh_indir_default(i,
537 /* key and protocols */
538 if (rss_conf.rss_key == NULL)
539 qede_prandom_bytes(rss_params->rss_key,
540 sizeof(rss_params->rss_key));
542 memcpy(rss_params->rss_key, rss_conf.rss_key,
543 rss_conf.rss_key_len);
546 rss_caps |= (hf & ETH_RSS_IPV4) ? ECORE_RSS_IPV4 : 0;
547 rss_caps |= (hf & ETH_RSS_IPV6) ? ECORE_RSS_IPV6 : 0;
548 rss_caps |= (hf & ETH_RSS_IPV6_EX) ? ECORE_RSS_IPV6 : 0;
549 rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? ECORE_RSS_IPV4_TCP : 0;
550 rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? ECORE_RSS_IPV6_TCP : 0;
551 rss_caps |= (hf & ETH_RSS_IPV6_TCP_EX) ? ECORE_RSS_IPV6_TCP : 0;
553 rss_params->rss_caps = rss_caps;
555 DP_INFO(edev, "RSS check passes\n");
560 static int qede_start_queues(struct rte_eth_dev *eth_dev, bool clear_stats)
562 struct qede_dev *qdev = eth_dev->data->dev_private;
563 struct ecore_dev *edev = &qdev->edev;
564 struct qed_update_vport_rss_params *rss_params = &qdev->rss_params;
565 struct qed_dev_info *qed_info = &qdev->dev_info.common;
566 struct qed_update_vport_params vport_update_params;
567 struct qed_start_vport_params start = { 0 };
568 int vlan_removal_en = 1;
571 if (!qdev->fp_num_rx) {
573 "Cannot update V-VPORT as active as "
574 "there are no Rx queues\n");
578 start.remove_inner_vlan = vlan_removal_en;
579 start.gro_enable = !qdev->gro_disable;
580 start.mtu = qdev->mtu;
582 start.drop_ttl0 = true;
583 start.clear_stats = clear_stats;
585 rc = qdev->ops->vport_start(edev, &start);
587 DP_ERR(edev, "Start V-PORT failed %d\n", rc);
592 "Start vport ramrod passed, vport_id = %d,"
593 " MTU = %d, vlan_removal_en = %d\n",
594 start.vport_id, qdev->mtu, vlan_removal_en);
597 struct qede_fastpath *fp = &qdev->fp_array[i];
601 if (fp->type & QEDE_FASTPATH_RX) {
602 tbl = ecore_chain_get_pbl_phys(&fp->rxq->rx_comp_ring);
603 cnt = ecore_chain_get_page_cnt(&fp->rxq->rx_comp_ring);
605 ecore_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0);
607 rc = qdev->ops->q_rx_start(edev, i, fp->rxq->queue_id,
609 fp->sb_info->igu_sb_id,
611 fp->rxq->rx_buf_size,
612 fp->rxq->rx_bd_ring.p_phys_addr,
615 &fp->rxq->hw_rxq_prod_addr);
618 "Start rxq #%d failed %d\n",
619 fp->rxq->queue_id, rc);
623 fp->rxq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[RX_PI];
625 qede_update_rx_prod(qdev, fp->rxq);
628 if (!(fp->type & QEDE_FASTPATH_TX))
630 for (tc = 0; tc < qdev->num_tc; tc++) {
631 struct qede_tx_queue *txq = fp->txqs[tc];
632 int txq_index = tc * QEDE_RSS_CNT(qdev) + i;
634 tbl = ecore_chain_get_pbl_phys(&txq->tx_pbl);
635 cnt = ecore_chain_get_page_cnt(&txq->tx_pbl);
636 rc = qdev->ops->q_tx_start(edev, i, txq->queue_id,
638 fp->sb_info->igu_sb_id,
641 &txq->doorbell_addr);
643 DP_ERR(edev, "Start txq %u failed %d\n",
649 &fp->sb_info->sb_virt->pi_array[TX_PI(tc)];
650 SET_FIELD(txq->tx_db.data.params,
651 ETH_DB_DATA_DEST, DB_DEST_XCM);
652 SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD,
654 SET_FIELD(txq->tx_db.data.params,
655 ETH_DB_DATA_AGG_VAL_SEL,
656 DQ_XCM_ETH_TX_BD_PROD_CMD);
658 txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
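/*
 * The doorbell record prepared above tells the hardware where the write goes
 * (DB_DEST_XCM, the Tx context manager) and that the value carried is the
 * Tx BD producer (DQ_XCM_ETH_TX_BD_PROD_CMD). qede_xmit_pkts() later only
 * refreshes tx_db.data.bd_prod and writes txq->tx_db.raw to
 * txq->doorbell_addr to kick transmission.
 */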
662 /* Prepare and send the vport enable */
663 memset(&vport_update_params, 0, sizeof(vport_update_params));
664 vport_update_params.vport_id = start.vport_id;
665 vport_update_params.update_vport_active_flg = 1;
666 vport_update_params.vport_active_flg = 1;
669 if (qed_info->mf_mode == MF_NPAR && qed_info->tx_switching) {
670 /* TBD: Check SRIOV enabled for VF */
671 vport_update_params.update_tx_switching_flg = 1;
672 vport_update_params.tx_switching_flg = 1;
675 if (!qede_config_rss(eth_dev, rss_params)) {
676 vport_update_params.update_rss_flg = 1;
678 qdev->rss_enabled = 1;
679 DP_INFO(edev, "Updating RSS flag\n");
681 qdev->rss_enabled = 0;
682 DP_INFO(edev, "Not Updating RSS flag\n");
685 rte_memcpy(&vport_update_params.rss_params, rss_params,
686 sizeof(*rss_params));
688 rc = qdev->ops->vport_update(edev, &vport_update_params);
690 DP_ERR(edev, "Update V-PORT failed %d\n", rc);
698 static bool qede_tunn_exist(uint16_t flag)
700 return !!((PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK <<
701 PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT) & flag);
704 static inline uint8_t qede_check_tunn_csum(uint16_t flag)
707 uint16_t csum_flag = 0;
709 if ((PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK <<
710 PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT) & flag)
711 csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK <<
712 PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT;
714 if ((PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
715 PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT) & flag) {
716 csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
717 PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
718 tcsum = QEDE_TUNN_CSUM_UNNECESSARY;
721 csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK <<
722 PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT |
723 PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
724 PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;
726 if (csum_flag & flag)
727 return QEDE_CSUM_ERROR;
729 return QEDE_CSUM_UNNECESSARY | tcsum;
732 static inline uint8_t qede_tunn_exist(uint16_t flag)
737 static inline uint8_t qede_check_tunn_csum(uint16_t flag)
743 static inline uint8_t qede_check_notunn_csum(uint16_t flag)
746 uint16_t csum_flag = 0;
748 if ((PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
749 PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT) & flag) {
750 csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
751 PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
752 csum = QEDE_CSUM_UNNECESSARY;
755 csum_flag |= PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
756 PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;
758 if (csum_flag & flag)
759 return QEDE_CSUM_ERROR;
764 static inline uint8_t qede_check_csum(uint16_t flag)
766 if (likely(!qede_tunn_exist(flag)))
767 return qede_check_notunn_csum(flag);
769 return qede_check_tunn_csum(flag);
772 static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq)
774 ecore_chain_consume(&rxq->rx_bd_ring);
779 qede_reuse_page(struct qede_dev *qdev,
780 struct qede_rx_queue *rxq, struct qede_rx_entry *curr_cons)
782 struct eth_rx_bd *rx_bd_prod = ecore_chain_produce(&rxq->rx_bd_ring);
783 uint16_t idx = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
784 struct qede_rx_entry *curr_prod;
785 dma_addr_t new_mapping;
787 curr_prod = &rxq->sw_rx_ring[idx];
788 *curr_prod = *curr_cons;
790 new_mapping = rte_mbuf_data_dma_addr_default(curr_prod->mbuf) +
791 curr_prod->page_offset;
793 rx_bd_prod->addr.hi = rte_cpu_to_le_32(U64_HI(new_mapping));
794 rx_bd_prod->addr.lo = rte_cpu_to_le_32(U64_LO(new_mapping));
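/*
 * qede_recycle_rx_bd_ring() below reposts the mbufs of a dropped packet:
 * for each of the packet's BDs it copies the software ring entry from the
 * consumer slot to the producer slot via qede_reuse_page() and advances the
 * BD-ring consumer, so the drop path needs no new buffer allocation.
 */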
800 qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq,
801 struct qede_dev *qdev, uint8_t count)
803 struct qede_rx_entry *curr_cons;
805 for (; count > 0; count--) {
806 curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS(rxq)];
807 qede_reuse_page(qdev, rxq, curr_cons);
808 qede_rx_bd_ring_consume(rxq);
812 static inline uint32_t qede_rx_cqe_to_pkt_type(uint16_t flags)
815 /* TBD - L4 indications needed ? */
816 uint16_t protocol = ((PARSING_AND_ERR_FLAGS_L3TYPE_MASK <<
817 PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) & flags);
819 /* protocol = 3 means LLC/SNAP over Ethernet */
820 if (unlikely(protocol == 0 || protocol == 3))
821 p_type = RTE_PTYPE_UNKNOWN;
822 else if (protocol == 1)
823 p_type = RTE_PTYPE_L3_IPV4;
824 else if (protocol == 2)
825 p_type = RTE_PTYPE_L3_IPV6;
827 return RTE_PTYPE_L2_ETHER | p_type;
831 qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
833 struct qede_rx_queue *rxq = p_rxq;
834 struct qede_dev *qdev = rxq->qdev;
835 struct ecore_dev *edev = &qdev->edev;
836 struct qede_fastpath *fp = &qdev->fp_array[rxq->queue_id];
837 uint16_t hw_comp_cons, sw_comp_cons, sw_rx_index;
839 union eth_rx_cqe *cqe;
840 struct eth_fast_path_rx_reg_cqe *fp_cqe;
841 register struct rte_mbuf *rx_mb = NULL;
842 enum eth_rx_cqe_type cqe_type;
844 uint16_t preload_idx;
847 enum rss_hash_type htype;
849 hw_comp_cons = rte_le_to_cpu_16(*rxq->hw_cons_ptr);
850 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
854 if (hw_comp_cons == sw_comp_cons)
857 while (sw_comp_cons != hw_comp_cons) {
858 /* Get the CQE from the completion ring */
860 (union eth_rx_cqe *)ecore_chain_consume(&rxq->rx_comp_ring);
861 cqe_type = cqe->fast_path_regular.type;
863 if (unlikely(cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH)) {
864 PMD_RX_LOG(DEBUG, rxq, "Got a slowath CQE\n");
866 qdev->ops->eth_cqe_completion(edev, fp->id,
867 (struct eth_slow_path_rx_cqe *)cqe);
871 /* Get the data from the SW ring */
872 sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
873 rx_mb = rxq->sw_rx_ring[sw_rx_index].mbuf;
874 assert(rx_mb != NULL);
877 fp_cqe = &cqe->fast_path_regular;
879 len = rte_le_to_cpu_16(fp_cqe->len_on_first_bd);
880 pad = fp_cqe->placement_offset;
881 assert((len + pad) <= rx_mb->buf_len);
883 PMD_RX_LOG(DEBUG, rxq,
884 "CQE type = 0x%x, flags = 0x%x, vlan = 0x%x"
885 " len = %u, parsing_flags = %d\n",
886 cqe_type, fp_cqe->bitfields,
887 rte_le_to_cpu_16(fp_cqe->vlan_tag),
888 len, rte_le_to_cpu_16(fp_cqe->pars_flags.flags));
890 /* If this is an error packet then drop it */
892 rte_le_to_cpu_16(cqe->fast_path_regular.pars_flags.flags);
893 csum_flag = qede_check_csum(parse_flag);
894 if (unlikely(csum_flag == QEDE_CSUM_ERROR)) {
896 "CQE in CONS = %u has error, flags = 0x%x "
897 "dropping incoming packet\n",
898 sw_comp_cons, parse_flag);
900 qede_recycle_rx_bd_ring(rxq, qdev, fp_cqe->bd_num);
904 if (unlikely(qede_alloc_rx_buffer(rxq) != 0)) {
906 "New buffer allocation failed,"
907 "dropping incoming packet\n");
908 qede_recycle_rx_bd_ring(rxq, qdev, fp_cqe->bd_num);
909 rte_eth_devices[rxq->port_id].
910 data->rx_mbuf_alloc_failed++;
911 rxq->rx_alloc_errors++;
915 qede_rx_bd_ring_consume(rxq);
917 /* Prefetch next mbuf while processing current one. */
918 preload_idx = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
919 rte_prefetch0(rxq->sw_rx_ring[preload_idx].mbuf);
921 if (fp_cqe->bd_num != 1)
922 PMD_RX_LOG(DEBUG, rxq,
923 "Jumbo-over-BD packet not supported\n");
925 /* Update MBUF fields */
927 rx_mb->data_off = pad + RTE_PKTMBUF_HEADROOM;
929 rx_mb->data_len = len;
930 rx_mb->pkt_len = len;
931 rx_mb->port = rxq->port_id;
932 rx_mb->packet_type = qede_rx_cqe_to_pkt_type(parse_flag);
934 htype = (uint8_t)GET_FIELD(fp_cqe->bitfields,
935 ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE);
936 if (qdev->rss_enabled && htype) {
937 rx_mb->ol_flags |= PKT_RX_RSS_HASH;
938 rx_mb->hash.rss = rte_le_to_cpu_32(fp_cqe->rss_hash);
939 PMD_RX_LOG(DEBUG, rxq, "Hash result 0x%x\n",
943 rte_prefetch1(rte_pktmbuf_mtod(rx_mb, void *));
945 if (CQE_HAS_VLAN(parse_flag)) {
946 rx_mb->vlan_tci = rte_le_to_cpu_16(fp_cqe->vlan_tag);
947 rx_mb->ol_flags |= PKT_RX_VLAN_PKT;
950 if (CQE_HAS_OUTER_VLAN(parse_flag)) {
951 /* FW does not provide indication of Outer VLAN tag,
952 * which is always stripped, so vlan_tci_outer is set
953 * to 0. Here vlan_tag represents inner VLAN tag.
955 rx_mb->vlan_tci = rte_le_to_cpu_16(fp_cqe->vlan_tag);
956 rx_mb->ol_flags |= PKT_RX_QINQ_PKT;
959 rx_pkts[rx_pkt] = rx_mb;
962 ecore_chain_recycle_consumed(&rxq->rx_comp_ring);
963 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
964 if (rx_pkt == nb_pkts) {
965 PMD_RX_LOG(DEBUG, rxq,
966 "Budget reached nb_pkts=%u received=%u\n",
972 qede_update_rx_prod(qdev, rxq);
974 PMD_RX_LOG(DEBUG, rxq, "rx_pkts=%u core=%d\n", rx_pkt, rte_lcore_id());
980 qede_free_tx_pkt(struct ecore_dev *edev, struct qede_tx_queue *txq)
982 uint16_t idx = TX_CONS(txq);
983 struct eth_tx_bd *tx_data_bd;
984 struct rte_mbuf *mbuf = txq->sw_tx_ring[idx].mbuf;
986 if (unlikely(!mbuf)) {
988 "null mbuf nb_tx_desc %u nb_tx_avail %u "
989 "sw_tx_cons %u sw_tx_prod %u\n",
990 txq->nb_tx_desc, txq->nb_tx_avail, idx,
996 rte_pktmbuf_free_seg(mbuf);
997 txq->sw_tx_ring[idx].mbuf = NULL;
998 ecore_chain_consume(&txq->tx_pbl);
1004 static inline uint16_t
1005 qede_process_tx_compl(struct ecore_dev *edev, struct qede_tx_queue *txq)
1007 uint16_t tx_compl = 0;
1008 uint16_t hw_bd_cons;
1011 hw_bd_cons = rte_le_to_cpu_16(*txq->hw_cons_ptr);
1012 rte_compiler_barrier();
1014 while (hw_bd_cons != ecore_chain_get_cons_idx(&txq->tx_pbl)) {
1015 rc = qede_free_tx_pkt(edev, txq);
1017 DP_NOTICE(edev, false,
1018 "hw_bd_cons = %d, chain_cons=%d\n",
1020 ecore_chain_get_cons_idx(&txq->tx_pbl));
1023 txq->sw_tx_cons++; /* Making TXD available */
1027 PMD_TX_LOG(DEBUG, txq, "Tx compl %u sw_tx_cons %u avail %u\n",
1028 tx_compl, txq->sw_tx_cons, txq->nb_tx_avail);
1033 qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
1035 struct qede_tx_queue *txq = p_txq;
1036 struct qede_dev *qdev = txq->qdev;
1037 struct ecore_dev *edev = &qdev->edev;
1038 struct qede_fastpath *fp;
1039 struct eth_tx_1st_bd *first_bd;
1040 uint16_t nb_tx_pkts;
1041 uint16_t nb_pkt_sent = 0;
1046 fp = &qdev->fp_array[QEDE_RSS_COUNT(qdev) + txq->queue_id];
1048 if (unlikely(txq->nb_tx_avail < txq->tx_free_thresh)) {
1049 PMD_TX_LOG(DEBUG, txq, "send=%u avail=%u free_thresh=%u\n",
1050 nb_pkts, txq->nb_tx_avail, txq->tx_free_thresh);
1051 (void)qede_process_tx_compl(edev, txq);
1054 nb_tx_pkts = RTE_MIN(nb_pkts, (txq->nb_tx_avail / MAX_NUM_TX_BDS));
1055 if (unlikely(nb_tx_pkts == 0)) {
1056 PMD_TX_LOG(DEBUG, txq, "Out of BDs nb_pkts=%u avail=%u\n",
1057 nb_pkts, txq->nb_tx_avail);
1061 tx_count = nb_tx_pkts;
1062 while (nb_tx_pkts--) {
1063 /* Fill the entry in the SW ring and the BDs in the FW ring */
1065 struct rte_mbuf *mbuf = *tx_pkts++;
1066 txq->sw_tx_ring[idx].mbuf = mbuf;
1067 first_bd = (struct eth_tx_1st_bd *)
1068 ecore_chain_produce(&txq->tx_pbl);
1069 first_bd->data.bd_flags.bitfields =
1070 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
1071 /* Map MBUF linear data for DMA and set in the first BD */
1072 QEDE_BD_SET_ADDR_LEN(first_bd, rte_mbuf_data_dma_addr(mbuf),
1075 /* Descriptor based VLAN insertion */
1076 if (mbuf->ol_flags & (PKT_TX_VLAN_PKT | PKT_TX_QINQ_PKT)) {
1077 first_bd->data.vlan = rte_cpu_to_le_16(mbuf->vlan_tci);
1078 first_bd->data.bd_flags.bitfields |=
1079 1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT;
1082 /* Offload the IP checksum in the hardware */
1083 if (mbuf->ol_flags & PKT_TX_IP_CKSUM) {
1084 first_bd->data.bd_flags.bitfields |=
1085 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
1088 /* L4 checksum offload (tcp or udp) */
1089 if (mbuf->ol_flags & (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) {
1090 first_bd->data.bd_flags.bitfields |=
1091 1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
1092 /* IPv6 + extn. -> later */
1094 first_bd->data.nbds = MAX_NUM_TX_BDS;
1096 rte_prefetch0(txq->sw_tx_ring[TX_PROD(txq)].mbuf);
1099 rte_cpu_to_le_16(ecore_chain_get_prod_idx(&txq->tx_pbl));
1103 /* Write value of prod idx into bd_prod */
1104 txq->tx_db.data.bd_prod = bd_prod;
1106 rte_compiler_barrier();
1107 DIRECT_REG_WR(edev, txq->doorbell_addr, txq->tx_db.raw);
1110 /* Check again for Tx completions */
1111 (void)qede_process_tx_compl(edev, txq);
1113 PMD_TX_LOG(DEBUG, txq, "to_send=%u can_send=%u sent=%u core=%d\n",
1114 nb_pkts, tx_count, nb_pkt_sent, rte_lcore_id());
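/*
 * Illustrative sketch (application side, not part of the driver): the Tx
 * path is reached through the burst API, e.g.:
 *
 *   uint16_t sent = rte_eth_tx_burst(port_id, queue_id, pkts, nb_pkts);
 *
 * Packets beyond the returned count were not queued (for example when the
 * Tx ring is short on BDs) and remain owned by the caller.
 */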
1119 int qede_dev_start(struct rte_eth_dev *eth_dev)
1121 struct qede_dev *qdev = eth_dev->data->dev_private;
1122 struct ecore_dev *edev = &qdev->edev;
1123 struct qed_link_output link_output;
1124 struct qede_fastpath *fp;
1127 DP_INFO(edev, "Device state is %d\n", qdev->state);
1129 switch (qdev->state) {
1131 DP_INFO(edev, "Device already started\n");
1134 if (qede_alloc_fp_array(qdev))
1136 qede_init_fp(eth_dev);
1139 for (i = 0; i < QEDE_QUEUE_CNT(qdev); i++) {
1140 fp = &qdev->fp_array[i];
1141 if (qede_alloc_mem_sb(qdev, fp->sb_info, i))
1146 DP_INFO(edev, "Unknown state for port %u\n",
1147 eth_dev->data->port_id);
1151 rc = qede_start_queues(eth_dev, true);
1153 DP_ERR(edev, "Failed to start queues\n");
1157 DP_INFO(edev, "Allocated %d RSS queues on %d TC/s\n",
1158 QEDE_RSS_CNT(qdev), qdev->num_tc);
1160 /* Bring-up the link */
1161 qede_dev_set_link_state(eth_dev, true);
1162 qdev->state = QEDE_START;
1163 qede_config_rx_mode(eth_dev);
1165 /* Init the queues */
1166 if (qede_reset_fp_rings(qdev))
1169 DP_INFO(edev, "dev_state is QEDE_START\n");
1174 static int qede_drain_txq(struct qede_dev *qdev,
1175 struct qede_tx_queue *txq, bool allow_drain)
1177 struct ecore_dev *edev = &qdev->edev;
1180 while (txq->sw_tx_cons != txq->sw_tx_prod) {
1181 qede_process_tx_compl(edev, txq);
1184 DP_NOTICE(edev, false,
1185 "Tx queue[%u] is stuck,"
1186 "requesting MCP to drain\n",
1188 rc = qdev->ops->common->drain(edev);
1191 return qede_drain_txq(qdev, txq, false);
1194 DP_NOTICE(edev, false,
1195 "Timeout waiting for tx queue[%d]:"
1196 "PROD=%d, CONS=%d\n",
1197 txq->queue_id, txq->sw_tx_prod,
1203 rte_compiler_barrier();
1206 /* FW finished processing, wait for HW to transmit all tx packets */
1212 static int qede_stop_queues(struct qede_dev *qdev)
1214 struct qed_update_vport_params vport_update_params;
1215 struct ecore_dev *edev = &qdev->edev;
1218 /* Disable the vport */
1219 memset(&vport_update_params, 0, sizeof(vport_update_params));
1220 vport_update_params.vport_id = 0;
1221 vport_update_params.update_vport_active_flg = 1;
1222 vport_update_params.vport_active_flg = 0;
1223 vport_update_params.update_rss_flg = 0;
1225 DP_INFO(edev, "vport_update\n");
1227 rc = qdev->ops->vport_update(edev, &vport_update_params);
1229 DP_ERR(edev, "Failed to update vport\n");
1233 DP_INFO(edev, "Flushing tx queues\n");
1235 /* Flush Tx queues. If needed, request drain from MCP */
1237 struct qede_fastpath *fp = &qdev->fp_array[i];
1239 if (fp->type & QEDE_FASTPATH_TX) {
1240 for (tc = 0; tc < qdev->num_tc; tc++) {
1241 struct qede_tx_queue *txq = fp->txqs[tc];
1243 rc = qede_drain_txq(qdev, txq, true);
1250 /* Stop all Queues in reverse order */
1251 for (i = QEDE_QUEUE_CNT(qdev) - 1; i >= 0; i--) {
1252 struct qed_stop_rxq_params rx_params;
1254 /* Stop the Tx Queue(s) */
1255 if (qdev->fp_array[i].type & QEDE_FASTPATH_TX) {
1256 for (tc = 0; tc < qdev->num_tc; tc++) {
1257 struct qed_stop_txq_params tx_params;
1260 tx_params.rss_id = i;
1261 val = qdev->fp_array[i].txqs[tc]->queue_id;
1262 tx_params.tx_queue_id = val;
1264 DP_INFO(edev, "Stopping tx queues\n");
1265 rc = qdev->ops->q_tx_stop(edev, &tx_params);
1267 DP_ERR(edev, "Failed to stop TXQ #%d\n",
1268 tx_params.tx_queue_id);
1274 /* Stop the Rx Queue */
1275 if (qdev->fp_array[i].type & QEDE_FASTPATH_RX) {
1276 memset(&rx_params, 0, sizeof(rx_params));
1277 rx_params.rss_id = i;
1278 rx_params.rx_queue_id = qdev->fp_array[i].rxq->queue_id;
1279 rx_params.eq_completion_only = 1;
1281 DP_INFO(edev, "Stopping rx queues\n");
1283 rc = qdev->ops->q_rx_stop(edev, &rx_params);
1285 DP_ERR(edev, "Failed to stop RXQ #%d\n", i);
1291 DP_INFO(edev, "Stopping vports\n");
1293 /* Stop the vport */
1294 rc = qdev->ops->vport_stop(edev, 0);
1296 DP_ERR(edev, "Failed to stop VPORT\n");
1301 int qede_reset_fp_rings(struct qede_dev *qdev)
1303 struct qede_fastpath *fp;
1304 struct qede_tx_queue *txq;
1308 for_each_queue(id) {
1309 DP_INFO(&qdev->edev, "Reset FP chain for RSS %u\n", id);
1310 fp = &qdev->fp_array[id];
1312 if (fp->type & QEDE_FASTPATH_RX) {
1313 qede_rx_queue_release_mbufs(fp->rxq);
1314 ecore_chain_reset(&fp->rxq->rx_bd_ring);
1315 ecore_chain_reset(&fp->rxq->rx_comp_ring);
1316 fp->rxq->sw_rx_prod = 0;
1317 fp->rxq->sw_rx_cons = 0;
1318 for (i = 0; i < fp->rxq->nb_rx_desc; i++) {
1319 if (qede_alloc_rx_buffer(fp->rxq)) {
1321 "RX buffer allocation failed\n");
1326 if (fp->type & QEDE_FASTPATH_TX) {
1327 for (tc = 0; tc < qdev->num_tc; tc++) {
1329 ecore_chain_reset(&txq->tx_pbl);
1330 txq->sw_tx_cons = 0;
1331 txq->sw_tx_prod = 0;
1339 /* This function frees all memory of a single fp */
1340 static void qede_free_mem_fp(struct qede_dev *qdev, struct qede_fastpath *fp)
1344 qede_rx_queue_release(fp->rxq);
1345 for (tc = 0; tc < qdev->num_tc; tc++)
1346 qede_tx_queue_release(fp->txqs[tc]);
1349 void qede_free_mem_load(struct qede_dev *qdev)
1353 for_each_queue(rss_id) {
1354 struct qede_fastpath *fp = &qdev->fp_array[rss_id];
1355 qede_free_mem_fp(qdev, fp);
1357 /* qdev->num_rss = 0; */
1360 void qede_dev_stop(struct rte_eth_dev *eth_dev)
1362 struct qede_dev *qdev = eth_dev->data->dev_private;
1363 struct ecore_dev *edev = &qdev->edev;
1365 DP_INFO(edev, "port %u\n", eth_dev->data->port_id);
1367 if (qdev->state != QEDE_START) {
1368 DP_INFO(edev, "Device not yet started\n");
1372 if (qede_stop_queues(qdev))
1373 DP_ERR(edev, "Didn't succeed to close queues\n");
1375 DP_INFO(edev, "Stopped queues\n");
1377 qdev->ops->fastpath_stop(edev);
1379 /* Bring the link down */
1380 qede_dev_set_link_state(eth_dev, false);
1382 qdev->state = QEDE_STOP;
1384 DP_INFO(edev, "dev_state is QEDE_STOP\n");