 * Copyright (c) 2016 QLogic Corporation.
 * See LICENSE.qede_pmd for copyright and licensing details.

static bool gro_disable = 1;	/* mod_param */
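/* Allocate one mbuf from the given mempool via the raw allocator
 * (no data-field initialization is performed here).
 */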
rte_mbuf *qede_rxmbuf_alloc(struct rte_mempool *mp)
	m = __rte_mbuf_raw_alloc(mp);
	__rte_mbuf_sanity_check(m, 0);
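/* Allocate a fresh Rx buffer, record it at the software-ring producer slot
 * and post its DMA address into the next BD of the hardware Rx ring.
 */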
static inline int qede_alloc_rx_buffer(struct qede_rx_queue *rxq)
	struct rte_mbuf *new_mb = NULL;
	struct eth_rx_bd *rx_bd;
	uint16_t idx = rxq->sw_rx_prod & NUM_RX_BDS(rxq);

	new_mb = qede_rxmbuf_alloc(rxq->mb_pool);
	if (unlikely(!new_mb)) {
			"Failed to allocate rx buffer "
			"sw_rx_prod %u sw_rx_cons %u mp entries %u free %u",
			idx, rxq->sw_rx_cons & NUM_RX_BDS(rxq),
			rte_mempool_count(rxq->mb_pool),
			rte_mempool_free_count(rxq->mb_pool));
	rxq->sw_rx_ring[idx].mbuf = new_mb;
	rxq->sw_rx_ring[idx].page_offset = 0;
	mapping = rte_mbuf_data_dma_addr_default(new_mb);
	/* Advance PROD and get BD pointer */
	rx_bd = (struct eth_rx_bd *)ecore_chain_produce(&rxq->rx_bd_ring);
	rx_bd->addr.hi = rte_cpu_to_le_32(U64_HI(mapping));
	rx_bd->addr.lo = rte_cpu_to_le_32(U64_LO(mapping));
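/* Free any mbufs still attached to the Rx software ring. */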
static void qede_rx_queue_release_mbufs(struct qede_rx_queue *rxq)
	if (rxq->sw_rx_ring != NULL) {
		for (i = 0; i < rxq->nb_rx_desc; i++) {
			if (rxq->sw_rx_ring[i].mbuf != NULL) {
				rte_pktmbuf_free(rxq->sw_rx_ring[i].mbuf);
				rxq->sw_rx_ring[i].mbuf = NULL;

void qede_rx_queue_release(void *rx_queue)
	struct qede_rx_queue *rxq = rx_queue;

	qede_rx_queue_release_mbufs(rxq);
	rte_free(rxq->sw_rx_ring);
	rxq->sw_rx_ring = NULL;
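/* Rx queue setup: validate the ring size, size the Rx buffer from the mempool
 * data room, allocate the software ring plus the firmware BD and completion
 * chains, then pre-fill the ring with receive buffers.
 */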
qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
		    uint16_t nb_desc, unsigned int socket_id,
		    const struct rte_eth_rxconf *rx_conf,
		    struct rte_mempool *mp)
	struct qede_dev *qdev = dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	struct rte_eth_dev_data *eth_data = dev->data;
	struct qede_rx_queue *rxq;
	uint16_t pkt_len = (uint16_t)dev->data->dev_conf.rxmode.max_rx_pkt_len;

	PMD_INIT_FUNC_TRACE(edev);

	/* Note: Ring size/align is controlled by struct rte_eth_desc_lim */
	if (!rte_is_power_of_2(nb_desc)) {
		DP_ERR(edev, "Ring size %u is not power of 2\n",

	/* Free memory prior to re-allocation if needed... */
	if (dev->data->rx_queues[queue_idx] != NULL) {
		qede_rx_queue_release(dev->data->rx_queues[queue_idx]);
		dev->data->rx_queues[queue_idx] = NULL;

	/* First allocate the rx queue data structure */
	rxq = rte_zmalloc_socket("qede_rx_queue", sizeof(struct qede_rx_queue),
				 RTE_CACHE_LINE_SIZE, socket_id);
		DP_ERR(edev, "Unable to allocate memory for rxq on socket %u",
	rxq->nb_rx_desc = nb_desc;
	rxq->queue_id = queue_idx;
	rxq->port_id = dev->data->port_id;

	data_size = (uint16_t)rte_pktmbuf_data_room_size(mp) -
		    RTE_PKTMBUF_HEADROOM;
	if (pkt_len > data_size) {
		DP_ERR(edev, "MTU %u should not exceed dataroom %u\n",

	rxq->rx_buf_size = pkt_len + QEDE_ETH_OVERHEAD;
	DP_INFO(edev, "MTU = %u ; RX buffer = %u\n",
		qdev->mtu, rxq->rx_buf_size);

	if (pkt_len > ETHER_MAX_LEN) {
		dev->data->dev_conf.rxmode.jumbo_frame = 1;
		DP_NOTICE(edev, false, "jumbo frame enabled\n");
		dev->data->dev_conf.rxmode.jumbo_frame = 0;

	/* Allocate the parallel driver ring for Rx buffers */
	size = sizeof(*rxq->sw_rx_ring) * rxq->nb_rx_desc;
	rxq->sw_rx_ring = rte_zmalloc_socket("sw_rx_ring", size,
					     RTE_CACHE_LINE_SIZE, socket_id);
	if (!rxq->sw_rx_ring) {
		DP_NOTICE(edev, false,
			  "Unable to alloc memory for sw_rx_ring on socket %u\n",

	/* Allocate FW Rx ring */
	rc = qdev->ops->common->chain_alloc(edev,
					    ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
					    ECORE_CHAIN_MODE_NEXT_PTR,
					    ECORE_CHAIN_CNT_TYPE_U16,
					    sizeof(struct eth_rx_bd),
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(edev, false,
			  "Unable to alloc memory for rxbd ring on socket %u\n",
		rte_free(rxq->sw_rx_ring);
		rxq->sw_rx_ring = NULL;

	/* Allocate FW completion ring */
	rc = qdev->ops->common->chain_alloc(edev,
					    ECORE_CHAIN_USE_TO_CONSUME,
					    ECORE_CHAIN_MODE_PBL,
					    ECORE_CHAIN_CNT_TYPE_U16,
					    sizeof(union eth_rx_cqe),
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(edev, false,
			  "Unable to alloc memory for cqe ring on socket %u\n",
		/* TBD: Freeing RX BD ring */
		rte_free(rxq->sw_rx_ring);
		rxq->sw_rx_ring = NULL;

	/* Allocate buffers for the Rx ring */
	for (i = 0; i < rxq->nb_rx_desc; i++) {
		rc = qede_alloc_rx_buffer(rxq);
			DP_NOTICE(edev, false,
				  "RX buffer allocation failed at idx=%d\n", i);

	dev->data->rx_queues[queue_idx] = rxq;
	if (!qdev->rx_queues)
		qdev->rx_queues = (struct qede_rx_queue **)dev->data->rx_queues;
	DP_INFO(edev, "rxq %d num_desc %u rx_buf_size=%u socket %u\n",
		queue_idx, nb_desc, rxq->rx_buf_size, socket_id);
	qede_rx_queue_release(rxq);
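/* Free any mbufs still attached to the Tx software ring. */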
static void qede_tx_queue_release_mbufs(struct qede_tx_queue *txq)
	PMD_TX_LOG(DEBUG, txq, "releasing %u mbufs\n", txq->nb_tx_desc);

	if (txq->sw_tx_ring != NULL) {
		for (i = 0; i < txq->nb_tx_desc; i++) {
			if (txq->sw_tx_ring[i].mbuf != NULL) {
				rte_pktmbuf_free(txq->sw_tx_ring[i].mbuf);
				txq->sw_tx_ring[i].mbuf = NULL;

void qede_tx_queue_release(void *tx_queue)
	struct qede_tx_queue *txq = tx_queue;

	qede_tx_queue_release_mbufs(txq);
	if (txq->sw_tx_ring) {
		rte_free(txq->sw_tx_ring);
		txq->sw_tx_ring = NULL;
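/* Tx queue setup: validate the ring size, allocate the firmware BD chain and
 * the software ring, and set up the free-descriptor accounting and threshold.
 */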
qede_tx_queue_setup(struct rte_eth_dev *dev,
		    unsigned int socket_id,
		    const struct rte_eth_txconf *tx_conf)
	struct qede_dev *qdev = dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	struct qede_tx_queue *txq;

	PMD_INIT_FUNC_TRACE(edev);

	if (!rte_is_power_of_2(nb_desc)) {
		DP_ERR(edev, "Ring size %u is not power of 2\n",

	/* Free memory prior to re-allocation if needed... */
	if (dev->data->tx_queues[queue_idx] != NULL) {
		qede_tx_queue_release(dev->data->tx_queues[queue_idx]);
		dev->data->tx_queues[queue_idx] = NULL;

	txq = rte_zmalloc_socket("qede_tx_queue", sizeof(struct qede_tx_queue),
				 RTE_CACHE_LINE_SIZE, socket_id);
		       "Unable to allocate memory for txq on socket %u",
	txq->nb_tx_desc = nb_desc;
	txq->port_id = dev->data->port_id;

	rc = qdev->ops->common->chain_alloc(edev,
					    ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
					    ECORE_CHAIN_MODE_PBL,
					    ECORE_CHAIN_CNT_TYPE_U16,
					    sizeof(union eth_tx_bd_types),
	if (rc != ECORE_SUCCESS) {
		       "Unable to allocate memory for txbd ring on socket %u",
		qede_tx_queue_release(txq);

	/* Allocate software ring */
	txq->sw_tx_ring = rte_zmalloc_socket("txq->sw_tx_ring",
					     (sizeof(struct qede_tx_entry) *
					     RTE_CACHE_LINE_SIZE, socket_id);

	if (!txq->sw_tx_ring) {
		       "Unable to allocate memory for sw_tx_ring on socket %u",
		qede_tx_queue_release(txq);

	txq->queue_id = queue_idx;
	txq->nb_tx_avail = txq->nb_tx_desc;
	txq->tx_free_thresh =
		tx_conf->tx_free_thresh ? tx_conf->tx_free_thresh :
		(txq->nb_tx_desc - QEDE_DEFAULT_TX_FREE_THRESH);

	dev->data->tx_queues[queue_idx] = txq;
	if (!qdev->tx_queues)
		qdev->tx_queues = (struct qede_tx_queue **)dev->data->tx_queues;

	txq->txq_counter = 0;
		"txq %u num_desc %u tx_free_thresh %u socket %u\n",
		queue_idx, nb_desc, txq->tx_free_thresh, socket_id);

/* This function inits fp content and resets the SB, RXQ and TXQ arrays */
static void qede_init_fp(struct qede_dev *qdev)
	struct qede_fastpath *fp;
	int rss_id, txq_index, tc;

	memset((void *)qdev->fp_array, 0, (QEDE_RSS_CNT(qdev) *
					   sizeof(*qdev->fp_array)));
	memset((void *)qdev->sb_array, 0, (QEDE_RSS_CNT(qdev) *
					   sizeof(*qdev->sb_array)));
	for_each_rss(rss_id) {
		fp = &qdev->fp_array[rss_id];
		/* Point rxq to the generic rte queues that were created
		 * as part of queue creation.
		fp->rxq = qdev->rx_queues[rss_id];
		fp->sb_info = &qdev->sb_array[rss_id];

		for (tc = 0; tc < qdev->num_tc; tc++) {
			txq_index = tc * QEDE_RSS_CNT(qdev) + rss_id;
			fp->txqs[tc] = qdev->tx_queues[txq_index];
			fp->txqs[tc]->queue_id = txq_index;
		/* Updating it to main structure */
		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",

	qdev->gro_disable = gro_disable;

void qede_free_fp_arrays(struct qede_dev *qdev)
	/* It assumes qede_free_mem_load() is called before */
	if (qdev->fp_array != NULL) {
		rte_free(qdev->fp_array);
		qdev->fp_array = NULL;

	if (qdev->sb_array != NULL) {
		rte_free(qdev->sb_array);
		qdev->sb_array = NULL;

int qede_alloc_fp_array(struct qede_dev *qdev)
	struct qede_fastpath *fp;
	struct ecore_dev *edev = &qdev->edev;

	qdev->fp_array = rte_calloc("fp", QEDE_RSS_CNT(qdev),
				    sizeof(*qdev->fp_array),
				    RTE_CACHE_LINE_SIZE);

	if (!qdev->fp_array) {
		DP_ERR(edev, "fp array allocation failed\n");

	qdev->sb_array = rte_calloc("sb", QEDE_RSS_CNT(qdev),
				    sizeof(*qdev->sb_array),
				    RTE_CACHE_LINE_SIZE);

	if (!qdev->sb_array) {
		DP_ERR(edev, "sb array allocation failed\n");
		rte_free(qdev->fp_array);

/* This function allocates fast-path status block memory */
qede_alloc_mem_sb(struct qede_dev *qdev, struct ecore_sb_info *sb_info,
	struct ecore_dev *edev = &qdev->edev;
	struct status_block *sb_virt;

	sb_virt = OSAL_DMA_ALLOC_COHERENT(edev, &sb_phys, sizeof(*sb_virt));
		DP_ERR(edev, "Status block allocation failed\n");

	rc = qdev->ops->common->sb_init(edev, sb_info,
					sb_virt, sb_phys, sb_id,
					QED_SB_TYPE_L2_QUEUE);
		DP_ERR(edev, "Status block initialization failed\n");
		/* TBD: No dma_free_coherent possible */

static int qede_alloc_mem_fp(struct qede_dev *qdev, struct qede_fastpath *fp)
	return qede_alloc_mem_sb(qdev, fp->sb_info, fp->rss_id);

static void qede_shrink_txq(struct qede_dev *qdev, uint16_t num_rss)
	/* @@@TBD - this should also re-set the qed interrupts */

/* This function allocates all qede memory at NIC load. */
static int qede_alloc_mem_load(struct qede_dev *qdev)
	struct ecore_dev *edev = &qdev->edev;

	for (rss_id = 0; rss_id < QEDE_RSS_CNT(qdev); rss_id++) {
		struct qede_fastpath *fp = &qdev->fp_array[rss_id];

		rc = qede_alloc_mem_fp(qdev, fp);

	if (rss_id != QEDE_RSS_CNT(qdev)) {
		/* Failed allocating memory for all the queues */
			       "Failed to alloc memory for leading queue\n");
			DP_NOTICE(edev, false,
				  "Failed to allocate memory for all queues. "
				  "Desired: %d queues, allocated: %d queues\n",
				  QEDE_RSS_CNT(qdev), rss_id);
			qede_shrink_txq(qdev, rss_id);
		qdev->num_rss = rss_id;
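/* Publish the current BD and CQE producer indices to the device so the
 * firmware can start using the newly posted Rx buffers.
 */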
qede_update_rx_prod(struct qede_dev *qdev, struct qede_rx_queue *rxq)
	uint16_t bd_prod = ecore_chain_get_prod_idx(&rxq->rx_bd_ring);
	uint16_t cqe_prod = ecore_chain_get_prod_idx(&rxq->rx_comp_ring);
	struct eth_rx_prod_data rx_prods = { 0 };

	/* Update producers */
	rx_prods.bd_prod = rte_cpu_to_le_16(bd_prod);
	rx_prods.cqe_prod = rte_cpu_to_le_16(cqe_prod);

	/* Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer

	internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods),
			(uint32_t *)&rx_prods);

	/* mmiowb is needed to synchronize doorbell writes from more than one
	 * processor. It guarantees that the write arrives to the device before
	 * the napi lock is released and another qede_poll is called (possibly
	 * on another CPU). Without this barrier, the next doorbell can bypass
	 * this doorbell. This is applicable to IA64/Altix systems.

	PMD_RX_LOG(DEBUG, rxq, "bd_prod %u cqe_prod %u\n", bd_prod, cqe_prod);

static inline uint32_t
qede_rxfh_indir_default(uint32_t index, uint32_t n_rx_rings)
	return index % n_rx_rings;
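/* Fill a buffer with pseudo-random values; rand() is seeded from the current
 * time. Used below to generate the RSS hash key.
 */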
static void qede_prandom_bytes(uint32_t *buff, size_t bytes)
	srand((unsigned int)time(NULL));

	for (i = 0; i < ECORE_RSS_KEY_SIZE; i++)

qede_config_rss(struct rte_eth_dev *eth_dev,
		struct qed_update_vport_rss_params *rss_params)
	enum rte_eth_rx_mq_mode mode = eth_dev->data->dev_conf.rxmode.mq_mode;
	struct rte_eth_rss_conf rss_conf =
		eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;

	PMD_INIT_FUNC_TRACE(edev);

	/* Check if RSS conditions are met.
	 * Note: Even though it's meaningless to enable RSS with one queue, it
	 * could be used to produce RSS Hash, so skipping that check.

	if (!(mode & ETH_MQ_RX_RSS)) {
		DP_INFO(edev, "RSS flag is not set\n");

	DP_INFO(edev, "RSS flag is set\n");

	if (rss_conf.rss_hf == 0) {
		DP_NOTICE(edev, false, "No RSS hash function to apply\n");

	if (rss_conf.rss_key != NULL) {
		DP_NOTICE(edev, false,
			  "User provided RSS key is not supported\n");

	memset(rss_params, 0, sizeof(*rss_params));

	for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++)
		rss_params->rss_ind_table[i] = qede_rxfh_indir_default(i,

	qede_prandom_bytes(rss_params->rss_key,
			   sizeof(rss_params->rss_key));

	DP_INFO(edev, "RSS check passes\n");
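/* Start the vport, bring up every Rx and Tx queue, program the Tx doorbell
 * data and finally send the vport-update enabling the vport (with RSS when it
 * could be configured).
 */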
static int qede_start_queues(struct rte_eth_dev *eth_dev, bool clear_stats)
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	struct qed_update_vport_rss_params *rss_params = &qdev->rss_params;
	struct qed_dev_info *qed_info = &qdev->dev_info.common;
	struct qed_update_vport_params vport_update_params;
	struct qed_start_vport_params start = { 0 };
	int vlan_removal_en = 1;

	if (!qdev->num_rss) {
		       "Cannot update V-PORT as active as "
		       "there are no Rx queues\n");

	start.remove_inner_vlan = vlan_removal_en;
	start.gro_enable = !qdev->gro_disable;
	start.mtu = qdev->mtu;
	start.drop_ttl0 = true;
	start.clear_stats = clear_stats;

	rc = qdev->ops->vport_start(edev, &start);
		DP_ERR(edev, "Start V-PORT failed %d\n", rc);

		"Start vport ramrod passed, vport_id = %d,"
		" MTU = %d, vlan_removal_en = %d\n",
		start.vport_id, qdev->mtu, vlan_removal_en);

		struct qede_fastpath *fp = &qdev->fp_array[i];
		dma_addr_t p_phys_table;

		p_phys_table = ecore_chain_get_pbl_phys(&fp->rxq->rx_comp_ring);
		page_cnt = ecore_chain_get_page_cnt(&fp->rxq->rx_comp_ring);

		ecore_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0);	/* @DPDK */

		rc = qdev->ops->q_rx_start(edev, i, i, 0,
					   fp->sb_info->igu_sb_id,
					   fp->rxq->rx_buf_size,
					   fp->rxq->rx_bd_ring.p_phys_addr,
					   &fp->rxq->hw_rxq_prod_addr);
			DP_ERR(edev, "Start RXQ #%d failed %d\n", i, rc);

		fp->rxq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[RX_PI];

		qede_update_rx_prod(qdev, fp->rxq);

		for (tc = 0; tc < qdev->num_tc; tc++) {
			struct qede_tx_queue *txq = fp->txqs[tc];
			int txq_index = tc * QEDE_RSS_CNT(qdev) + i;

			p_phys_table = ecore_chain_get_pbl_phys(&txq->tx_pbl);
			page_cnt = ecore_chain_get_page_cnt(&txq->tx_pbl);
			rc = qdev->ops->q_tx_start(edev, i, txq_index,
						   fp->sb_info->igu_sb_id,
						   p_phys_table, page_cnt,
						   &txq->doorbell_addr);
				DP_ERR(edev, "Start txq %u failed %d\n",

				&fp->sb_info->sb_virt->pi_array[TX_PI(tc)];
			SET_FIELD(txq->tx_db.data.params,
				  ETH_DB_DATA_DEST, DB_DEST_XCM);
			SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD,
			SET_FIELD(txq->tx_db.data.params,
				  ETH_DB_DATA_AGG_VAL_SEL,
				  DQ_XCM_ETH_TX_BD_PROD_CMD);

			txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;

	/* Prepare and send the vport enable */
	memset(&vport_update_params, 0, sizeof(vport_update_params));
	vport_update_params.vport_id = start.vport_id;
	vport_update_params.update_vport_active_flg = 1;
	vport_update_params.vport_active_flg = 1;

	if (qed_info->mf_mode == MF_NPAR && qed_info->tx_switching) {
		/* TBD: Check SRIOV enabled for VF */
		vport_update_params.update_tx_switching_flg = 1;
		vport_update_params.tx_switching_flg = 1;

	if (!qede_config_rss(eth_dev, rss_params)) {
		vport_update_params.update_rss_flg = 1;

		qdev->rss_enabled = 1;
		DP_INFO(edev, "Updating RSS flag\n");
		qdev->rss_enabled = 0;
		DP_INFO(edev, "Not Updating RSS flag\n");

	rte_memcpy(&vport_update_params.rss_params, rss_params,
		   sizeof(*rss_params));

	rc = qdev->ops->vport_update(edev, &vport_update_params);
		DP_ERR(edev, "Update V-PORT failed %d\n", rc);
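/* The helpers below decode the parsing-and-error flags reported in the Rx CQE
 * to classify the checksum status of tunnelled and non-tunnelled packets.
 */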
static bool qede_tunn_exist(uint16_t flag)
	return !!((PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK <<
		   PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT) & flag);

static inline uint8_t qede_check_tunn_csum(uint16_t flag)
	uint16_t csum_flag = 0;

	if ((PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK <<
	     PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT) & flag)
		csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK <<
			     PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT;

	if ((PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
	     PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT) & flag) {
		csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
			     PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
		tcsum = QEDE_TUNN_CSUM_UNNECESSARY;

	csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK <<
		     PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT |
		     PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
		     PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;

	if (csum_flag & flag)
		return QEDE_CSUM_ERROR;

	return QEDE_CSUM_UNNECESSARY | tcsum;

static inline uint8_t qede_tunn_exist(uint16_t flag)
static inline uint8_t qede_check_tunn_csum(uint16_t flag)

static inline uint8_t qede_check_notunn_csum(uint16_t flag)
	uint16_t csum_flag = 0;

	if ((PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
	     PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT) & flag) {
		csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
			     PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
		csum = QEDE_CSUM_UNNECESSARY;

	csum_flag |= PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
		     PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;

	if (csum_flag & flag)
		return QEDE_CSUM_ERROR;

static inline uint8_t qede_check_csum(uint16_t flag)
	if (likely(!qede_tunn_exist(flag)))
		return qede_check_notunn_csum(flag);

	return qede_check_tunn_csum(flag);

static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq)
	ecore_chain_consume(&rxq->rx_bd_ring);

qede_reuse_page(struct qede_dev *qdev,
		struct qede_rx_queue *rxq, struct qede_rx_entry *curr_cons)
	struct eth_rx_bd *rx_bd_prod = ecore_chain_produce(&rxq->rx_bd_ring);
	uint16_t idx = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
	struct qede_rx_entry *curr_prod;
	dma_addr_t new_mapping;

	curr_prod = &rxq->sw_rx_ring[idx];
	*curr_prod = *curr_cons;

	new_mapping = rte_mbuf_data_dma_addr_default(curr_prod->mbuf) +
		      curr_prod->page_offset;

	rx_bd_prod->addr.hi = rte_cpu_to_le_32(U64_HI(new_mapping));
	rx_bd_prod->addr.lo = rte_cpu_to_le_32(U64_LO(new_mapping));

qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq,
			struct qede_dev *qdev, uint8_t count)
	struct qede_rx_entry *curr_cons;

	for (; count > 0; count--) {
		curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS(rxq)];
		qede_reuse_page(qdev, rxq, curr_cons);
		qede_rx_bd_ring_consume(rxq);

static inline uint32_t qede_rx_cqe_to_pkt_type(uint16_t flags)
	/* TBD - L4 indications needed ? */
	uint16_t protocol = ((PARSING_AND_ERR_FLAGS_L3TYPE_MASK <<
			      PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) & flags);

	/* protocol = 3 means LLC/SNAP over Ethernet */
	if (unlikely(protocol == 0 || protocol == 3))
		p_type = RTE_PTYPE_UNKNOWN;
	else if (protocol == 1)
		p_type = RTE_PTYPE_L3_IPV4;
	else if (protocol == 2)
		p_type = RTE_PTYPE_L3_IPV6;

	return RTE_PTYPE_L2_ETHER | p_type;
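/* Rx burst handler: walk the completion ring between the software and
 * hardware consumer indices, drop error packets, replenish Rx buffers and
 * hand completed mbufs to the caller.
 */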
qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
	struct qede_rx_queue *rxq = p_rxq;
	struct qede_dev *qdev = rxq->qdev;
	struct ecore_dev *edev = &qdev->edev;
	struct qede_fastpath *fp = &qdev->fp_array[rxq->queue_id];
	uint16_t hw_comp_cons, sw_comp_cons, sw_rx_index;
	union eth_rx_cqe *cqe;
	struct eth_fast_path_rx_reg_cqe *fp_cqe;
	register struct rte_mbuf *rx_mb = NULL;
	enum eth_rx_cqe_type cqe_type;
	uint16_t preload_idx;
	enum rss_hash_type htype;

	hw_comp_cons = rte_le_to_cpu_16(*rxq->hw_cons_ptr);
	sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);

	if (hw_comp_cons == sw_comp_cons)

	while (sw_comp_cons != hw_comp_cons) {
		/* Get the CQE from the completion ring */
		    (union eth_rx_cqe *)ecore_chain_consume(&rxq->rx_comp_ring);
		cqe_type = cqe->fast_path_regular.type;

		if (unlikely(cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH)) {
			PMD_RX_LOG(DEBUG, rxq, "Got a slow-path CQE\n");
			qdev->ops->eth_cqe_completion(edev, fp->rss_id,
				(struct eth_slow_path_rx_cqe *)cqe);

		/* Get the data from the SW ring */
		sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
		rx_mb = rxq->sw_rx_ring[sw_rx_index].mbuf;
		assert(rx_mb != NULL);

		fp_cqe = &cqe->fast_path_regular;

		len = rte_le_to_cpu_16(fp_cqe->len_on_first_bd);
		pad = fp_cqe->placement_offset;
		assert((len + pad) <= rx_mb->buf_len);

		PMD_RX_LOG(DEBUG, rxq,
			   "CQE type = 0x%x, flags = 0x%x, vlan = 0x%x"
			   " len = %u, parsing_flags = %d\n",
			   cqe_type, fp_cqe->bitfields,
			   rte_le_to_cpu_16(fp_cqe->vlan_tag),
			   len, rte_le_to_cpu_16(fp_cqe->pars_flags.flags));

		/* If this is an error packet then drop it */
		    rte_le_to_cpu_16(cqe->fast_path_regular.pars_flags.flags);
		csum_flag = qede_check_csum(parse_flag);
		if (unlikely(csum_flag == QEDE_CSUM_ERROR)) {
				   "CQE in CONS = %u has error, flags = 0x%x "
				   "dropping incoming packet\n",
				   sw_comp_cons, parse_flag);
			qede_recycle_rx_bd_ring(rxq, qdev, fp_cqe->bd_num);

		if (unlikely(qede_alloc_rx_buffer(rxq) != 0)) {
				   "New buffer allocation failed, "
				   "dropping incoming packet\n");
			qede_recycle_rx_bd_ring(rxq, qdev, fp_cqe->bd_num);
			rte_eth_devices[rxq->port_id].
			    data->rx_mbuf_alloc_failed++;
			rxq->rx_alloc_errors++;

		qede_rx_bd_ring_consume(rxq);

		/* Prefetch next mbuf while processing current one. */
		preload_idx = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
		rte_prefetch0(rxq->sw_rx_ring[preload_idx].mbuf);

		if (fp_cqe->bd_num != 1)
			PMD_RX_LOG(DEBUG, rxq,
				   "Jumbo-over-BD packet not supported\n");

		/* Update MBUF fields */
		rx_mb->data_off = pad + RTE_PKTMBUF_HEADROOM;
		rx_mb->data_len = len;
		rx_mb->pkt_len = len;
		rx_mb->port = rxq->port_id;
		rx_mb->packet_type = qede_rx_cqe_to_pkt_type(parse_flag);

		htype = (uint8_t)GET_FIELD(fp_cqe->bitfields,
					   ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE);
		if (qdev->rss_enabled && htype) {
			rx_mb->ol_flags |= PKT_RX_RSS_HASH;
			rx_mb->hash.rss = rte_le_to_cpu_32(fp_cqe->rss_hash);
			PMD_RX_LOG(DEBUG, rxq, "Hash result 0x%x\n",

		rte_prefetch1(rte_pktmbuf_mtod(rx_mb, void *));

		if (CQE_HAS_VLAN(parse_flag)) {
			rx_mb->vlan_tci = rte_le_to_cpu_16(fp_cqe->vlan_tag);
			rx_mb->ol_flags |= PKT_RX_VLAN_PKT;

		if (CQE_HAS_OUTER_VLAN(parse_flag)) {
			/* FW does not provide indication of Outer VLAN tag,
			 * which is always stripped, so vlan_tci_outer is set
			 * to 0. Here vlan_tag represents inner VLAN tag.
			rx_mb->vlan_tci = rte_le_to_cpu_16(fp_cqe->vlan_tag);
			rx_mb->ol_flags |= PKT_RX_QINQ_PKT;

		rx_pkts[rx_pkt] = rx_mb;

		ecore_chain_recycle_consumed(&rxq->rx_comp_ring);
		sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
		if (rx_pkt == nb_pkts) {
			PMD_RX_LOG(DEBUG, rxq,
				   "Budget reached nb_pkts=%u received=%u\n",

	qede_update_rx_prod(qdev, rxq);

	PMD_RX_LOG(DEBUG, rxq, "rx_pkts=%u core=%d\n", rx_pkt, rte_lcore_id());
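/* Free the mbuf at the Tx software-ring consumer index and consume its BD. */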
qede_free_tx_pkt(struct ecore_dev *edev, struct qede_tx_queue *txq)
	uint16_t idx = TX_CONS(txq);
	struct eth_tx_bd *tx_data_bd;
	struct rte_mbuf *mbuf = txq->sw_tx_ring[idx].mbuf;

	if (unlikely(!mbuf)) {
		PMD_TX_LOG(ERR, txq,
			   "null mbuf nb_tx_desc %u nb_tx_avail %u "
			   "sw_tx_cons %u sw_tx_prod %u\n",
			   txq->nb_tx_desc, txq->nb_tx_avail, idx,

	rte_pktmbuf_free_seg(mbuf);
	txq->sw_tx_ring[idx].mbuf = NULL;
	ecore_chain_consume(&txq->tx_pbl);
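/* Reclaim Tx descriptors up to the hardware consumer index and return the
 * number of completions processed.
 */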
static inline uint16_t
qede_process_tx_compl(struct ecore_dev *edev, struct qede_tx_queue *txq)
	uint16_t tx_compl = 0;
	uint16_t hw_bd_cons;

	hw_bd_cons = rte_le_to_cpu_16(*txq->hw_cons_ptr);
	rte_compiler_barrier();

	while (hw_bd_cons != ecore_chain_get_cons_idx(&txq->tx_pbl)) {
		rc = qede_free_tx_pkt(edev, txq);
			DP_NOTICE(edev, false,
				  "hw_bd_cons = %d, chain_cons=%d\n",
				  ecore_chain_get_cons_idx(&txq->tx_pbl));
		txq->sw_tx_cons++;	/* Making TXD available */

	PMD_TX_LOG(DEBUG, txq, "Tx compl %u sw_tx_cons %u avail %u\n",
		   tx_compl, txq->sw_tx_cons, txq->nb_tx_avail);
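/* Tx burst handler: reclaim completions when the free threshold is crossed,
 * build one first BD per packet (including VLAN and checksum offload flags)
 * and ring the doorbell once for the whole burst.
 */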
qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
	struct qede_tx_queue *txq = p_txq;
	struct qede_dev *qdev = txq->qdev;
	struct ecore_dev *edev = &qdev->edev;
	struct qede_fastpath *fp = &qdev->fp_array[txq->queue_id];
	struct eth_tx_1st_bd *first_bd;
	uint16_t nb_tx_pkts;
	uint16_t nb_pkt_sent = 0;

	if (unlikely(txq->nb_tx_avail < txq->tx_free_thresh)) {
		PMD_TX_LOG(DEBUG, txq, "send=%u avail=%u free_thresh=%u\n",
			   nb_pkts, txq->nb_tx_avail, txq->tx_free_thresh);
		(void)qede_process_tx_compl(edev, txq);

	nb_tx_pkts = RTE_MIN(nb_pkts, (txq->nb_tx_avail / MAX_NUM_TX_BDS));
	if (unlikely(nb_tx_pkts == 0)) {
		PMD_TX_LOG(DEBUG, txq, "Out of BDs nb_pkts=%u avail=%u\n",
			   nb_pkts, txq->nb_tx_avail);

	tx_count = nb_tx_pkts;
	while (nb_tx_pkts--) {
		/* Fill the entry in the SW ring and the BDs in the FW ring */
		struct rte_mbuf *mbuf = *tx_pkts++;
		txq->sw_tx_ring[idx].mbuf = mbuf;
		first_bd = (struct eth_tx_1st_bd *)
			   ecore_chain_produce(&txq->tx_pbl);
		first_bd->data.bd_flags.bitfields =
			1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
		/* Map MBUF linear data for DMA and set in the first BD */
		QEDE_BD_SET_ADDR_LEN(first_bd, rte_mbuf_data_dma_addr(mbuf),

		/* Descriptor based VLAN insertion */
		if (mbuf->ol_flags & (PKT_TX_VLAN_PKT | PKT_TX_QINQ_PKT)) {
			first_bd->data.vlan = rte_cpu_to_le_16(mbuf->vlan_tci);
			first_bd->data.bd_flags.bitfields |=
				1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT;

		/* Offload the IP checksum in the hardware */
		if (mbuf->ol_flags & PKT_TX_IP_CKSUM) {
			first_bd->data.bd_flags.bitfields |=
				1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;

		/* L4 checksum offload (tcp or udp) */
		if (mbuf->ol_flags & (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) {
			first_bd->data.bd_flags.bitfields |=
				1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
			/* IPv6 + extn. -> later */

		first_bd->data.nbds = MAX_NUM_TX_BDS;

		rte_prefetch0(txq->sw_tx_ring[TX_PROD(txq)].mbuf);

		    rte_cpu_to_le_16(ecore_chain_get_prod_idx(&txq->tx_pbl));

	/* Write value of prod idx into bd_prod */
	txq->tx_db.data.bd_prod = bd_prod;
	rte_compiler_barrier();
	DIRECT_REG_WR(edev, txq->doorbell_addr, txq->tx_db.raw);

	/* Check again for Tx completions */
	(void)qede_process_tx_compl(edev, txq);

	PMD_TX_LOG(DEBUG, txq, "to_send=%u can_send=%u sent=%u core=%d\n",
		   nb_pkts, tx_count, nb_pkt_sent, rte_lcore_id());
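/* Start the port: allocate fastpath resources when coming out of CLOSE state,
 * start the vport and all queues, bring the link up and apply the Rx mode.
 */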
int qede_dev_start(struct rte_eth_dev *eth_dev)
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	struct qed_link_output link_output;

	DP_INFO(edev, "port %u\n", eth_dev->data->port_id);

	if (qdev->state == QEDE_START) {
		DP_INFO(edev, "device already started\n");

	if (qdev->state == QEDE_CLOSE) {
		rc = qede_alloc_fp_array(qdev);
		rc = qede_alloc_mem_load(qdev);
		DP_INFO(edev, "Allocated %d RSS queues on %d TC/s\n",
			QEDE_RSS_CNT(qdev), qdev->num_tc);
	} else if (qdev->state == QEDE_STOP) {
		DP_INFO(edev, "restarting port %u\n", eth_dev->data->port_id);
		DP_INFO(edev, "unknown state port %u\n",
			eth_dev->data->port_id);

	rc = qede_start_queues(eth_dev, true);
		DP_ERR(edev, "Failed to start queues\n");

	DP_INFO(edev, "Start VPORT, RXQ and TXQ succeeded\n");

	qede_dev_set_link_state(eth_dev, true);

	/* Query whether link is already-up */
	memset(&link_output, 0, sizeof(link_output));
	qdev->ops->common->get_link(edev, &link_output);
	DP_NOTICE(edev, false, "link status: %s\n",
		  link_output.link_up ? "up" : "down");

	qdev->state = QEDE_START;

	qede_config_rx_mode(eth_dev);

	DP_INFO(edev, "dev_state is QEDE_START\n");
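/* Wait for a Tx queue to drain; if it appears stuck, optionally request a
 * drain from the MCP and retry once.
 */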
static int qede_drain_txq(struct qede_dev *qdev,
			  struct qede_tx_queue *txq, bool allow_drain)
	struct ecore_dev *edev = &qdev->edev;

	while (txq->sw_tx_cons != txq->sw_tx_prod) {
		qede_process_tx_compl(edev, txq);

			DP_NOTICE(edev, false,
				  "Tx queue[%u] is stuck, "
				  "requesting MCP to drain\n",
			rc = qdev->ops->common->drain(edev);
			return qede_drain_txq(qdev, txq, false);

		DP_NOTICE(edev, false,
			  "Timeout waiting for tx queue[%d]: "
			  "PROD=%d, CONS=%d\n",
			  txq->queue_id, txq->sw_tx_prod,

	rte_compiler_barrier();

	/* FW finished processing, wait for HW to transmit all tx packets */
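/* Disable the vport, drain and stop every Tx and Rx queue in reverse order,
 * then stop the vport itself.
 */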
static int qede_stop_queues(struct qede_dev *qdev)
	struct qed_update_vport_params vport_update_params;
	struct ecore_dev *edev = &qdev->edev;

	/* Disable the vport */
	memset(&vport_update_params, 0, sizeof(vport_update_params));
	vport_update_params.vport_id = 0;
	vport_update_params.update_vport_active_flg = 1;
	vport_update_params.vport_active_flg = 0;
	vport_update_params.update_rss_flg = 0;

	DP_INFO(edev, "vport_update\n");

	rc = qdev->ops->vport_update(edev, &vport_update_params);
		DP_ERR(edev, "Failed to update vport\n");

	DP_INFO(edev, "Flushing tx queues\n");

	/* Flush Tx queues. If needed, request drain from MCP */
		struct qede_fastpath *fp = &qdev->fp_array[i];

		for (tc = 0; tc < qdev->num_tc; tc++) {
			struct qede_tx_queue *txq = fp->txqs[tc];

			rc = qede_drain_txq(qdev, txq, true);

	/* Stop all Queues in reverse order */
	for (i = QEDE_RSS_CNT(qdev) - 1; i >= 0; i--) {
		struct qed_stop_rxq_params rx_params;

		/* Stop the Tx Queue(s) */
		for (tc = 0; tc < qdev->num_tc; tc++) {
			struct qed_stop_txq_params tx_params;

			tx_params.rss_id = i;
			tx_params.tx_queue_id = tc * QEDE_RSS_CNT(qdev) + i;

			DP_INFO(edev, "Stopping tx queues\n");
			rc = qdev->ops->q_tx_stop(edev, &tx_params);
				DP_ERR(edev, "Failed to stop TXQ #%d\n",
				       tx_params.tx_queue_id);

		/* Stop the Rx Queue */
		memset(&rx_params, 0, sizeof(rx_params));
		rx_params.rss_id = i;
		rx_params.rx_queue_id = i;
		rx_params.eq_completion_only = 1;

		DP_INFO(edev, "Stopping rx queues\n");

		rc = qdev->ops->q_rx_stop(edev, &rx_params);
			DP_ERR(edev, "Failed to stop RXQ #%d\n", i);

	DP_INFO(edev, "Stopping vports\n");

	/* Stop the vport */
	rc = qdev->ops->vport_stop(edev, 0);
		DP_ERR(edev, "Failed to stop VPORT\n");

void qede_reset_fp_rings(struct qede_dev *qdev)
	for_each_rss(rss_id) {
		DP_INFO(&qdev->edev, "reset fp chain for rss %u\n", rss_id);
		struct qede_fastpath *fp = &qdev->fp_array[rss_id];
		ecore_chain_reset(&fp->rxq->rx_bd_ring);
		ecore_chain_reset(&fp->rxq->rx_comp_ring);
		for (tc = 0; tc < qdev->num_tc; tc++) {
			struct qede_tx_queue *txq = fp->txqs[tc];
			ecore_chain_reset(&txq->tx_pbl);
/* This function frees all memory of a single fp */
static void qede_free_mem_fp(struct qede_dev *qdev, struct qede_fastpath *fp)
	qede_rx_queue_release(fp->rxq);
	for (tc = 0; tc < qdev->num_tc; tc++)
		qede_tx_queue_release(fp->txqs[tc]);

void qede_free_mem_load(struct qede_dev *qdev)
	for_each_rss(rss_id) {
		struct qede_fastpath *fp = &qdev->fp_array[rss_id];
		qede_free_mem_fp(qdev, fp);
	/* qdev->num_rss = 0; */

 * Stop an Ethernet device. The device can be restarted with a call to
 * rte_eth_dev_start().
 * Do not change link state and do not release sw structures.
void qede_dev_stop(struct rte_eth_dev *eth_dev)
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;

	DP_INFO(edev, "port %u\n", eth_dev->data->port_id);

	if (qdev->state != QEDE_START) {
		DP_INFO(edev, "device not yet started\n");

	rc = qede_stop_queues(qdev);
		DP_ERR(edev, "Failed to stop queues\n");
	DP_INFO(edev, "Stopped queues\n");

	qdev->ops->fastpath_stop(edev);

	qede_reset_fp_rings(qdev);

	qdev->state = QEDE_STOP;

	DP_INFO(edev, "dev_state is QEDE_STOP\n");