2 * Copyright (c) 2016 QLogic Corporation.
6 * See LICENSE.qede_pmd for copyright and licensing details.
11 static bool gro_disable = true; /* mod_param */
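/* Pull a fresh mbuf from the Rx mempool, park it in the software ring at
 * the current producer index and post its DMA address in a newly produced
 * Rx BD; a non-zero return means the mempool ran dry.
 */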
13 static inline int qede_alloc_rx_buffer(struct qede_rx_queue *rxq)
15 struct rte_mbuf *new_mb = NULL;
16 struct eth_rx_bd *rx_bd;
18 uint16_t idx = rxq->sw_rx_prod & NUM_RX_BDS(rxq);
20 new_mb = rte_mbuf_raw_alloc(rxq->mb_pool);
21 if (unlikely(!new_mb)) {
23 "Failed to allocate rx buffer "
24 "sw_rx_prod %u sw_rx_cons %u mp entries %u free %u",
25 idx, rxq->sw_rx_cons & NUM_RX_BDS(rxq),
26 rte_mempool_avail_count(rxq->mb_pool),
27 rte_mempool_in_use_count(rxq->mb_pool));
30 rxq->sw_rx_ring[idx].mbuf = new_mb;
31 rxq->sw_rx_ring[idx].page_offset = 0;
32 mapping = rte_mbuf_data_dma_addr_default(new_mb);
33 /* Advance PROD and get BD pointer */
34 rx_bd = (struct eth_rx_bd *)ecore_chain_produce(&rxq->rx_bd_ring);
35 rx_bd->addr.hi = rte_cpu_to_le_32(U64_HI(mapping));
36 rx_bd->addr.lo = rte_cpu_to_le_32(U64_LO(mapping));
41 static void qede_rx_queue_release_mbufs(struct qede_rx_queue *rxq)
45 if (rxq->sw_rx_ring != NULL) {
46 for (i = 0; i < rxq->nb_rx_desc; i++) {
47 if (rxq->sw_rx_ring[i].mbuf != NULL) {
48 rte_pktmbuf_free(rxq->sw_rx_ring[i].mbuf);
49 rxq->sw_rx_ring[i].mbuf = NULL;
55 void qede_rx_queue_release(void *rx_queue)
57 struct qede_rx_queue *rxq = rx_queue;
60 qede_rx_queue_release_mbufs(rxq);
61 rte_free(rxq->sw_rx_ring);
62 rxq->sw_rx_ring = NULL;
68 static void qede_tx_queue_release_mbufs(struct qede_tx_queue *txq)
72 PMD_TX_LOG(DEBUG, txq, "releasing %u mbufs\n", txq->nb_tx_desc);
74 if (txq->sw_tx_ring) {
75 for (i = 0; i < txq->nb_tx_desc; i++) {
76 if (txq->sw_tx_ring[i].mbuf) {
77 rte_pktmbuf_free(txq->sw_tx_ring[i].mbuf);
78 txq->sw_tx_ring[i].mbuf = NULL;
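/* .rx_queue_setup ethdev callback: validate the descriptor count, allocate
 * the Rx queue structure and its software ring, let the ecore layer
 * allocate the FW BD and completion rings, and pre-fill the BD ring with
 * mbufs from the supplied mempool.
 */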
85 qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
86 uint16_t nb_desc, unsigned int socket_id,
87 const struct rte_eth_rxconf *rx_conf,
88 struct rte_mempool *mp)
90 struct qede_dev *qdev = dev->data->dev_private;
91 struct ecore_dev *edev = &qdev->edev;
92 struct rte_eth_dev_data *eth_data = dev->data;
93 struct qede_rx_queue *rxq;
94 uint16_t pkt_len = (uint16_t)dev->data->dev_conf.rxmode.max_rx_pkt_len;
100 PMD_INIT_FUNC_TRACE(edev);
102 /* Note: Ring size/align is controlled by struct rte_eth_desc_lim */
103 if (!rte_is_power_of_2(nb_desc)) {
104 DP_ERR(edev, "Ring size %u is not power of 2\n",
109 /* Free memory prior to re-allocation if needed... */
110 if (dev->data->rx_queues[queue_idx] != NULL) {
111 qede_rx_queue_release(dev->data->rx_queues[queue_idx]);
112 dev->data->rx_queues[queue_idx] = NULL;
115 /* First allocate the rx queue data structure */
116 rxq = rte_zmalloc_socket("qede_rx_queue", sizeof(struct qede_rx_queue),
117 RTE_CACHE_LINE_SIZE, socket_id);
120 DP_ERR(edev, "Unable to allocate memory for rxq on socket %u",
127 rxq->nb_rx_desc = nb_desc;
128 rxq->queue_id = queue_idx;
129 rxq->port_id = dev->data->port_id;
132 data_size = (uint16_t)rte_pktmbuf_data_room_size(mp) -
133 RTE_PKTMBUF_HEADROOM;
135 if (pkt_len > data_size && !dev->data->scattered_rx) {
136 DP_ERR(edev, "MTU %u should not exceed dataroom %u\n",
142 if (dev->data->scattered_rx)
143 rxq->rx_buf_size = data_size;
145 rxq->rx_buf_size = pkt_len + QEDE_ETH_OVERHEAD;
149 DP_INFO(edev, "MTU = %u ; RX buffer = %u\n",
150 qdev->mtu, rxq->rx_buf_size);
152 if (pkt_len > ETHER_MAX_LEN) {
153 dev->data->dev_conf.rxmode.jumbo_frame = 1;
154 DP_NOTICE(edev, false, "jumbo frame enabled\n");
156 dev->data->dev_conf.rxmode.jumbo_frame = 0;
159 /* Allocate the parallel driver ring for Rx buffers */
160 size = sizeof(*rxq->sw_rx_ring) * rxq->nb_rx_desc;
161 rxq->sw_rx_ring = rte_zmalloc_socket("sw_rx_ring", size,
162 RTE_CACHE_LINE_SIZE, socket_id);
163 if (!rxq->sw_rx_ring) {
164 DP_NOTICE(edev, false,
165 "Unable to alloc memory for sw_rx_ring on socket %u\n",
172 /* Allocate FW Rx ring */
173 rc = qdev->ops->common->chain_alloc(edev,
174 ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
175 ECORE_CHAIN_MODE_NEXT_PTR,
176 ECORE_CHAIN_CNT_TYPE_U16,
178 sizeof(struct eth_rx_bd),
182 if (rc != ECORE_SUCCESS) {
183 DP_NOTICE(edev, false,
184 "Unable to alloc memory for rxbd ring on socket %u\n",
186 rte_free(rxq->sw_rx_ring);
187 rxq->sw_rx_ring = NULL;
193 /* Allocate FW completion ring */
194 rc = qdev->ops->common->chain_alloc(edev,
195 ECORE_CHAIN_USE_TO_CONSUME,
196 ECORE_CHAIN_MODE_PBL,
197 ECORE_CHAIN_CNT_TYPE_U16,
199 sizeof(union eth_rx_cqe),
203 if (rc != ECORE_SUCCESS) {
204 DP_NOTICE(edev, false,
205 "Unable to alloc memory for cqe ring on socket %u\n",
207 /* TBD: Freeing RX BD ring */
208 rte_free(rxq->sw_rx_ring);
209 rxq->sw_rx_ring = NULL;
214 /* Allocate buffers for the Rx ring */
215 for (i = 0; i < rxq->nb_rx_desc; i++) {
216 rc = qede_alloc_rx_buffer(rxq);
218 DP_NOTICE(edev, false,
219 "RX buffer allocation failed at idx=%d\n", i);
224 dev->data->rx_queues[queue_idx] = rxq;
226 DP_INFO(edev, "rxq %d num_desc %u rx_buf_size=%u socket %u\n",
227 queue_idx, nb_desc, rxq->rx_buf_size, socket_id);
231 qede_rx_queue_release(rxq);
235 void qede_tx_queue_release(void *tx_queue)
237 struct qede_tx_queue *txq = tx_queue;
240 qede_tx_queue_release_mbufs(txq);
241 if (txq->sw_tx_ring) {
242 rte_free(txq->sw_tx_ring);
243 txq->sw_tx_ring = NULL;
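/* .tx_queue_setup ethdev callback: validate the descriptor count, allocate
 * the Tx queue structure, the FW BD (PBL) ring and the software ring, and
 * derive tx_free_thresh from tx_conf or the driver default.
 */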
251 qede_tx_queue_setup(struct rte_eth_dev *dev,
254 unsigned int socket_id,
255 const struct rte_eth_txconf *tx_conf)
257 struct qede_dev *qdev = dev->data->dev_private;
258 struct ecore_dev *edev = &qdev->edev;
259 struct qede_tx_queue *txq;
262 PMD_INIT_FUNC_TRACE(edev);
264 if (!rte_is_power_of_2(nb_desc)) {
265 DP_ERR(edev, "Ring size %u is not power of 2\n",
270 /* Free memory prior to re-allocation if needed... */
271 if (dev->data->tx_queues[queue_idx] != NULL) {
272 qede_tx_queue_release(dev->data->tx_queues[queue_idx]);
273 dev->data->tx_queues[queue_idx] = NULL;
276 txq = rte_zmalloc_socket("qede_tx_queue", sizeof(struct qede_tx_queue),
277 RTE_CACHE_LINE_SIZE, socket_id);
281 "Unable to allocate memory for txq on socket %u",
286 txq->nb_tx_desc = nb_desc;
288 txq->port_id = dev->data->port_id;
290 rc = qdev->ops->common->chain_alloc(edev,
291 ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
292 ECORE_CHAIN_MODE_PBL,
293 ECORE_CHAIN_CNT_TYPE_U16,
295 sizeof(union eth_tx_bd_types),
298 if (rc != ECORE_SUCCESS) {
300 "Unable to allocate memory for txbd ring on socket %u",
302 qede_tx_queue_release(txq);
306 /* Allocate software ring */
307 txq->sw_tx_ring = rte_zmalloc_socket("txq->sw_tx_ring",
308 (sizeof(struct qede_tx_entry) *
310 RTE_CACHE_LINE_SIZE, socket_id);
312 if (!txq->sw_tx_ring) {
314 "Unable to allocate memory for txbd ring on socket %u",
316 qede_tx_queue_release(txq);
320 txq->queue_id = queue_idx;
322 txq->nb_tx_avail = txq->nb_tx_desc;
324 txq->tx_free_thresh =
325 tx_conf->tx_free_thresh ? tx_conf->tx_free_thresh :
326 (txq->nb_tx_desc - QEDE_DEFAULT_TX_FREE_THRESH);
328 dev->data->tx_queues[queue_idx] = txq;
331 "txq %u num_desc %u tx_free_thresh %u socket %u\n",
332 queue_idx, nb_desc, txq->tx_free_thresh, socket_id);
337 /* This function inits fp content and resets the SB, RXQ and TXQ arrays */
338 static void qede_init_fp(struct qede_dev *qdev)
340 struct qede_fastpath *fp;
341 uint8_t i, rss_id, tc;
342 int fp_rx = qdev->fp_num_rx, rxq = 0, txq = 0;
344 memset((void *)qdev->fp_array, 0, (QEDE_QUEUE_CNT(qdev) *
345 sizeof(*qdev->fp_array)));
346 memset((void *)qdev->sb_array, 0, (QEDE_QUEUE_CNT(qdev) *
347 sizeof(*qdev->sb_array)));
349 fp = &qdev->fp_array[i];
351 fp->type = QEDE_FASTPATH_RX;
354 fp->type = QEDE_FASTPATH_TX;
358 fp->sb_info = &qdev->sb_array[i];
359 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", "qdev", i);
362 qdev->gro_disable = gro_disable;
365 void qede_free_fp_arrays(struct qede_dev *qdev)
367 /* It assumes qede_free_mem_load() is called before */
368 if (qdev->fp_array != NULL) {
369 rte_free(qdev->fp_array);
370 qdev->fp_array = NULL;
373 if (qdev->sb_array != NULL) {
374 rte_free(qdev->sb_array);
375 qdev->sb_array = NULL;
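/* Allocate the per-device fastpath and status block arrays, sized by the
 * total queue count.
 */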
379 int qede_alloc_fp_array(struct qede_dev *qdev)
381 struct qede_fastpath *fp;
382 struct ecore_dev *edev = &qdev->edev;
385 qdev->fp_array = rte_calloc("fp", QEDE_QUEUE_CNT(qdev),
386 sizeof(*qdev->fp_array),
387 RTE_CACHE_LINE_SIZE);
389 if (!qdev->fp_array) {
390 DP_ERR(edev, "fp array allocation failed\n");
394 qdev->sb_array = rte_calloc("sb", QEDE_QUEUE_CNT(qdev),
395 sizeof(*qdev->sb_array),
396 RTE_CACHE_LINE_SIZE);
398 if (!qdev->sb_array) {
399 DP_ERR(edev, "sb array allocation failed\n");
400 rte_free(qdev->fp_array);
407 /* This function allocates fast-path status block memory */
409 qede_alloc_mem_sb(struct qede_dev *qdev, struct ecore_sb_info *sb_info,
412 struct ecore_dev *edev = &qdev->edev;
413 struct status_block *sb_virt;
417 sb_virt = OSAL_DMA_ALLOC_COHERENT(edev, &sb_phys, sizeof(*sb_virt));
420 DP_ERR(edev, "Status block allocation failed\n");
424 rc = qdev->ops->common->sb_init(edev, sb_info,
425 sb_virt, sb_phys, sb_id,
426 QED_SB_TYPE_L2_QUEUE);
428 DP_ERR(edev, "Status block initialization failed\n");
429 /* TBD: No dma_free_coherent possible */
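/* Query how many status blocks are available, (re)allocate the fastpath
 * arrays and bind one status block to each fastpath entry.
 */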
436 int qede_alloc_fp_resc(struct qede_dev *qdev)
438 struct ecore_dev *edev = &qdev->edev;
439 struct qede_fastpath *fp;
444 ecore_vf_get_num_sbs(ECORE_LEADING_HWFN(edev), &num_sbs);
446 num_sbs = (ecore_cxt_get_proto_cid_count
447 (ECORE_LEADING_HWFN(edev), PROTOCOLID_ETH, NULL)) / 2;
450 DP_ERR(edev, "No status blocks available\n");
455 qede_free_fp_arrays(qdev);
457 rc = qede_alloc_fp_array(qdev);
463 for (i = 0; i < QEDE_QUEUE_CNT(qdev); i++) {
464 fp = &qdev->fp_array[i];
465 if (qede_alloc_mem_sb(qdev, fp->sb_info, i % num_sbs)) {
466 qede_free_fp_arrays(qdev);
474 void qede_dealloc_fp_resc(struct rte_eth_dev *eth_dev)
476 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
478 qede_free_mem_load(eth_dev);
479 qede_free_fp_arrays(qdev);
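/* Publish the current BD and CQE producer indices to the adapter through
 * the queue's internal RAM producer address.
 */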
483 qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq)
485 uint16_t bd_prod = ecore_chain_get_prod_idx(&rxq->rx_bd_ring);
486 uint16_t cqe_prod = ecore_chain_get_prod_idx(&rxq->rx_comp_ring);
487 struct eth_rx_prod_data rx_prods = { 0 };
489 /* Update producers */
490 rx_prods.bd_prod = rte_cpu_to_le_16(bd_prod);
491 rx_prods.cqe_prod = rte_cpu_to_le_16(cqe_prod);
493 /* Make sure that the BD and SGE data is updated before updating the
494 * producers since FW might read the BD/SGE right after the producer
499 internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods),
500 (uint32_t *)&rx_prods);
502 /* mmiowb is needed to synchronize doorbell writes from more than one
503 * processor. It guarantees that the write arrives to the device before
504 * the napi lock is released and another qede_poll is called (possibly
505 * on another CPU). Without this barrier, the next doorbell can bypass
506 * this doorbell. This is applicable to IA64/Altix systems.
510 PMD_RX_LOG(DEBUG, rxq, "bd_prod %u cqe_prod %u\n", bd_prod, cqe_prod);
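/* Start every Rx and Tx queue in FW, hook up the HW consumer pointers and
 * Tx doorbells, then activate the vport (carrying the MTU and, where
 * applicable, Tx switching settings).
 */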
513 static int qede_start_queues(struct rte_eth_dev *eth_dev, bool clear_stats)
515 struct qede_dev *qdev = eth_dev->data->dev_private;
516 struct ecore_dev *edev = &qdev->edev;
517 struct ecore_queue_start_common_params q_params;
518 struct qed_dev_info *qed_info = &qdev->dev_info.common;
519 struct qed_update_vport_params vport_update_params;
520 struct qede_tx_queue *txq;
521 struct qede_fastpath *fp;
522 dma_addr_t p_phys_table;
525 int vlan_removal_en = 1;
529 fp = &qdev->fp_array[i];
530 if (fp->type & QEDE_FASTPATH_RX) {
531 p_phys_table = ecore_chain_get_pbl_phys(&fp->rxq->
533 page_cnt = ecore_chain_get_page_cnt(&fp->rxq->
536 memset(&q_params, 0, sizeof(q_params));
537 q_params.queue_id = i;
538 q_params.vport_id = 0;
539 q_params.sb = fp->sb_info->igu_sb_id;
540 q_params.sb_idx = RX_PI;
542 ecore_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0);
544 rc = qdev->ops->q_rx_start(edev, i, &q_params,
545 fp->rxq->rx_buf_size,
546 fp->rxq->rx_bd_ring.p_phys_addr,
549 &fp->rxq->hw_rxq_prod_addr);
551 DP_ERR(edev, "Start rxq #%d failed %d\n",
552 fp->rxq->queue_id, rc);
556 fp->rxq->hw_cons_ptr =
557 &fp->sb_info->sb_virt->pi_array[RX_PI];
559 qede_update_rx_prod(qdev, fp->rxq);
562 if (!(fp->type & QEDE_FASTPATH_TX))
564 for (tc = 0; tc < qdev->num_tc; tc++) {
566 txq_index = tc * QEDE_RSS_COUNT(qdev) + i;
568 p_phys_table = ecore_chain_get_pbl_phys(&txq->tx_pbl);
569 page_cnt = ecore_chain_get_page_cnt(&txq->tx_pbl);
571 memset(&q_params, 0, sizeof(q_params));
572 q_params.queue_id = txq->queue_id;
573 q_params.vport_id = 0;
574 q_params.sb = fp->sb_info->igu_sb_id;
575 q_params.sb_idx = TX_PI(tc);
577 rc = qdev->ops->q_tx_start(edev, i, &q_params,
579 page_cnt, /* **pp_doorbell */
580 &txq->doorbell_addr);
582 DP_ERR(edev, "Start txq %u failed %d\n",
588 &fp->sb_info->sb_virt->pi_array[TX_PI(tc)];
589 SET_FIELD(txq->tx_db.data.params,
590 ETH_DB_DATA_DEST, DB_DEST_XCM);
591 SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD,
593 SET_FIELD(txq->tx_db.data.params,
594 ETH_DB_DATA_AGG_VAL_SEL,
595 DQ_XCM_ETH_TX_BD_PROD_CMD);
597 txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
601 /* Prepare and send the vport enable */
602 memset(&vport_update_params, 0, sizeof(vport_update_params));
603 /* Update MTU via vport update */
604 vport_update_params.mtu = qdev->mtu;
605 vport_update_params.vport_id = 0;
606 vport_update_params.update_vport_active_flg = 1;
607 vport_update_params.vport_active_flg = 1;
610 if (qed_info->mf_mode == MF_NPAR && qed_info->tx_switching) {
611 /* TBD: Check SRIOV enabled for VF */
612 vport_update_params.update_tx_switching_flg = 1;
613 vport_update_params.tx_switching_flg = 1;
616 rc = qdev->ops->vport_update(edev, &vport_update_params);
618 DP_ERR(edev, "Update V-PORT failed %d\n", rc);
625 static bool qede_tunn_exist(uint16_t flag)
627 return !!((PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK <<
628 PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT) & flag);
632 * qede_check_tunn_csum_l4:
634 * 1 : If L4 csum is enabled AND if the validation has failed.
637 static inline uint8_t qede_check_tunn_csum_l4(uint16_t flag)
639 if ((PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK <<
640 PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT) & flag)
641 return !!((PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK <<
642 PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT) & flag);
647 static inline uint8_t qede_check_notunn_csum_l4(uint16_t flag)
649 if ((PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
650 PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT) & flag)
651 return !!((PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
652 PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT) & flag);
657 static inline uint8_t
658 qede_check_notunn_csum_l3(struct rte_mbuf *m, uint16_t flag)
665 val = ((PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
666 PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT) & flag);
669 m->packet_type = qede_rx_cqe_to_pkt_type(flag);
670 if (RTE_ETH_IS_IPV4_HDR(m->packet_type)) {
671 ip = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *,
672 sizeof(struct ether_hdr));
673 pkt_csum = ip->hdr_checksum;
674 ip->hdr_checksum = 0;
675 calc_csum = rte_ipv4_cksum(ip);
676 ip->hdr_checksum = pkt_csum;
677 return (calc_csum != pkt_csum);
678 } else if (RTE_ETH_IS_IPV6_HDR(m->packet_type)) {
685 static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq)
687 ecore_chain_consume(&rxq->rx_bd_ring);
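/* Recycle the buffer at the software consumer index: copy the entry to the
 * producer slot and re-post its DMA address in a fresh Rx BD, avoiding a
 * new mbuf allocation.
 */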
692 qede_reuse_page(struct qede_dev *qdev,
693 struct qede_rx_queue *rxq, struct qede_rx_entry *curr_cons)
695 struct eth_rx_bd *rx_bd_prod = ecore_chain_produce(&rxq->rx_bd_ring);
696 uint16_t idx = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
697 struct qede_rx_entry *curr_prod;
698 dma_addr_t new_mapping;
700 curr_prod = &rxq->sw_rx_ring[idx];
701 *curr_prod = *curr_cons;
703 new_mapping = rte_mbuf_data_dma_addr_default(curr_prod->mbuf) +
704 curr_prod->page_offset;
706 rx_bd_prod->addr.hi = rte_cpu_to_le_32(U64_HI(new_mapping));
707 rx_bd_prod->addr.lo = rte_cpu_to_le_32(U64_LO(new_mapping));
713 qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq,
714 struct qede_dev *qdev, uint8_t count)
716 struct qede_rx_entry *curr_cons;
718 for (; count > 0; count--) {
719 curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS(rxq)];
720 qede_reuse_page(qdev, rxq, curr_cons);
721 qede_rx_bd_ring_consume(rxq);
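/* Translate the L3/L4 bits of the CQE parsing flags into an mbuf
 * packet_type via a small lookup table.
 */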
725 static inline uint32_t qede_rx_cqe_to_pkt_type(uint16_t flags)
730 static const uint32_t
731 ptype_lkup_tbl[QEDE_PKT_TYPE_MAX] __rte_cache_aligned = {
732 [QEDE_PKT_TYPE_IPV4] = RTE_PTYPE_L3_IPV4,
733 [QEDE_PKT_TYPE_IPV6] = RTE_PTYPE_L3_IPV6,
734 [QEDE_PKT_TYPE_IPV4_TCP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
735 [QEDE_PKT_TYPE_IPV6_TCP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
736 [QEDE_PKT_TYPE_IPV4_UDP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
737 [QEDE_PKT_TYPE_IPV6_UDP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
740 /* Bits (0..3) provide the L3/L4 protocol type */
741 val = ((PARSING_AND_ERR_FLAGS_L3TYPE_MASK <<
742 PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) |
743 (PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK <<
744 PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT)) & flags;
746 if (val < QEDE_PKT_TYPE_MAX)
747 return ptype_lkup_tbl[val] | RTE_PTYPE_L2_ETHER;
749 return RTE_PTYPE_UNKNOWN;
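/* Same idea for tunneled frames: map the tunnel-type and next-protocol
 * bits of the CQE flags to GENEVE/GRE/VXLAN packet types.
 */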
752 static inline uint32_t qede_rx_cqe_to_tunn_pkt_type(uint16_t flags)
757 static const uint32_t
758 ptype_tunn_lkup_tbl[QEDE_PKT_TYPE_TUNN_MAX_TYPE] __rte_cache_aligned = {
759 [QEDE_PKT_TYPE_UNKNOWN] = RTE_PTYPE_UNKNOWN,
760 [QEDE_PKT_TYPE_TUNN_GENEVE] = RTE_PTYPE_TUNNEL_GENEVE,
761 [QEDE_PKT_TYPE_TUNN_GRE] = RTE_PTYPE_TUNNEL_GRE,
762 [QEDE_PKT_TYPE_TUNN_VXLAN] = RTE_PTYPE_TUNNEL_VXLAN,
763 [QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_GENEVE] =
764 RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L2_ETHER,
765 [QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_GRE] =
766 RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L2_ETHER,
767 [QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_VXLAN] =
768 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L2_ETHER,
769 [QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_GENEVE] =
770 RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L2_ETHER,
771 [QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_GRE] =
772 RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L2_ETHER,
773 [QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_VXLAN] =
774 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L2_ETHER,
775 [QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_GENEVE] =
776 RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L3_IPV4,
777 [QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_GRE] =
778 RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L3_IPV4,
779 [QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_VXLAN] =
780 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L3_IPV4,
781 [QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_GENEVE] =
782 RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L3_IPV4,
783 [QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_GRE] =
784 RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L3_IPV4,
785 [QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_VXLAN] =
786 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L3_IPV4,
787 [QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_GENEVE] =
788 RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L3_IPV6,
789 [QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_GRE] =
790 RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L3_IPV6,
791 [QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_VXLAN] =
792 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L3_IPV6,
793 [QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_GENEVE] =
794 RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L3_IPV6,
795 [QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_GRE] =
796 RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L3_IPV6,
797 [QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_VXLAN] =
798 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L3_IPV6,
801 /* Bits (4..0) cover the tunnel type and next protocol */
802 val = ((ETH_TUNNEL_PARSING_FLAGS_TYPE_MASK <<
803 ETH_TUNNEL_PARSING_FLAGS_TYPE_SHIFT) |
804 (ETH_TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_MASK <<
805 ETH_TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_SHIFT)) & flags;
807 if (val < QEDE_PKT_TYPE_TUNN_MAX_TYPE)
808 return ptype_tunn_lkup_tbl[val];
810 return RTE_PTYPE_UNKNOWN;
814 qede_process_sg_pkts(void *p_rxq, struct rte_mbuf *rx_mb,
815 uint8_t num_segs, uint16_t pkt_len)
817 struct qede_rx_queue *rxq = p_rxq;
818 struct qede_dev *qdev = rxq->qdev;
819 struct ecore_dev *edev = &qdev->edev;
820 register struct rte_mbuf *seg1 = NULL;
821 register struct rte_mbuf *seg2 = NULL;
822 uint16_t sw_rx_index;
827 cur_size = pkt_len > rxq->rx_buf_size ? rxq->rx_buf_size :
829 if (unlikely(!cur_size)) {
830 PMD_RX_LOG(ERR, rxq, "Length is 0 while %u BDs"
831 " left for mapping jumbo\n", num_segs);
832 qede_recycle_rx_bd_ring(rxq, qdev, num_segs);
835 sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
836 seg2 = rxq->sw_rx_ring[sw_rx_index].mbuf;
837 qede_rx_bd_ring_consume(rxq);
839 seg2->data_len = cur_size;
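/* Rx burst handler: walk the completion ring between the HW and SW
 * consumer indices, hand completed mbufs to the caller, refill the BD ring
 * and push the new producers to the device. Applications reach it through
 * the ethdev API, e.g. (illustrative snippet, variable names are
 * hypothetical):
 *
 *     struct rte_mbuf *pkts[32];
 *     uint16_t nb = rte_eth_rx_burst(port_id, queue_id, pkts, 32);
 */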
850 qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
852 struct qede_rx_queue *rxq = p_rxq;
853 struct qede_dev *qdev = rxq->qdev;
854 struct ecore_dev *edev = &qdev->edev;
855 struct qede_fastpath *fp = &qdev->fp_array[rxq->queue_id];
856 uint16_t hw_comp_cons, sw_comp_cons, sw_rx_index;
858 union eth_rx_cqe *cqe;
859 struct eth_fast_path_rx_reg_cqe *fp_cqe;
860 register struct rte_mbuf *rx_mb = NULL;
861 register struct rte_mbuf *seg1 = NULL;
862 enum eth_rx_cqe_type cqe_type;
863 uint16_t pkt_len; /* Sum of all BD segments */
864 uint16_t len; /* Length of first BD */
865 uint8_t num_segs = 1;
867 uint16_t preload_idx;
870 enum rss_hash_type htype;
871 uint8_t tunn_parse_flag;
874 hw_comp_cons = rte_le_to_cpu_16(*rxq->hw_cons_ptr);
875 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
879 if (hw_comp_cons == sw_comp_cons)
882 while (sw_comp_cons != hw_comp_cons) {
883 /* Get the CQE from the completion ring */
885 (union eth_rx_cqe *)ecore_chain_consume(&rxq->rx_comp_ring);
886 cqe_type = cqe->fast_path_regular.type;
888 if (unlikely(cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH)) {
889 PMD_RX_LOG(DEBUG, rxq, "Got a slowpath CQE\n");
891 qdev->ops->eth_cqe_completion(edev, fp->id,
892 (struct eth_slow_path_rx_cqe *)cqe);
896 /* Get the data from the SW ring */
897 sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
898 rx_mb = rxq->sw_rx_ring[sw_rx_index].mbuf;
899 assert(rx_mb != NULL);
902 fp_cqe = &cqe->fast_path_regular;
904 len = rte_le_to_cpu_16(fp_cqe->len_on_first_bd);
905 pkt_len = rte_le_to_cpu_16(fp_cqe->pkt_len);
906 pad = fp_cqe->placement_offset;
907 assert((len + pad) <= rx_mb->buf_len);
909 PMD_RX_LOG(DEBUG, rxq,
910 "CQE type = 0x%x, flags = 0x%x, vlan = 0x%x"
911 " len = %u, parsing_flags = %d\n",
912 cqe_type, fp_cqe->bitfields,
913 rte_le_to_cpu_16(fp_cqe->vlan_tag),
914 len, rte_le_to_cpu_16(fp_cqe->pars_flags.flags));
916 /* If this is an error packet then drop it */
918 rte_le_to_cpu_16(cqe->fast_path_regular.pars_flags.flags);
922 if (qede_tunn_exist(parse_flag)) {
923 PMD_RX_LOG(DEBUG, rxq, "Rx tunneled packet\n");
924 if (unlikely(qede_check_tunn_csum_l4(parse_flag))) {
926 "L4 csum failed, flags = 0x%x\n",
929 rx_mb->ol_flags |= PKT_RX_L4_CKSUM_BAD;
932 fp_cqe->tunnel_pars_flags.flags;
934 qede_rx_cqe_to_tunn_pkt_type(
938 PMD_RX_LOG(DEBUG, rxq, "Rx non-tunneled packet\n");
939 if (unlikely(qede_check_notunn_csum_l4(parse_flag))) {
941 "L4 csum failed, flags = 0x%x\n",
944 rx_mb->ol_flags |= PKT_RX_L4_CKSUM_BAD;
945 } else if (unlikely(qede_check_notunn_csum_l3(rx_mb,
948 "IP csum failed, flags = 0x%x\n",
951 rx_mb->ol_flags |= PKT_RX_IP_CKSUM_BAD;
954 qede_rx_cqe_to_pkt_type(parse_flag);
958 PMD_RX_LOG(INFO, rxq, "packet_type 0x%x\n", rx_mb->packet_type);
960 if (unlikely(qede_alloc_rx_buffer(rxq) != 0)) {
962 "New buffer allocation failed,"
963 "dropping incoming packet\n");
964 qede_recycle_rx_bd_ring(rxq, qdev, fp_cqe->bd_num);
965 rte_eth_devices[rxq->port_id].
966 data->rx_mbuf_alloc_failed++;
967 rxq->rx_alloc_errors++;
970 qede_rx_bd_ring_consume(rxq);
971 if (fp_cqe->bd_num > 1) {
972 PMD_RX_LOG(DEBUG, rxq, "Jumbo-over-BD packet: %02x BDs"
973 " len on first: %04x Total Len: %04x\n",
974 fp_cqe->bd_num, len, pkt_len);
975 num_segs = fp_cqe->bd_num - 1;
977 if (qede_process_sg_pkts(p_rxq, seg1, num_segs,
980 for (j = 0; j < num_segs; j++) {
981 if (qede_alloc_rx_buffer(rxq)) {
983 "Buffer allocation failed\n");
984 rte_eth_devices[rxq->port_id].
985 data->rx_mbuf_alloc_failed++;
986 rxq->rx_alloc_errors++;
992 rxq->rx_segs++; /* for the first segment */
994 /* Prefetch next mbuf while processing current one. */
995 preload_idx = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
996 rte_prefetch0(rxq->sw_rx_ring[preload_idx].mbuf);
998 /* Update rest of the MBUF fields */
999 rx_mb->data_off = pad + RTE_PKTMBUF_HEADROOM;
1000 rx_mb->nb_segs = fp_cqe->bd_num;
1001 rx_mb->data_len = len;
1002 rx_mb->pkt_len = pkt_len;
1003 rx_mb->port = rxq->port_id;
1005 htype = (uint8_t)GET_FIELD(fp_cqe->bitfields,
1006 ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE);
1007 if (qdev->rss_enable && htype) {
1008 rx_mb->ol_flags |= PKT_RX_RSS_HASH;
1009 rx_mb->hash.rss = rte_le_to_cpu_32(fp_cqe->rss_hash);
1010 PMD_RX_LOG(DEBUG, rxq, "Hash result 0x%x\n",
1014 rte_prefetch1(rte_pktmbuf_mtod(rx_mb, void *));
1016 if (CQE_HAS_VLAN(parse_flag)) {
1017 rx_mb->vlan_tci = rte_le_to_cpu_16(fp_cqe->vlan_tag);
1018 rx_mb->ol_flags |= PKT_RX_VLAN_PKT;
1021 if (CQE_HAS_OUTER_VLAN(parse_flag)) {
1022 /* FW does not provide indication of Outer VLAN tag,
1023 * which is always stripped, so vlan_tci_outer is set
1024 * to 0. Here vlan_tag represents inner VLAN tag.
1026 rx_mb->vlan_tci = rte_le_to_cpu_16(fp_cqe->vlan_tag);
1027 rx_mb->ol_flags |= PKT_RX_QINQ_PKT;
1028 rx_mb->vlan_tci_outer = 0;
1031 rx_pkts[rx_pkt] = rx_mb;
1034 ecore_chain_recycle_consumed(&rxq->rx_comp_ring);
1035 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
1036 if (rx_pkt == nb_pkts) {
1037 PMD_RX_LOG(DEBUG, rxq,
1038 "Budget reached nb_pkts=%u received=%u\n",
1044 qede_update_rx_prod(qdev, rxq);
1046 rxq->rcv_pkts += rx_pkt;
1048 PMD_RX_LOG(DEBUG, rxq, "rx_pkts=%u core=%d\n", rx_pkt, rte_lcore_id());
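/* Free the mbuf held at the Tx consumer index and consume its BDs from the
 * PBL chain.
 */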
1054 qede_free_tx_pkt(struct ecore_dev *edev, struct qede_tx_queue *txq)
1056 uint16_t nb_segs, idx = TX_CONS(txq);
1057 struct eth_tx_bd *tx_data_bd;
1058 struct rte_mbuf *mbuf = txq->sw_tx_ring[idx].mbuf;
1060 if (unlikely(!mbuf)) {
1061 PMD_TX_LOG(ERR, txq, "null mbuf\n");
1062 PMD_TX_LOG(ERR, txq,
1063 "tx_desc %u tx_avail %u tx_cons %u tx_prod %u\n",
1064 txq->nb_tx_desc, txq->nb_tx_avail, idx,
1069 nb_segs = mbuf->nb_segs;
1071 /* It's like consuming rxbuf in recv() */
1072 ecore_chain_consume(&txq->tx_pbl);
1076 rte_pktmbuf_free(mbuf);
1077 txq->sw_tx_ring[idx].mbuf = NULL;
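/* Walk the Tx PBL from the SW consumer up to the HW consumer index,
 * freeing every completed packet and counting the completions in tx_compl.
 */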
1082 static inline uint16_t
1083 qede_process_tx_compl(struct ecore_dev *edev, struct qede_tx_queue *txq)
1085 uint16_t tx_compl = 0;
1086 uint16_t hw_bd_cons;
1088 hw_bd_cons = rte_le_to_cpu_16(*txq->hw_cons_ptr);
1089 rte_compiler_barrier();
1091 while (hw_bd_cons != ecore_chain_get_cons_idx(&txq->tx_pbl)) {
1092 if (qede_free_tx_pkt(edev, txq)) {
1093 PMD_TX_LOG(ERR, txq,
1094 "hw_bd_cons = %u, chain_cons = %u\n",
1096 ecore_chain_get_cons_idx(&txq->tx_pbl));
1099 txq->sw_tx_cons++; /* Making TXD available */
1103 PMD_TX_LOG(DEBUG, txq, "Tx compl %u sw_tx_cons %u avail %u\n",
1104 tx_compl, txq->sw_tx_cons, txq->nb_tx_avail);
1108 /* Populate scatter gather buffer descriptor fields */
1109 static inline uint8_t
1110 qede_encode_sg_bd(struct qede_tx_queue *p_txq, struct rte_mbuf *m_seg,
1111 struct eth_tx_1st_bd *bd1)
1113 struct qede_tx_queue *txq = p_txq;
1114 struct eth_tx_2nd_bd *bd2 = NULL;
1115 struct eth_tx_3rd_bd *bd3 = NULL;
1116 struct eth_tx_bd *tx_bd = NULL;
1118 uint8_t nb_segs = 1; /* min one segment per packet */
1120 /* Check for scattered buffers */
1123 bd2 = (struct eth_tx_2nd_bd *)
1124 ecore_chain_produce(&txq->tx_pbl);
1125 memset(bd2, 0, sizeof(*bd2));
1126 mapping = rte_mbuf_data_dma_addr(m_seg);
1127 QEDE_BD_SET_ADDR_LEN(bd2, mapping, m_seg->data_len);
1128 PMD_TX_LOG(DEBUG, txq, "BD2 len %04x\n",
1130 } else if (nb_segs == 2) {
1131 bd3 = (struct eth_tx_3rd_bd *)
1132 ecore_chain_produce(&txq->tx_pbl);
1133 memset(bd3, 0, sizeof(*bd3));
1134 mapping = rte_mbuf_data_dma_addr(m_seg);
1135 QEDE_BD_SET_ADDR_LEN(bd3, mapping, m_seg->data_len);
1136 PMD_TX_LOG(DEBUG, txq, "BD3 len %04x\n",
1139 tx_bd = (struct eth_tx_bd *)
1140 ecore_chain_produce(&txq->tx_pbl);
1141 memset(tx_bd, 0, sizeof(*tx_bd));
1142 mapping = rte_mbuf_data_dma_addr(m_seg);
1143 QEDE_BD_SET_ADDR_LEN(tx_bd, mapping, m_seg->data_len);
1144 PMD_TX_LOG(DEBUG, txq, "BD len %04x\n",
1148 m_seg = m_seg->next;
1151 /* Return total scattered buffers */
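/* Tx burst handler: reclaim completed descriptors when the queue runs low
 * on BDs, build one or more BDs per mbuf (including checksum, VLAN and
 * tunnel offload flags) and ring the doorbell once for the whole burst.
 * Applications reach it through the ethdev API, e.g. (illustrative
 * snippet, variable names are hypothetical):
 *
 *     uint16_t sent = rte_eth_tx_burst(port_id, queue_id, pkts, nb);
 */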
1156 qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
1158 struct qede_tx_queue *txq = p_txq;
1159 struct qede_dev *qdev = txq->qdev;
1160 struct ecore_dev *edev = &qdev->edev;
1161 struct qede_fastpath *fp;
1162 struct eth_tx_1st_bd *bd1;
1163 struct rte_mbuf *mbuf;
1164 struct rte_mbuf *m_seg = NULL;
1165 uint16_t nb_tx_pkts;
1170 uint16_t nb_pkt_sent = 0;
1172 fp = &qdev->fp_array[QEDE_RSS_COUNT(qdev) + txq->queue_id];
1174 if (unlikely(txq->nb_tx_avail < txq->tx_free_thresh)) {
1175 PMD_TX_LOG(DEBUG, txq, "send=%u avail=%u free_thresh=%u\n",
1176 nb_pkts, txq->nb_tx_avail, txq->tx_free_thresh);
1177 (void)qede_process_tx_compl(edev, txq);
1180 nb_tx_pkts = RTE_MIN(nb_pkts, (txq->nb_tx_avail /
1181 ETH_TX_MAX_BDS_PER_NON_LSO_PACKET));
1182 if (unlikely(nb_tx_pkts == 0)) {
1183 PMD_TX_LOG(DEBUG, txq, "Out of BDs nb_pkts=%u avail=%u\n",
1184 nb_pkts, txq->nb_tx_avail);
1188 tx_count = nb_tx_pkts;
1189 while (nb_tx_pkts--) {
1190 /* Fill the entry in the SW ring and the BDs in the FW ring */
1193 txq->sw_tx_ring[idx].mbuf = mbuf;
1194 bd1 = (struct eth_tx_1st_bd *)ecore_chain_produce(&txq->tx_pbl);
1195 bd1->data.bd_flags.bitfields =
1196 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
1197 /* FW 8.10.x specific change */
1198 bd1->data.bitfields =
1199 (mbuf->pkt_len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK)
1200 << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
1201 /* Map MBUF linear data for DMA and set in the first BD */
1202 QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_dma_addr(mbuf),
1204 PMD_TX_LOG(INFO, txq, "BD1 len %04x\n", mbuf->data_len);
1206 if (RTE_ETH_IS_TUNNEL_PKT(mbuf->packet_type)) {
1207 PMD_TX_LOG(INFO, txq, "Tx tunnel packet\n");
1208 /* First indicate it's a tunnel pkt */
1209 bd1->data.bd_flags.bitfields |=
1210 ETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK <<
1211 ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
1213 /* Legacy FW had flipped behavior in regard to this bit
1214 * i.e. it needed to be set to prevent FW from touching
1215 * encapsulated packets when it didn't need to.
1217 if (unlikely(txq->is_legacy))
1218 bd1->data.bitfields ^=
1219 1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
1221 /* Outer IP checksum offload */
1222 if (mbuf->ol_flags & PKT_TX_OUTER_IP_CKSUM) {
1223 PMD_TX_LOG(INFO, txq, "OuterIP csum offload\n");
1224 bd1->data.bd_flags.bitfields |=
1225 ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_MASK <<
1226 ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT;
1229 /* Outer UDP checksum offload */
1230 bd1->data.bd_flags.bitfields |=
1231 ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK <<
1232 ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT;
1235 /* Descriptor based VLAN insertion */
1236 if (mbuf->ol_flags & (PKT_TX_VLAN_PKT | PKT_TX_QINQ_PKT)) {
1237 PMD_TX_LOG(INFO, txq, "Insert VLAN 0x%x\n",
1239 bd1->data.vlan = rte_cpu_to_le_16(mbuf->vlan_tci);
1240 bd1->data.bd_flags.bitfields |=
1241 1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT;
1244 /* Offload the IP checksum in the hardware */
1245 if (mbuf->ol_flags & PKT_TX_IP_CKSUM) {
1246 PMD_TX_LOG(INFO, txq, "IP csum offload\n");
1247 bd1->data.bd_flags.bitfields |=
1248 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
1251 /* L4 checksum offload (tcp or udp) */
1252 if (mbuf->ol_flags & (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) {
1253 PMD_TX_LOG(INFO, txq, "L4 csum offload\n");
1254 bd1->data.bd_flags.bitfields |=
1255 1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
1256 /* IPv6 + extn. -> later */
1259 /* Handle fragmented MBUF */
1261 /* Encode scatter gather buffer descriptors if required */
1262 nb_frags = qede_encode_sg_bd(txq, m_seg, bd1);
1263 bd1->data.nbds = nb_frags;
1264 txq->nb_tx_avail -= nb_frags;
1266 rte_prefetch0(txq->sw_tx_ring[TX_PROD(txq)].mbuf);
1268 rte_cpu_to_le_16(ecore_chain_get_prod_idx(&txq->tx_pbl));
1271 PMD_TX_LOG(INFO, txq, "nbds = %d pkt_len = %04x\n",
1272 bd1->data.nbds, mbuf->pkt_len);
1275 /* Write value of prod idx into bd_prod */
1276 txq->tx_db.data.bd_prod = bd_prod;
1278 rte_compiler_barrier();
1279 DIRECT_REG_WR(edev, txq->doorbell_addr, txq->tx_db.raw);
1282 /* Check again for Tx completions */
1283 (void)qede_process_tx_compl(edev, txq);
1285 PMD_TX_LOG(DEBUG, txq, "to_send=%u can_send=%u sent=%u core=%d\n",
1286 nb_pkts, tx_count, nb_pkt_sent, rte_lcore_id());
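/* Attach the Rx/Tx queues configured through the ethdev API to the
 * driver's fastpath entries and assign their queue ids.
 */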
1291 static void qede_init_fp_queue(struct rte_eth_dev *eth_dev)
1293 struct qede_dev *qdev = eth_dev->data->dev_private;
1294 struct qede_fastpath *fp;
1295 uint8_t i, rss_id, txq_index, tc;
1296 int rxq = 0, txq = 0;
1299 fp = &qdev->fp_array[i];
1300 if (fp->type & QEDE_FASTPATH_RX) {
1301 fp->rxq = eth_dev->data->rx_queues[i];
1302 fp->rxq->queue_id = rxq++;
1305 if (fp->type & QEDE_FASTPATH_TX) {
1306 for (tc = 0; tc < qdev->num_tc; tc++) {
1307 txq_index = tc * QEDE_TSS_COUNT(qdev) + txq;
1309 eth_dev->data->tx_queues[txq_index];
1310 fp->txqs[tc]->queue_id = txq_index;
1311 if (qdev->dev_info.is_legacy)
1312 fp->txqs[tc]->is_legacy = true;
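/* .dev_start ethdev callback: wire up the fastpath queues if freshly
 * configured, start the FW queues, bring the link up and start the
 * fastpath processing.
 */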
1319 int qede_dev_start(struct rte_eth_dev *eth_dev)
1321 struct qede_dev *qdev = eth_dev->data->dev_private;
1322 struct ecore_dev *edev = &qdev->edev;
1323 struct qed_link_output link_output;
1324 struct qede_fastpath *fp;
1327 DP_INFO(edev, "Device state is %d\n", qdev->state);
1329 if (qdev->state == QEDE_DEV_START) {
1330 DP_INFO(edev, "Port is already started\n");
1334 if (qdev->state == QEDE_DEV_CONFIG)
1335 qede_init_fp_queue(eth_dev);
1337 rc = qede_start_queues(eth_dev, true);
1339 DP_ERR(edev, "Failed to start queues\n");
1344 /* Bring-up the link */
1345 qede_dev_set_link_state(eth_dev, true);
1348 if (qede_reset_fp_rings(qdev))
1351 /* Start/resume traffic */
1352 qdev->ops->fastpath_start(edev);
1354 qdev->state = QEDE_DEV_START;
1356 DP_INFO(edev, "dev_state is QEDE_DEV_START\n");
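/* Poll Tx completions until the queue empties; if it appears stuck, ask
 * the MCP to drain once and retry before reporting a timeout.
 */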
1361 static int qede_drain_txq(struct qede_dev *qdev,
1362 struct qede_tx_queue *txq, bool allow_drain)
1364 struct ecore_dev *edev = &qdev->edev;
1367 while (txq->sw_tx_cons != txq->sw_tx_prod) {
1368 qede_process_tx_compl(edev, txq);
1371 DP_NOTICE(edev, false,
1372 "Tx queue[%u] is stuck,"
1373 "requesting MCP to drain\n",
1375 rc = qdev->ops->common->drain(edev);
1378 return qede_drain_txq(qdev, txq, false);
1381 DP_NOTICE(edev, false,
1382 "Timeout waiting for tx queue[%d]:"
1383 "PROD=%d, CONS=%d\n",
1384 txq->queue_id, txq->sw_tx_prod,
1390 rte_compiler_barrier();
1393 /* FW finished processing, wait for HW to transmit all tx packets */
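/* Deactivate the vport, drain every Tx queue (requesting an MCP drain if
 * needed) and then stop the Tx and Rx queues in reverse order.
 */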
1399 static int qede_stop_queues(struct qede_dev *qdev)
1401 struct qed_update_vport_params vport_update_params;
1402 struct ecore_dev *edev = &qdev->edev;
1405 /* Disable the vport */
1406 memset(&vport_update_params, 0, sizeof(vport_update_params));
1407 vport_update_params.vport_id = 0;
1408 vport_update_params.update_vport_active_flg = 1;
1409 vport_update_params.vport_active_flg = 0;
1410 vport_update_params.update_rss_flg = 0;
1412 DP_INFO(edev, "Deactivate vport\n");
1414 rc = qdev->ops->vport_update(edev, &vport_update_params);
1416 DP_ERR(edev, "Failed to update vport\n");
1420 DP_INFO(edev, "Flushing tx queues\n");
1422 /* Flush Tx queues. If needed, request drain from MCP */
1424 struct qede_fastpath *fp = &qdev->fp_array[i];
1426 if (fp->type & QEDE_FASTPATH_TX) {
1427 for (tc = 0; tc < qdev->num_tc; tc++) {
1428 struct qede_tx_queue *txq = fp->txqs[tc];
1430 rc = qede_drain_txq(qdev, txq, true);
1437 /* Stop all Queues in reverse order */
1438 for (i = QEDE_QUEUE_CNT(qdev) - 1; i >= 0; i--) {
1439 struct qed_stop_rxq_params rx_params;
1441 /* Stop the Tx Queue(s) */
1442 if (qdev->fp_array[i].type & QEDE_FASTPATH_TX) {
1443 for (tc = 0; tc < qdev->num_tc; tc++) {
1444 struct qed_stop_txq_params tx_params;
1447 tx_params.rss_id = i;
1448 val = qdev->fp_array[i].txqs[tc]->queue_id;
1449 tx_params.tx_queue_id = val;
1451 DP_INFO(edev, "Stopping tx queues\n");
1452 rc = qdev->ops->q_tx_stop(edev, &tx_params);
1454 DP_ERR(edev, "Failed to stop TXQ #%d\n",
1455 tx_params.tx_queue_id);
1461 /* Stop the Rx Queue */
1462 if (qdev->fp_array[i].type & QEDE_FASTPATH_RX) {
1463 memset(&rx_params, 0, sizeof(rx_params));
1464 rx_params.rss_id = i;
1465 rx_params.rx_queue_id = qdev->fp_array[i].rxq->queue_id;
1466 rx_params.eq_completion_only = 1;
1468 DP_INFO(edev, "Stopping rx queues\n");
1470 rc = qdev->ops->q_rx_stop(edev, &rx_params);
1472 DP_ERR(edev, "Failed to stop RXQ #%d\n", i);
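/* Drop any mbufs still held by the rings, reset the ecore chains and the
 * SW/HW producer-consumer indices, and re-fill the Rx BD rings with fresh
 * buffers.
 */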
1481 int qede_reset_fp_rings(struct qede_dev *qdev)
1483 struct qede_fastpath *fp;
1484 struct qede_tx_queue *txq;
1488 for_each_queue(id) {
1489 fp = &qdev->fp_array[id];
1491 if (fp->type & QEDE_FASTPATH_RX) {
1492 DP_INFO(&qdev->edev,
1493 "Reset FP chain for RSS %u\n", id);
1494 qede_rx_queue_release_mbufs(fp->rxq);
1495 ecore_chain_reset(&fp->rxq->rx_bd_ring);
1496 ecore_chain_reset(&fp->rxq->rx_comp_ring);
1497 fp->rxq->sw_rx_prod = 0;
1498 fp->rxq->sw_rx_cons = 0;
1499 *fp->rxq->hw_cons_ptr = 0;
1500 for (i = 0; i < fp->rxq->nb_rx_desc; i++) {
1501 if (qede_alloc_rx_buffer(fp->rxq)) {
1503 "RX buffer allocation failed\n");
1508 if (fp->type & QEDE_FASTPATH_TX) {
1509 for (tc = 0; tc < qdev->num_tc; tc++) {
1511 qede_tx_queue_release_mbufs(txq);
1512 ecore_chain_reset(&txq->tx_pbl);
1513 txq->sw_tx_cons = 0;
1514 txq->sw_tx_prod = 0;
1515 *txq->hw_cons_ptr = 0;
1523 /* This function frees the Rx/Tx queue memory of every fastpath entry */
1524 void qede_free_mem_load(struct rte_eth_dev *eth_dev)
1526 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1527 struct qede_fastpath *fp;
1532 for_each_queue(id) {
1533 fp = &qdev->fp_array[id];
1534 if (fp->type & QEDE_FASTPATH_RX) {
1537 qede_rx_queue_release(fp->rxq);
1538 eth_dev->data->rx_queues[id] = NULL;
1540 for (tc = 0; tc < qdev->num_tc; tc++) {
1543 txq_idx = fp->txqs[tc]->queue_id;
1544 qede_tx_queue_release(fp->txqs[tc]);
1545 eth_dev->data->tx_queues[txq_idx] = NULL;
1551 void qede_dev_stop(struct rte_eth_dev *eth_dev)
1553 struct qede_dev *qdev = eth_dev->data->dev_private;
1554 struct ecore_dev *edev = &qdev->edev;
1556 DP_INFO(edev, "port %u\n", eth_dev->data->port_id);
1558 if (qdev->state != QEDE_DEV_START) {
1559 DP_INFO(edev, "Device not yet started\n");
1563 if (qede_stop_queues(qdev))
1564 DP_ERR(edev, "Didn't succeed to close queues\n");
1566 DP_INFO(edev, "Stopped queues\n");
1568 qdev->ops->fastpath_stop(edev);
1570 /* Bring the link down */
1571 qede_dev_set_link_state(eth_dev, false);
1573 qdev->state = QEDE_DEV_STOP;
1575 DP_INFO(edev, "dev_state is QEDE_DEV_STOP\n");