X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;ds=sidebyside;f=drivers%2Fnet%2Fqede%2Fqede_rxtx.c;h=2e181c8ca7e49de95a7f0727f99b2f564d9a735b;hb=7634c5f9156922f2929a0c310f0ae3b4f74bf177;hp=9df0d1335d37c96f37e0013af983e32016f3946e;hpb=d87246a43759526a4e01d291debda2540f91c2de;p=dpdk.git diff --git a/drivers/net/qede/qede_rxtx.c b/drivers/net/qede/qede_rxtx.c index 9df0d1335d..2e181c8ca7 100644 --- a/drivers/net/qede/qede_rxtx.c +++ b/drivers/net/qede/qede_rxtx.c @@ -10,9 +10,6 @@ static bool gro_disable = 1; /* mod_param */ -#define QEDE_FASTPATH_TX (1 << 0) -#define QEDE_FASTPATH_RX (1 << 1) - static inline int qede_alloc_rx_buffer(struct qede_rx_queue *rxq) { struct rte_mbuf *new_mb = NULL; @@ -64,7 +61,7 @@ void qede_rx_queue_release(void *rx_queue) rte_free(rxq->sw_rx_ring); rxq->sw_rx_ring = NULL; rte_free(rxq); - rx_queue = NULL; + rxq = NULL; } } @@ -135,8 +132,19 @@ qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, data_size = (uint16_t)rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM; + if (pkt_len > data_size && !dev->data->scattered_rx) { + DP_ERR(edev, "MTU %u should not exceed dataroom %u\n", + pkt_len, data_size); + rte_free(rxq); + return -EINVAL; + } + + if (dev->data->scattered_rx) + rxq->rx_buf_size = data_size; + else + rxq->rx_buf_size = pkt_len + QEDE_ETH_OVERHEAD; + qdev->mtu = pkt_len; - rxq->rx_buf_size = data_size; DP_INFO(edev, "MTU = %u ; RX buffer = %u\n", qdev->mtu, rxq->rx_buf_size); @@ -234,7 +242,7 @@ void qede_tx_queue_release(void *tx_queue) } rte_free(txq); } - tx_queue = NULL; + txq = NULL; } int @@ -424,9 +432,22 @@ qede_alloc_mem_sb(struct qede_dev *qdev, struct ecore_sb_info *sb_info, int qede_alloc_fp_resc(struct qede_dev *qdev) { + struct ecore_dev *edev = &qdev->edev; struct qede_fastpath *fp; + uint32_t num_sbs; int rc, i; + if (IS_VF(edev)) + ecore_vf_get_num_sbs(ECORE_LEADING_HWFN(edev), &num_sbs); + else + num_sbs = (ecore_cxt_get_proto_cid_count + (ECORE_LEADING_HWFN(edev), PROTOCOLID_ETH, NULL)) / 2; + + if (num_sbs == 0) { + DP_ERR(edev, "No status blocks available\n"); + return -EINVAL; + } + if (qdev->fp_array) qede_free_fp_arrays(qdev); @@ -438,7 +459,7 @@ int qede_alloc_fp_resc(struct qede_dev *qdev) for (i = 0; i < QEDE_QUEUE_CNT(qdev); i++) { fp = &qdev->fp_array[i]; - if (qede_alloc_mem_sb(qdev, fp->sb_info, i)) { + if (qede_alloc_mem_sb(qdev, fp->sb_info, i % num_sbs)) { qede_free_fp_arrays(qdev); return -ENOMEM; } @@ -502,9 +523,9 @@ static void qede_prandom_bytes(uint32_t *buff, size_t bytes) buff[i] = rand(); } -static int -qede_config_rss(struct rte_eth_dev *eth_dev, - struct qed_update_vport_rss_params *rss_params) +static bool +qede_check_vport_rss_enable(struct rte_eth_dev *eth_dev, + struct qed_update_vport_rss_params *rss_params) { struct rte_eth_rss_conf rss_conf; enum rte_eth_rx_mq_mode mode = eth_dev->data->dev_conf.rxmode.mq_mode; @@ -515,63 +536,53 @@ qede_config_rss(struct rte_eth_dev *eth_dev, uint64_t hf; uint32_t *key; + PMD_INIT_FUNC_TRACE(edev); + rss_conf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf; key = (uint32_t *)rss_conf.rss_key; hf = rss_conf.rss_hf; - PMD_INIT_FUNC_TRACE(edev); /* Check if RSS conditions are met. * Note: Even though its meaningless to enable RSS with one queue, it * could be used to produce RSS Hash, so skipping that check. 
*/ - if (!(mode & ETH_MQ_RX_RSS)) { DP_INFO(edev, "RSS flag is not set\n"); - return -EINVAL; + return false; } - DP_INFO(edev, "RSS flag is set\n"); - - if (rss_conf.rss_hf == 0) - DP_NOTICE(edev, false, "RSS hash function = 0, disables RSS\n"); - - if (rss_conf.rss_key != NULL) - memcpy(qdev->rss_params.rss_key, rss_conf.rss_key, - rss_conf.rss_key_len); + if (hf == 0) { + DP_INFO(edev, "Request to disable RSS\n"); + return false; + } memset(rss_params, 0, sizeof(*rss_params)); for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) rss_params->rss_ind_table[i] = qede_rxfh_indir_default(i, - QEDE_RSS_CNT(qdev)); + QEDE_RSS_COUNT(qdev)); - /* key and protocols */ - if (rss_conf.rss_key == NULL) + if (!key) qede_prandom_bytes(rss_params->rss_key, sizeof(rss_params->rss_key)); else memcpy(rss_params->rss_key, rss_conf.rss_key, rss_conf.rss_key_len); - rss_caps = 0; - rss_caps |= (hf & ETH_RSS_IPV4) ? ECORE_RSS_IPV4 : 0; - rss_caps |= (hf & ETH_RSS_IPV6) ? ECORE_RSS_IPV6 : 0; - rss_caps |= (hf & ETH_RSS_IPV6_EX) ? ECORE_RSS_IPV6 : 0; - rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? ECORE_RSS_IPV4_TCP : 0; - rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? ECORE_RSS_IPV6_TCP : 0; - rss_caps |= (hf & ETH_RSS_IPV6_TCP_EX) ? ECORE_RSS_IPV6_TCP : 0; + qede_init_rss_caps(&rss_caps, hf); rss_params->rss_caps = rss_caps; - DP_INFO(edev, "RSS check passes\n"); + DP_INFO(edev, "RSS conditions are met\n"); - return 0; + return true; } static int qede_start_queues(struct rte_eth_dev *eth_dev, bool clear_stats) { struct qede_dev *qdev = eth_dev->data->dev_private; struct ecore_dev *edev = &qdev->edev; + struct ecore_queue_start_common_params q_params; struct qed_update_vport_rss_params *rss_params = &qdev->rss_params; struct qed_dev_info *qed_info = &qdev->dev_info.common; struct qed_update_vport_params vport_update_params; @@ -591,12 +602,15 @@ static int qede_start_queues(struct rte_eth_dev *eth_dev, bool clear_stats) page_cnt = ecore_chain_get_page_cnt(&fp->rxq-> rx_comp_ring); + memset(&q_params, 0, sizeof(q_params)); + q_params.queue_id = i; + q_params.vport_id = 0; + q_params.sb = fp->sb_info->igu_sb_id; + q_params.sb_idx = RX_PI; + ecore_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0); - rc = qdev->ops->q_rx_start(edev, i, fp->rxq->queue_id, - 0, - fp->sb_info->igu_sb_id, - RX_PI, + rc = qdev->ops->q_rx_start(edev, i, &q_params, fp->rxq->rx_buf_size, fp->rxq->rx_bd_ring.p_phys_addr, p_phys_table, @@ -618,15 +632,20 @@ static int qede_start_queues(struct rte_eth_dev *eth_dev, bool clear_stats) continue; for (tc = 0; tc < qdev->num_tc; tc++) { txq = fp->txqs[tc]; - txq_index = tc * QEDE_RSS_CNT(qdev) + i; + txq_index = tc * QEDE_RSS_COUNT(qdev) + i; p_phys_table = ecore_chain_get_pbl_phys(&txq->tx_pbl); page_cnt = ecore_chain_get_page_cnt(&txq->tx_pbl); - rc = qdev->ops->q_tx_start(edev, i, txq->queue_id, - 0, - fp->sb_info->igu_sb_id, - TX_PI(tc), - p_phys_table, page_cnt, + + memset(&q_params, 0, sizeof(q_params)); + q_params.queue_id = txq->queue_id; + q_params.vport_id = 0; + q_params.sb = fp->sb_info->igu_sb_id; + q_params.sb_idx = TX_PI(tc); + + rc = qdev->ops->q_tx_start(edev, i, &q_params, + p_phys_table, + page_cnt, /* **pp_doorbell */ &txq->doorbell_addr); if (rc) { DP_ERR(edev, "Start txq %u failed %d\n", @@ -663,14 +682,11 @@ static int qede_start_queues(struct rte_eth_dev *eth_dev, bool clear_stats) vport_update_params.tx_switching_flg = 1; } - if (!qede_config_rss(eth_dev, rss_params)) { + if (qede_check_vport_rss_enable(eth_dev, rss_params)) { vport_update_params.update_rss_flg = 1; - qdev->rss_enabled = 
1; - DP_INFO(edev, "Updating RSS flag\n"); } else { qdev->rss_enabled = 0; - DP_INFO(edev, "Not Updating RSS flag\n"); } rte_memcpy(&vport_update_params.rss_params, rss_params, @@ -818,6 +834,59 @@ static inline uint32_t qede_rx_cqe_to_pkt_type(uint16_t flags) return RTE_PTYPE_L2_ETHER | p_type; } +int qede_process_sg_pkts(void *p_rxq, struct rte_mbuf *rx_mb, + int num_segs, uint16_t pkt_len) +{ + struct qede_rx_queue *rxq = p_rxq; + struct qede_dev *qdev = rxq->qdev; + struct ecore_dev *edev = &qdev->edev; + uint16_t sw_rx_index, cur_size; + + register struct rte_mbuf *seg1 = NULL; + register struct rte_mbuf *seg2 = NULL; + + seg1 = rx_mb; + while (num_segs) { + cur_size = pkt_len > rxq->rx_buf_size ? + rxq->rx_buf_size : pkt_len; + if (!cur_size) { + PMD_RX_LOG(DEBUG, rxq, + "SG packet, len and num BD mismatch\n"); + qede_recycle_rx_bd_ring(rxq, qdev, num_segs); + return -EINVAL; + } + + if (qede_alloc_rx_buffer(rxq)) { + uint8_t index; + + PMD_RX_LOG(DEBUG, rxq, "Buffer allocation failed\n"); + index = rxq->port_id; + rte_eth_devices[index].data->rx_mbuf_alloc_failed++; + rxq->rx_alloc_errors++; + return -ENOMEM; + } + + sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS(rxq); + seg2 = rxq->sw_rx_ring[sw_rx_index].mbuf; + qede_rx_bd_ring_consume(rxq); + pkt_len -= cur_size; + seg2->data_len = cur_size; + seg1->next = seg2; + seg1 = seg1->next; + num_segs--; + rxq->rx_segs++; + continue; + } + seg1 = NULL; + + if (pkt_len) + PMD_RX_LOG(DEBUG, rxq, + "Mapped all BDs of jumbo, but still have %d bytes\n", + pkt_len); + + return ECORE_SUCCESS; +} + uint16_t qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) { @@ -830,12 +899,12 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) union eth_rx_cqe *cqe; struct eth_fast_path_rx_reg_cqe *fp_cqe; register struct rte_mbuf *rx_mb = NULL; + register struct rte_mbuf *seg1 = NULL; enum eth_rx_cqe_type cqe_type; - uint16_t len, pad; - uint16_t preload_idx; - uint8_t csum_flag; - uint16_t parse_flag; + uint16_t len, pad, preload_idx, pkt_len, parse_flag; + uint8_t csum_flag, num_segs; enum rss_hash_type htype; + int ret; hw_comp_cons = rte_le_to_cpu_16(*rxq->hw_cons_ptr); sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring); @@ -905,20 +974,33 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) qede_rx_bd_ring_consume(rxq); + if (fp_cqe->bd_num > 1) { + pkt_len = rte_le_to_cpu_16(fp_cqe->pkt_len); + num_segs = fp_cqe->bd_num - 1; + + rxq->rx_segs++; + + pkt_len -= len; + seg1 = rx_mb; + ret = qede_process_sg_pkts(p_rxq, seg1, num_segs, + pkt_len); + if (ret != ECORE_SUCCESS) { + qede_recycle_rx_bd_ring(rxq, qdev, + fp_cqe->bd_num); + goto next_cqe; + } + } + /* Prefetch next mbuf while processing current one. 
*/ preload_idx = rxq->sw_rx_cons & NUM_RX_BDS(rxq); rte_prefetch0(rxq->sw_rx_ring[preload_idx].mbuf); - if (fp_cqe->bd_num != 1) - PMD_RX_LOG(DEBUG, rxq, - "Jumbo-over-BD packet not supported\n"); - /* Update MBUF fields */ rx_mb->ol_flags = 0; rx_mb->data_off = pad + RTE_PKTMBUF_HEADROOM; - rx_mb->nb_segs = 1; + rx_mb->nb_segs = fp_cqe->bd_num; rx_mb->data_len = len; - rx_mb->pkt_len = len; + rx_mb->pkt_len = fp_cqe->pkt_len; rx_mb->port = rxq->port_id; rx_mb->packet_type = qede_rx_cqe_to_pkt_type(parse_flag); @@ -963,6 +1045,8 @@ next_cqe: qede_update_rx_prod(qdev, rxq); + rxq->rcv_pkts += rx_pkt; + PMD_RX_LOG(DEBUG, rxq, "rx_pkts=%u core=%d\n", rx_pkt, rte_lcore_id()); return rx_pkt; @@ -971,24 +1055,28 @@ next_cqe: static inline int qede_free_tx_pkt(struct ecore_dev *edev, struct qede_tx_queue *txq) { - uint16_t idx = TX_CONS(txq); + uint16_t nb_segs, idx = TX_CONS(txq); struct eth_tx_bd *tx_data_bd; struct rte_mbuf *mbuf = txq->sw_tx_ring[idx].mbuf; if (unlikely(!mbuf)) { + PMD_TX_LOG(ERR, txq, "null mbuf\n"); PMD_TX_LOG(ERR, txq, - "null mbuf nb_tx_desc %u nb_tx_avail %u " - "sw_tx_cons %u sw_tx_prod %u\n", + "tx_desc %u tx_avail %u tx_cons %u tx_prod %u\n", txq->nb_tx_desc, txq->nb_tx_avail, idx, TX_PROD(txq)); return -1; } - /* Free now */ - rte_pktmbuf_free_seg(mbuf); + nb_segs = mbuf->nb_segs; + while (nb_segs) { + /* It's like consuming rxbuf in recv() */ + ecore_chain_consume(&txq->tx_pbl); + txq->nb_tx_avail++; + nb_segs--; + } + rte_pktmbuf_free(mbuf); txq->sw_tx_ring[idx].mbuf = NULL; - ecore_chain_consume(&txq->tx_pbl); - txq->nb_tx_avail++; return 0; } @@ -998,18 +1086,16 @@ qede_process_tx_compl(struct ecore_dev *edev, struct qede_tx_queue *txq) { uint16_t tx_compl = 0; uint16_t hw_bd_cons; - int rc; hw_bd_cons = rte_le_to_cpu_16(*txq->hw_cons_ptr); rte_compiler_barrier(); while (hw_bd_cons != ecore_chain_get_cons_idx(&txq->tx_pbl)) { - rc = qede_free_tx_pkt(edev, txq); - if (rc) { - DP_NOTICE(edev, false, - "hw_bd_cons = %d, chain_cons=%d\n", - hw_bd_cons, - ecore_chain_get_cons_idx(&txq->tx_pbl)); + if (qede_free_tx_pkt(edev, txq)) { + PMD_TX_LOG(ERR, txq, + "hw_bd_cons = %u, chain_cons = %u\n", + hw_bd_cons, + ecore_chain_get_cons_idx(&txq->tx_pbl)); break; } txq->sw_tx_cons++; /* Making TXD available */ @@ -1021,6 +1107,55 @@ qede_process_tx_compl(struct ecore_dev *edev, struct qede_tx_queue *txq) return tx_compl; } +/* Populate scatter gather buffer descriptor fields */ +static inline uint16_t qede_encode_sg_bd(struct qede_tx_queue *p_txq, + struct rte_mbuf *m_seg, + uint16_t count, + struct eth_tx_1st_bd *bd1) +{ + struct qede_tx_queue *txq = p_txq; + struct eth_tx_2nd_bd *bd2 = NULL; + struct eth_tx_3rd_bd *bd3 = NULL; + struct eth_tx_bd *tx_bd = NULL; + uint16_t nb_segs = count; + dma_addr_t mapping; + + /* Check for scattered buffers */ + while (m_seg) { + if (nb_segs == 1) { + bd2 = (struct eth_tx_2nd_bd *) + ecore_chain_produce(&txq->tx_pbl); + memset(bd2, 0, sizeof(*bd2)); + mapping = rte_mbuf_data_dma_addr(m_seg); + bd2->addr.hi = rte_cpu_to_le_32(U64_HI(mapping)); + bd2->addr.lo = rte_cpu_to_le_32(U64_LO(mapping)); + bd2->nbytes = rte_cpu_to_le_16(m_seg->data_len); + } else if (nb_segs == 2) { + bd3 = (struct eth_tx_3rd_bd *) + ecore_chain_produce(&txq->tx_pbl); + memset(bd3, 0, sizeof(*bd3)); + mapping = rte_mbuf_data_dma_addr(m_seg); + bd3->addr.hi = rte_cpu_to_le_32(U64_HI(mapping)); + bd3->addr.lo = rte_cpu_to_le_32(U64_LO(mapping)); + bd3->nbytes = rte_cpu_to_le_16(m_seg->data_len); + } else { + tx_bd = (struct eth_tx_bd *) + 
ecore_chain_produce(&txq->tx_pbl); + memset(tx_bd, 0, sizeof(*tx_bd)); + mapping = rte_mbuf_data_dma_addr(m_seg); + tx_bd->addr.hi = rte_cpu_to_le_32(U64_HI(mapping)); + tx_bd->addr.lo = rte_cpu_to_le_32(U64_LO(mapping)); + tx_bd->nbytes = rte_cpu_to_le_16(m_seg->data_len); + } + nb_segs++; + bd1->data.nbds = nb_segs; + m_seg = m_seg->next; + } + + /* Return total scattered buffers */ + return nb_segs; +} + uint16_t qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) { @@ -1028,12 +1163,14 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) struct qede_dev *qdev = txq->qdev; struct ecore_dev *edev = &qdev->edev; struct qede_fastpath *fp; - struct eth_tx_1st_bd *first_bd; + struct eth_tx_1st_bd *bd1; + struct rte_mbuf *m_seg = NULL; uint16_t nb_tx_pkts; uint16_t nb_pkt_sent = 0; uint16_t bd_prod; uint16_t idx; uint16_t tx_count; + uint16_t nb_segs = 0; fp = &qdev->fp_array[QEDE_RSS_COUNT(qdev) + txq->queue_id]; @@ -1043,7 +1180,8 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) (void)qede_process_tx_compl(edev, txq); } - nb_tx_pkts = RTE_MIN(nb_pkts, (txq->nb_tx_avail / MAX_NUM_TX_BDS)); + nb_tx_pkts = RTE_MIN(nb_pkts, (txq->nb_tx_avail / + ETH_TX_MAX_BDS_PER_NON_LSO_PACKET)); if (unlikely(nb_tx_pkts == 0)) { PMD_TX_LOG(DEBUG, txq, "Out of BDs nb_pkts=%u avail=%u\n", nb_pkts, txq->nb_tx_avail); @@ -1055,41 +1193,53 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) /* Fill the entry in the SW ring and the BDs in the FW ring */ idx = TX_PROD(txq); struct rte_mbuf *mbuf = *tx_pkts++; + txq->sw_tx_ring[idx].mbuf = mbuf; - first_bd = (struct eth_tx_1st_bd *) - ecore_chain_produce(&txq->tx_pbl); - first_bd->data.bd_flags.bitfields = - 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT; + bd1 = (struct eth_tx_1st_bd *)ecore_chain_produce(&txq->tx_pbl); + /* Zero init struct fields */ + bd1->data.bd_flags.bitfields = 0; + bd1->data.bitfields = 0; + + bd1->data.bd_flags.bitfields = + 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT; /* Map MBUF linear data for DMA and set in the first BD */ - QEDE_BD_SET_ADDR_LEN(first_bd, rte_mbuf_data_dma_addr(mbuf), - mbuf->data_len); + QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_dma_addr(mbuf), + mbuf->pkt_len); /* Descriptor based VLAN insertion */ if (mbuf->ol_flags & (PKT_TX_VLAN_PKT | PKT_TX_QINQ_PKT)) { - first_bd->data.vlan = rte_cpu_to_le_16(mbuf->vlan_tci); - first_bd->data.bd_flags.bitfields |= + bd1->data.vlan = rte_cpu_to_le_16(mbuf->vlan_tci); + bd1->data.bd_flags.bitfields |= 1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT; } /* Offload the IP checksum in the hardware */ if (mbuf->ol_flags & PKT_TX_IP_CKSUM) { - first_bd->data.bd_flags.bitfields |= + bd1->data.bd_flags.bitfields |= 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT; } /* L4 checksum offload (tcp or udp) */ if (mbuf->ol_flags & (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) { - first_bd->data.bd_flags.bitfields |= + bd1->data.bd_flags.bitfields |= 1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT; /* IPv6 + extn. 
-> later */ } - first_bd->data.nbds = MAX_NUM_TX_BDS; + + /* Handle fragmented MBUF */ + m_seg = mbuf->next; + nb_segs++; + bd1->data.nbds = nb_segs; + /* Encode scatter gather buffer descriptors if required */ + nb_segs = qede_encode_sg_bd(txq, m_seg, nb_segs, bd1); + txq->nb_tx_avail = txq->nb_tx_avail - nb_segs; + nb_segs = 0; txq->sw_tx_prod++; rte_prefetch0(txq->sw_tx_ring[TX_PROD(txq)].mbuf); - txq->nb_tx_avail--; bd_prod = rte_cpu_to_le_16(ecore_chain_get_prod_idx(&txq->tx_pbl)); nb_pkt_sent++; + txq->xmit_pkts++; } /* Write value of prod idx into bd_prod */ @@ -1124,7 +1274,7 @@ static void qede_init_fp_queue(struct rte_eth_dev *eth_dev) if (fp->type & QEDE_FASTPATH_TX) { for (tc = 0; tc < qdev->num_tc; tc++) { - txq_index = tc * QEDE_TSS_CNT(qdev) + txq; + txq_index = tc * QEDE_TSS_COUNT(qdev) + txq; fp->txqs[tc] = eth_dev->data->tx_queues[txq_index]; fp->txqs[tc]->queue_id = txq_index; @@ -1326,6 +1476,7 @@ int qede_reset_fp_rings(struct qede_dev *qdev) if (fp->type & QEDE_FASTPATH_TX) { for (tc = 0; tc < qdev->num_tc; tc++) { txq = fp->txqs[tc]; + qede_tx_queue_release_mbufs(txq); ecore_chain_reset(&txq->tx_pbl); txq->sw_tx_cons = 0; txq->sw_tx_prod = 0; @@ -1338,29 +1489,26 @@ int qede_reset_fp_rings(struct qede_dev *qdev) } /* This function frees all memory of a single fp */ -static void qede_free_mem_fp(struct rte_eth_dev *eth_dev, - struct qede_fastpath *fp) -{ - struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); - uint8_t tc; - - qede_rx_queue_release(fp->rxq); - for (tc = 0; tc < qdev->num_tc; tc++) { - qede_tx_queue_release(fp->txqs[tc]); - eth_dev->data->tx_queues[tc] = NULL; - } -} - void qede_free_mem_load(struct rte_eth_dev *eth_dev) { struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); struct qede_fastpath *fp; - uint8_t rss_id; + uint16_t txq_idx; + uint8_t id; + uint8_t tc; - for_each_queue(rss_id) { - fp = &qdev->fp_array[rss_id]; - qede_free_mem_fp(eth_dev, fp); - eth_dev->data->rx_queues[rss_id] = NULL; + for_each_queue(id) { + fp = &qdev->fp_array[id]; + if (fp->type & QEDE_FASTPATH_RX) { + qede_rx_queue_release(fp->rxq); + eth_dev->data->rx_queues[id] = NULL; + } else { + for (tc = 0; tc < qdev->num_tc; tc++) { + txq_idx = fp->txqs[tc]->queue_id; + qede_tx_queue_release(fp->txqs[tc]); + eth_dev->data->tx_queues[txq_idx] = NULL; + } + } } }
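
Note on the RX buffer sizing introduced in qede_rx_queue_setup() above: with scattered RX disabled, the whole frame plus QEDE_ETH_OVERHEAD must fit in a single mbuf, so queue setup fails when the configured MTU exceeds the mempool data room; with scattered RX enabled, every buffer is simply sized to the data room and larger frames are spread across several BDs. A minimal sketch of that rule, assuming the QEDE_ETH_OVERHEAD macro and the -EINVAL convention from the patch; the helper name and the <stdbool.h> parameter are illustrative, not part of the change:

static int qede_calc_rx_buf_size(bool scattered_rx, uint16_t pkt_len,
				 uint16_t mbuf_data_room,
				 uint16_t *rx_buf_size)
{
	if (scattered_rx) {
		/* Frames larger than one buffer are chained across BDs */
		*rx_buf_size = mbuf_data_room;
		return 0;
	}

	if (pkt_len > mbuf_data_room)
		return -EINVAL;	/* frame must fit in a single buffer */

	*rx_buf_size = pkt_len + QEDE_ETH_OVERHEAD;
	return 0;
}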
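
The chained-mbuf receive path added by qede_process_sg_pkts() only runs when dev->data->scattered_rx is set, so an application that wants frames larger than the mbuf data room has to request scattered RX when configuring the port. A usage sketch against the ethdev API of this DPDK generation; the struct rte_eth_rxmode field names are assumed from that era's API, and port_id and the queue counts are placeholders:

	struct rte_eth_conf port_conf;

	memset(&port_conf, 0, sizeof(port_conf));
	port_conf.rxmode.max_rx_pkt_len = 9000;	/* desired frame size */
	port_conf.rxmode.jumbo_frame = 1;
	port_conf.rxmode.enable_scatter = 1;	/* lets the PMD set scattered_rx */

	if (rte_eth_dev_configure(port_id, 1, 1, &port_conf) < 0)
		rte_exit(EXIT_FAILURE, "cannot configure port %u\n", port_id);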
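
The RSS rework above drops the open-coded mapping from rte_eth rss_hf bits to ECORE RSS capabilities in favour of a qede_init_rss_caps() call whose definition lies outside the hunks shown here. A sketch of what such a helper looks like, reconstructed from the deleted lines; the uint8_t/uint64_t parameter types, and any additional hash types the real helper may handle, are assumptions:

static void qede_init_rss_caps(uint8_t *rss_caps, uint64_t hf)
{
	*rss_caps = 0;
	*rss_caps |= (hf & ETH_RSS_IPV4)             ? ECORE_RSS_IPV4 : 0;
	*rss_caps |= (hf & ETH_RSS_IPV6)             ? ECORE_RSS_IPV6 : 0;
	*rss_caps |= (hf & ETH_RSS_IPV6_EX)          ? ECORE_RSS_IPV6 : 0;
	*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? ECORE_RSS_IPV4_TCP : 0;
	*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? ECORE_RSS_IPV6_TCP : 0;
	*rss_caps |= (hf & ETH_RSS_IPV6_TCP_EX)      ? ECORE_RSS_IPV6_TCP : 0;
}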