X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fqede%2Fqede_rxtx.c;h=298f4e3e427378c582bc1585cda5e60c5b42ebfb;hb=d345d6c9575c337189a755c8692be4dd3e1ed708;hp=0002e2c1e79fdf72ed6d804983e8b355d0a99c2f;hpb=2c41740bf19ed0eb16d883557a24a654c804a0b4;p=dpdk.git

diff --git a/drivers/net/qede/qede_rxtx.c b/drivers/net/qede/qede_rxtx.c
index 0002e2c1e7..298f4e3e42 100644
--- a/drivers/net/qede/qede_rxtx.c
+++ b/drivers/net/qede/qede_rxtx.c
@@ -24,8 +24,7 @@ static inline int qede_alloc_rx_buffer(struct qede_rx_queue *rxq)
 			   rte_mempool_in_use_count(rxq->mb_pool));
 		return -ENOMEM;
 	}
-	rxq->sw_rx_ring[idx].mbuf = new_mb;
-	rxq->sw_rx_ring[idx].page_offset = 0;
+	rxq->sw_rx_ring[idx] = new_mb;
 	mapping = rte_mbuf_data_iova_default(new_mb);
 	/* Advance PROD and get BD pointer */
 	rx_bd = (struct eth_rx_bd *)ecore_chain_produce(&rxq->rx_bd_ring);
@@ -39,17 +38,24 @@ static inline int qede_alloc_rx_buffer(struct qede_rx_queue *rxq)
 
 static inline int qede_alloc_rx_bulk_mbufs(struct qede_rx_queue *rxq, int count)
 {
-	void *obj_p[QEDE_MAX_BULK_ALLOC_COUNT] __rte_cache_aligned;
 	struct rte_mbuf *mbuf = NULL;
 	struct eth_rx_bd *rx_bd;
 	dma_addr_t mapping;
 	int i, ret = 0;
 	uint16_t idx;
+	uint16_t mask = NUM_RX_BDS(rxq);
 
 	if (count > QEDE_MAX_BULK_ALLOC_COUNT)
 		count = QEDE_MAX_BULK_ALLOC_COUNT;
 
-	ret = rte_mempool_get_bulk(rxq->mb_pool, obj_p, count);
+	idx = rxq->sw_rx_prod & NUM_RX_BDS(rxq);
+
+	if (count > mask - idx + 1)
+		count = mask - idx + 1;
+
+	ret = rte_mempool_get_bulk(rxq->mb_pool, (void **)&rxq->sw_rx_ring[idx],
+				   count);
+
 	if (unlikely(ret)) {
 		PMD_RX_LOG(ERR, rxq,
 			   "Failed to allocate %d rx buffers "
@@ -63,20 +69,17 @@ static inline int qede_alloc_rx_bulk_mbufs(struct qede_rx_queue *rxq, int count)
 	}
 
 	for (i = 0; i < count; i++) {
-		mbuf = obj_p[i];
-		if (likely(i < count - 1))
-			rte_prefetch0(obj_p[i + 1]);
+		rte_prefetch0(rxq->sw_rx_ring[(idx + 1) & NUM_RX_BDS(rxq)]);
+		mbuf = rxq->sw_rx_ring[idx & NUM_RX_BDS(rxq)];
 
-		idx = rxq->sw_rx_prod & NUM_RX_BDS(rxq);
-		rxq->sw_rx_ring[idx].mbuf = mbuf;
-		rxq->sw_rx_ring[idx].page_offset = 0;
 		mapping = rte_mbuf_data_iova_default(mbuf);
 		rx_bd = (struct eth_rx_bd *)
 			ecore_chain_produce(&rxq->rx_bd_ring);
 		rx_bd->addr.hi = rte_cpu_to_le_32(U64_HI(mapping));
 		rx_bd->addr.lo = rte_cpu_to_le_32(U64_LO(mapping));
-		rxq->sw_rx_prod++;
+		idx++;
 	}
+	rxq->sw_rx_prod = idx;
 
 	return 0;
 }
@@ -309,9 +312,9 @@ static void qede_rx_queue_release_mbufs(struct qede_rx_queue *rxq)
 
 	if (rxq->sw_rx_ring) {
 		for (i = 0; i < rxq->nb_rx_desc; i++) {
-			if (rxq->sw_rx_ring[i].mbuf) {
-				rte_pktmbuf_free(rxq->sw_rx_ring[i].mbuf);
-				rxq->sw_rx_ring[i].mbuf = NULL;
+			if (rxq->sw_rx_ring[i]) {
+				rte_pktmbuf_free(rxq->sw_rx_ring[i]);
+				rxq->sw_rx_ring[i] = NULL;
 			}
 		}
 	}
@@ -882,38 +885,6 @@ qede_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
 	return rc;
 }
 
-static inline uint16_t
-qede_free_tx_pkt(struct qede_tx_queue *txq)
-{
-	struct rte_mbuf *mbuf;
-	uint16_t nb_segs;
-	uint16_t idx;
-	uint16_t ret;
-
-	idx = TX_CONS(txq);
-	mbuf = txq->sw_tx_ring[idx];
-	if (mbuf) {
-		nb_segs = mbuf->nb_segs;
-		PMD_TX_LOG(DEBUG, txq, "nb_segs to free %u\n", nb_segs);
-
-		ret = nb_segs;
-		while (nb_segs) {
-			/* It's like consuming rxbuf in recv() */
-			ecore_chain_consume(&txq->tx_pbl);
-			nb_segs--;
-		}
-
-		rte_pktmbuf_free(mbuf);
-		txq->sw_tx_ring[idx] = NULL;
-		txq->sw_tx_cons++;
-		PMD_TX_LOG(DEBUG, txq, "Freed tx packet\n");
-	} else {
-		ecore_chain_consume(&txq->tx_pbl);
-		ret = 1;
-	}
-	return ret;
-}
-
 static inline void
 qede_process_tx_compl(__rte_unused struct ecore_dev *edev,
 		      struct qede_tx_queue *txq)
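The three Rx hunks above drop the on-stack obj_p[] bounce buffer: the mempool refill now lands directly in rxq->sw_rx_ring, which only works because the request is first clamped at the ring's wrap point so the destination slice stays contiguous. Below is a minimal standalone sketch of that clamping; fake_get_bulk() and ring_bulk_fill() are hypothetical stand-ins for rte_mempool_get_bulk() and the driver's refill path, not DPDK code.

/*
 * Clamp a bulk refill so [idx, idx + count) never crosses the wrap
 * point of a power-of-two ring; the caller simply asks again later
 * for the remainder, as the Rx path does on the next poll.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define RING_SIZE 8			/* must be a power of two */
#define RING_MASK (RING_SIZE - 1)	/* plays the role of NUM_RX_BDS() */

static void *ring[RING_SIZE];		/* stand-in for rxq->sw_rx_ring */
static uint16_t prod;			/* stand-in for rxq->sw_rx_prod */

/* Stand-in for rte_mempool_get_bulk(): fill a contiguous array. */
static int fake_get_bulk(void **tbl, int count)
{
	for (int i = 0; i < count; i++)
		tbl[i] = malloc(1);
	return 0;
}

static int ring_bulk_fill(int count)
{
	uint16_t idx = prod & RING_MASK;

	/* Same arithmetic as the patch: stop at the end of the array. */
	if (count > RING_MASK - idx + 1)
		count = RING_MASK - idx + 1;

	if (fake_get_bulk(&ring[idx], count))
		return -1;

	prod = (uint16_t)(prod + count);
	return count;
}

int main(void)
{
	prod = 6;			/* two free slots before the wrap */
	int n = ring_bulk_fill(5);
	printf("requested 5, filled %d (stops at the wrap)\n", n);
	assert(n == 2);

	for (int i = 0; i < RING_SIZE; i++)
		free(ring[i]);		/* free(NULL) is a no-op */
	return 0;
}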
@@ -921,8 +892,14 @@ qede_process_tx_compl(__rte_unused struct ecore_dev *edev,
 	uint16_t hw_bd_cons;
 	uint16_t sw_tx_cons;
 	uint16_t remaining;
+	uint16_t mask;
+	struct rte_mbuf *mbuf;
+	uint16_t nb_segs;
+	uint16_t idx;
+	uint16_t first_idx;
 
 	rte_compiler_barrier();
+	rte_prefetch0(txq->hw_cons_ptr);
 	sw_tx_cons = ecore_chain_get_cons_idx(&txq->tx_pbl);
 	hw_bd_cons = rte_le_to_cpu_16(*txq->hw_cons_ptr);
 #ifdef RTE_LIBRTE_QEDE_DEBUG_TX
@@ -930,11 +907,45 @@ qede_process_tx_compl(__rte_unused struct ecore_dev *edev,
 		   abs(hw_bd_cons - sw_tx_cons));
 #endif
 
+	mask = NUM_TX_BDS(txq);
+	idx = txq->sw_tx_cons & mask;
+
 	remaining = hw_bd_cons - sw_tx_cons;
 	txq->nb_tx_avail += remaining;
+	first_idx = idx;
+
+	while (remaining) {
+		mbuf = txq->sw_tx_ring[idx];
+		RTE_ASSERT(mbuf);
+		nb_segs = mbuf->nb_segs;
+		remaining -= nb_segs;
+
+		/* Prefetch the next mbuf. Note that at least the last 4 mbufs
+		 * that are prefetched will not be used in the current call.
+		 */
+		rte_mbuf_prefetch_part1(txq->sw_tx_ring[(idx + 4) & mask]);
+		rte_mbuf_prefetch_part2(txq->sw_tx_ring[(idx + 4) & mask]);
+
+		PMD_TX_LOG(DEBUG, txq, "nb_segs to free %u\n", nb_segs);
 
-	while (remaining)
-		remaining -= qede_free_tx_pkt(txq);
+		while (nb_segs) {
+			ecore_chain_consume(&txq->tx_pbl);
+			nb_segs--;
+		}
+
+		idx = (idx + 1) & mask;
+		PMD_TX_LOG(DEBUG, txq, "Freed tx packet\n");
+	}
+	txq->sw_tx_cons = idx;
+
+	if (first_idx > idx) {
+		rte_pktmbuf_free_bulk(&txq->sw_tx_ring[first_idx],
+				      mask - first_idx + 1);
+		rte_pktmbuf_free_bulk(&txq->sw_tx_ring[0], idx);
+	} else {
+		rte_pktmbuf_free_bulk(&txq->sw_tx_ring[first_idx],
+				      idx - first_idx);
+	}
 }
 
 static int qede_drain_txq(struct qede_dev *qdev,
@@ -1310,18 +1321,15 @@ static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq)
 
 static inline void
 qede_reuse_page(__rte_unused struct qede_dev *qdev,
-		struct qede_rx_queue *rxq, struct qede_rx_entry *curr_cons)
+		struct qede_rx_queue *rxq, struct rte_mbuf *curr_cons)
 {
 	struct eth_rx_bd *rx_bd_prod = ecore_chain_produce(&rxq->rx_bd_ring);
 	uint16_t idx = rxq->sw_rx_prod & NUM_RX_BDS(rxq);
-	struct qede_rx_entry *curr_prod;
 	dma_addr_t new_mapping;
 
-	curr_prod = &rxq->sw_rx_ring[idx];
-	*curr_prod = *curr_cons;
+	rxq->sw_rx_ring[idx] = curr_cons;
 
-	new_mapping = rte_mbuf_data_iova_default(curr_prod->mbuf) +
-		      curr_prod->page_offset;
+	new_mapping = rte_mbuf_data_iova_default(curr_cons);
 
 	rx_bd_prod->addr.hi = rte_cpu_to_le_32(U64_HI(new_mapping));
 	rx_bd_prod->addr.lo = rte_cpu_to_le_32(U64_LO(new_mapping));
@@ -1333,10 +1341,10 @@ static inline void
 qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq,
 			struct qede_dev *qdev, uint8_t count)
 {
-	struct qede_rx_entry *curr_cons;
+	struct rte_mbuf *curr_cons;
 
 	for (; count > 0; count--) {
-		curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS(rxq)];
+		curr_cons = rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS(rxq)];
 		qede_reuse_page(qdev, rxq, curr_cons);
 		qede_rx_bd_ring_consume(rxq);
 	}
@@ -1358,7 +1366,7 @@ qede_rx_process_tpa_cmn_cont_end_cqe(__rte_unused struct qede_dev *qdev,
 	if (rte_le_to_cpu_16(len)) {
 		tpa_info = &rxq->tpa_info[agg_index];
 		cons_idx = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
-		curr_frag = rxq->sw_rx_ring[cons_idx].mbuf;
+		curr_frag = rxq->sw_rx_ring[cons_idx];
 		assert(curr_frag);
 		curr_frag->nb_segs = 1;
 		curr_frag->pkt_len = rte_le_to_cpu_16(len);
@@ -1490,7 +1498,7 @@ qede_process_sg_pkts(void *p_rxq, struct rte_mbuf *rx_mb,
 			return -EINVAL;
 		}
 		sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
-		seg2 = rxq->sw_rx_ring[sw_rx_index].mbuf;
+		seg2 = rxq->sw_rx_ring[sw_rx_index];
 		qede_rx_bd_ring_consume(rxq);
 		pkt_len -= cur_size;
 		seg2->data_len = cur_size;
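The completion rework above inlines the old qede_free_tx_pkt() loop and defers the actual mbuf frees to rte_pktmbuf_free_bulk(), which takes one flat array; since the consumed range [first_idx, idx) can wrap past the end of the ring, it is split into at most two contiguous calls. A standalone sketch of that split follows; free_bulk() is a hypothetical stand-in for rte_pktmbuf_free_bulk().

#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 8			/* power of two, as NUM_TX_BDS() implies */
#define RING_MASK (RING_SIZE - 1)

static void *ring[RING_SIZE];		/* stand-in for txq->sw_tx_ring */

/* Stand-in for rte_pktmbuf_free_bulk(): frees one contiguous array. */
static void free_bulk(void **tbl, unsigned int count)
{
	printf("free %u entries starting at index %td\n", count, tbl - ring);
}

/* Free the consumed range [first_idx, idx), wrap-aware. */
static void free_range(uint16_t first_idx, uint16_t idx)
{
	if (first_idx > idx) {
		/* Range wraps: tail of the array first, then the head. */
		free_bulk(&ring[first_idx], RING_MASK - first_idx + 1);
		free_bulk(&ring[0], idx);
	} else {
		free_bulk(&ring[first_idx], idx - first_idx);
	}
}

int main(void)
{
	free_range(2, 5);	/* one contiguous segment: indexes 2..4 */
	free_range(6, 3);	/* wraps: indexes 6..7, then 0..2 */
	return 0;
}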
@@ -1609,7 +1617,7 @@ qede_recv_pkts_regular(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 
 		/* Get the data from the SW ring */
 		sw_rx_index = rxq->sw_rx_cons & num_rx_bds;
-		rx_mb = rxq->sw_rx_ring[sw_rx_index].mbuf;
+		rx_mb = rxq->sw_rx_ring[sw_rx_index];
 		assert(rx_mb != NULL);
 
 		parse_flag = rte_le_to_cpu_16(fp_cqe->pars_flags.flags);
@@ -1708,7 +1716,7 @@ qede_recv_pkts_regular(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 
 		/* Prefetch next mbuf while processing current one. */
 		preload_idx = rxq->sw_rx_cons & num_rx_bds;
-		rte_prefetch0(rxq->sw_rx_ring[preload_idx].mbuf);
+		rte_prefetch0(rxq->sw_rx_ring[preload_idx]);
 
 		/* Update rest of the MBUF fields */
 		rx_mb->data_off = offset + RTE_PKTMBUF_HEADROOM;
@@ -1866,7 +1874,7 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 
 		/* Get the data from the SW ring */
 		sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
-		rx_mb = rxq->sw_rx_ring[sw_rx_index].mbuf;
+		rx_mb = rxq->sw_rx_ring[sw_rx_index];
 		assert(rx_mb != NULL);
 
 		/* Handle regular CQE or TPA start CQE */
@@ -1997,7 +2005,7 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 
 		/* Prefetch next mbuf while processing current one. */
 		preload_idx = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
-		rte_prefetch0(rxq->sw_rx_ring[preload_idx].mbuf);
+		rte_prefetch0(rxq->sw_rx_ring[preload_idx]);
 
 		/* Update rest of the MBUF fields */
 		rx_mb->data_off = offset + RTE_PKTMBUF_HEADROOM;
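Every remaining hunk is the same mechanical substitution, sw_rx_ring[i].mbuf becoming sw_rx_ring[i]: the old qede_rx_entry's page_offset field was set to 0 at every allocation site in this file, so a bare mbuf pointer carries the same information in half the space, and a slice of the ring can be handed straight to rte_mempool_get_bulk() as a void **. A small sketch of the space saving; struct rte_mbuf is declared opaque here only so the example compiles without DPDK headers.

#include <stdint.h>
#include <stdio.h>

struct rte_mbuf;			/* opaque stand-in, no DPDK needed */

/* The old entry type, reproduced from the left-hand side of the diff. */
struct qede_rx_entry {
	struct rte_mbuf *mbuf;
	uint16_t page_offset;		/* always written as 0 by this driver */
};

int main(void)
{
	/* On a typical LP64 build the old entry pads out to 16 bytes, so
	 * the ring of plain pointers is half the size and cache-friendlier.
	 */
	printf("old entry: %zu bytes, new entry: %zu bytes\n",
	       sizeof(struct qede_rx_entry), sizeof(struct rte_mbuf *));
	return 0;
}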