net/qede: fix minsize build
diff --git a/drivers/net/qede/qede_rxtx.c b/drivers/net/qede/qede_rxtx.c
index 9294f79..35cde56 100644
--- a/drivers/net/qede/qede_rxtx.c
+++ b/drivers/net/qede/qede_rxtx.c
@@ -24,8 +24,7 @@ static inline int qede_alloc_rx_buffer(struct qede_rx_queue *rxq)
                           rte_mempool_in_use_count(rxq->mb_pool));
                return -ENOMEM;
        }
-       rxq->sw_rx_ring[idx].mbuf = new_mb;
-       rxq->sw_rx_ring[idx].page_offset = 0;
+       rxq->sw_rx_ring[idx] = new_mb;
        mapping = rte_mbuf_data_iova_default(new_mb);
        /* Advance PROD and get BD pointer */
        rx_bd = (struct eth_rx_bd *)ecore_chain_produce(&rxq->rx_bd_ring);
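The hunk above is the heart of the change: the SW Rx ring slot, previously a small struct, becomes a bare mbuf pointer, since page_offset was only ever written as 0. A before/after sketch of the declarations, reconstructed from the removed lines (the struct layout and field width are assumptions, not copied from the driver header):

        /* Before (assumed layout): each slot paired the mbuf with an
         * always-zero page_offset.
         */
        struct qede_rx_entry {
                struct rte_mbuf *mbuf;
                uint16_t page_offset;   /* field width assumed */
        };
        struct qede_rx_entry *sw_rx_ring;

        /* After: a flat array of mbuf pointers. Besides shrinking each
         * slot to one pointer, this lets rte_mempool_get_bulk() fill a
         * contiguous slice of the ring directly, as the next hunk does.
         */
        struct rte_mbuf **sw_rx_ring;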
@@ -39,17 +38,24 @@ static inline int qede_alloc_rx_buffer(struct qede_rx_queue *rxq)
 
 static inline int qede_alloc_rx_bulk_mbufs(struct qede_rx_queue *rxq, int count)
 {
-       void *obj_p[QEDE_MAX_BULK_ALLOC_COUNT] __rte_cache_aligned;
        struct rte_mbuf *mbuf = NULL;
        struct eth_rx_bd *rx_bd;
        dma_addr_t mapping;
        int i, ret = 0;
        uint16_t idx;
+       uint16_t mask = NUM_RX_BDS(rxq);
 
        if (count > QEDE_MAX_BULK_ALLOC_COUNT)
                count = QEDE_MAX_BULK_ALLOC_COUNT;
 
-       ret = rte_mempool_get_bulk(rxq->mb_pool, obj_p, count);
+       idx = rxq->sw_rx_prod & NUM_RX_BDS(rxq);
+
+       if (count > mask - idx + 1)
+               count = mask - idx + 1;
+
+       ret = rte_mempool_get_bulk(rxq->mb_pool, (void **)&rxq->sw_rx_ring[idx],
+                                  count);
+
        if (unlikely(ret)) {
                PMD_RX_LOG(ERR, rxq,
                           "Failed to allocate %d rx buffers "
@@ -63,20 +69,17 @@ static inline int qede_alloc_rx_bulk_mbufs(struct qede_rx_queue *rxq, int count)
        }
 
        for (i = 0; i < count; i++) {
-               mbuf = obj_p[i];
-               if (likely(i < count - 1))
-                       rte_prefetch0(obj_p[i + 1]);
+               rte_prefetch0(rxq->sw_rx_ring[(idx + 1) & NUM_RX_BDS(rxq)]);
+               mbuf = rxq->sw_rx_ring[idx & NUM_RX_BDS(rxq)];
 
-               idx = rxq->sw_rx_prod & NUM_RX_BDS(rxq);
-               rxq->sw_rx_ring[idx].mbuf = mbuf;
-               rxq->sw_rx_ring[idx].page_offset = 0;
                mapping = rte_mbuf_data_iova_default(mbuf);
                rx_bd = (struct eth_rx_bd *)
                        ecore_chain_produce(&rxq->rx_bd_ring);
                rx_bd->addr.hi = rte_cpu_to_le_32(U64_HI(mapping));
                rx_bd->addr.lo = rte_cpu_to_le_32(U64_LO(mapping));
-               rxq->sw_rx_prod++;
+               idx++;
        }
+       rxq->sw_rx_prod = idx;
 
        return 0;
 }
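With the obj_p[] staging array gone, rte_mempool_get_bulk() now writes mbuf pointers straight into sw_rx_ring[idx..], so a single call must not run past the end of the array: count is clamped to the contiguous slots left before the wrap, with NUM_RX_BDS() serving as a power-of-two index mask (assumed here to expand to nb_rx_desc - 1). A minimal standalone sketch of the clamp with illustrative numbers:

        #include <stdint.h>
        #include <stdio.h>

        int main(void)
        {
                const uint16_t mask = 256 - 1;    /* 256-entry ring */
                uint16_t sw_rx_prod = 1274;       /* free-running producer */
                uint16_t idx = sw_rx_prod & mask; /* 1274 & 255 = 250 */
                int count = 64;                   /* requested batch size */

                /* Only slots idx..mask (6 here) are contiguous, so the
                 * batch shrinks; the rest is allocated on the next call.
                 */
                if (count > mask - idx + 1)
                        count = mask - idx + 1;

                printf("idx=%u count=%d\n", (unsigned)idx, count);
                return 0;
        }

The trade-off: an occasional short batch near the wrap point, in exchange for dropping the cache-aligned staging array and the per-mbuf pointer copy into the ring.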
@@ -309,9 +312,9 @@ static void qede_rx_queue_release_mbufs(struct qede_rx_queue *rxq)
 
        if (rxq->sw_rx_ring) {
                for (i = 0; i < rxq->nb_rx_desc; i++) {
-                       if (rxq->sw_rx_ring[i].mbuf) {
-                               rte_pktmbuf_free(rxq->sw_rx_ring[i].mbuf);
-                               rxq->sw_rx_ring[i].mbuf = NULL;
+                       if (rxq->sw_rx_ring[i]) {
+                               rte_pktmbuf_free(rxq->sw_rx_ring[i]);
+                               rxq->sw_rx_ring[i] = NULL;
                        }
                }
        }
@@ -718,9 +721,10 @@ qede_update_rx_prod(__rte_unused struct qede_dev *edev,
 {
        uint16_t bd_prod = ecore_chain_get_prod_idx(&rxq->rx_bd_ring);
        uint16_t cqe_prod = ecore_chain_get_prod_idx(&rxq->rx_comp_ring);
-       struct eth_rx_prod_data rx_prods = { 0 };
+       struct eth_rx_prod_data rx_prods;
 
        /* Update producers */
+       memset(&rx_prods, 0, sizeof(rx_prods));
        rx_prods.bd_prod = rte_cpu_to_le_16(bd_prod);
        rx_prods.cqe_prod = rte_cpu_to_le_16(cqe_prod);
 
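The rx_prods hunk swaps the brace initializer for an explicit memset(). Both zero the named members, but memset() also clears padding bytes, and, presumably the point given this page's title, it sidesteps the maybe-uninitialized warnings some compilers raise for brace-initialized aggregates in size-optimized (minsize) builds. Side by side, with a hypothetical stand-in struct for illustration (eth_rx_prod_data itself is defined elsewhere in the driver):

        #include <string.h>
        #include <stdint.h>

        struct prod_data { uint16_t bd_prod; uint16_t cqe_prod; }; /* stand-in */

        void zero_two_ways(void)
        {
                struct prod_data a = { 0 };  /* named members zeroed; padding,
                                              * if any, is unspecified */
                struct prod_data b;
                memset(&b, 0, sizeof(b));    /* every byte zeroed */
                (void)a; (void)b;
        }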
@@ -893,8 +897,10 @@ qede_process_tx_compl(__rte_unused struct ecore_dev *edev,
        struct rte_mbuf *mbuf;
        uint16_t nb_segs;
        uint16_t idx;
+       uint16_t first_idx;
 
        rte_compiler_barrier();
+       rte_prefetch0(txq->hw_cons_ptr);
        sw_tx_cons = ecore_chain_get_cons_idx(&txq->tx_pbl);
        hw_bd_cons = rte_le_to_cpu_16(*txq->hw_cons_ptr);
 #ifdef RTE_LIBRTE_QEDE_DEBUG_TX
@@ -907,6 +913,7 @@ qede_process_tx_compl(__rte_unused struct ecore_dev *edev,
 
        remaining = hw_bd_cons - sw_tx_cons;
        txq->nb_tx_avail += remaining;
+       first_idx = idx;
 
        while (remaining) {
                mbuf = txq->sw_tx_ring[idx];
@@ -914,6 +921,12 @@ qede_process_tx_compl(__rte_unused struct ecore_dev *edev,
                nb_segs = mbuf->nb_segs;
                remaining -= nb_segs;
 
+               /* Prefetch the next mbuf. Note that at least the last 4 mbufs
+                * that are prefetched will not be used in the current call.
+                */
+               rte_mbuf_prefetch_part1(txq->sw_tx_ring[(idx + 4) & mask]);
+               rte_mbuf_prefetch_part2(txq->sw_tx_ring[(idx + 4) & mask]);
+
                PMD_TX_LOG(DEBUG, txq, "nb_segs to free %u\n", nb_segs);
 
                while (nb_segs) {
@@ -921,11 +934,19 @@ qede_process_tx_compl(__rte_unused struct ecore_dev *edev,
                        nb_segs--;
                }
 
-               rte_pktmbuf_free(mbuf);
                idx = (idx + 1) & mask;
                PMD_TX_LOG(DEBUG, txq, "Freed tx packet\n");
        }
        txq->sw_tx_cons = idx;
+
+       if (first_idx > idx) {
+               rte_pktmbuf_free_bulk(&txq->sw_tx_ring[first_idx],
+                                     mask - first_idx + 1);
+               rte_pktmbuf_free_bulk(&txq->sw_tx_ring[0], idx);
+       } else {
+               rte_pktmbuf_free_bulk(&txq->sw_tx_ring[first_idx],
+                                     idx - first_idx);
+       }
 }
 
 static int qede_drain_txq(struct qede_dev *qdev,
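The Tx completion rework defers the actual freeing: the loop now only walks segments to consume PBL entries (plus a four-ahead mbuf prefetch), and all completed mbufs are released at the end through rte_pktmbuf_free_bulk(). Because sw_tx_ring is circular, a completed range that wraps needs two bulk calls: tail first, then head. A minimal standalone sketch of that split, assuming mask is nb_tx_desc - 1 and both indexes are already masked:

        #include <rte_mbuf.h>

        /* Free circular-ring slots [first_idx, idx), i.e. idx is the
         * first slot NOT freed, matching the loop's exit value.
         */
        static void
        free_completed(struct rte_mbuf **ring, uint16_t mask,
                       uint16_t first_idx, uint16_t idx)
        {
                if (first_idx > idx) {
                        /* Wrapped: free first_idx..mask, then 0..idx - 1. */
                        rte_pktmbuf_free_bulk(&ring[first_idx],
                                              mask - first_idx + 1);
                        rte_pktmbuf_free_bulk(&ring[0], idx);
                } else {
                        /* Contiguous: one call covers first_idx..idx - 1. */
                        rte_pktmbuf_free_bulk(&ring[first_idx],
                                              idx - first_idx);
                }
        }

For example, with mask = 255, first_idx = 250 and idx = 10, this frees slots 250..255 and then 0..9: sixteen mbufs in two library calls instead of sixteen rte_pktmbuf_free() calls inside the hot loop.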
@@ -1301,18 +1322,15 @@ static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq)
 
 static inline void
 qede_reuse_page(__rte_unused struct qede_dev *qdev,
-               struct qede_rx_queue *rxq, struct qede_rx_entry *curr_cons)
+               struct qede_rx_queue *rxq, struct rte_mbuf *curr_cons)
 {
        struct eth_rx_bd *rx_bd_prod = ecore_chain_produce(&rxq->rx_bd_ring);
        uint16_t idx = rxq->sw_rx_prod & NUM_RX_BDS(rxq);
-       struct qede_rx_entry *curr_prod;
        dma_addr_t new_mapping;
 
-       curr_prod = &rxq->sw_rx_ring[idx];
-       *curr_prod = *curr_cons;
+       rxq->sw_rx_ring[idx] = curr_cons;
 
-       new_mapping = rte_mbuf_data_iova_default(curr_prod->mbuf) +
-                     curr_prod->page_offset;
+       new_mapping = rte_mbuf_data_iova_default(curr_cons);
 
        rx_bd_prod->addr.hi = rte_cpu_to_le_32(U64_HI(new_mapping));
        rx_bd_prod->addr.lo = rte_cpu_to_le_32(U64_LO(new_mapping));
@@ -1324,10 +1342,10 @@ static inline void
 qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq,
                        struct qede_dev *qdev, uint8_t count)
 {
-       struct qede_rx_entry *curr_cons;
+       struct rte_mbuf *curr_cons;
 
        for (; count > 0; count--) {
-               curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS(rxq)];
+               curr_cons = rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS(rxq)];
                qede_reuse_page(qdev, rxq, curr_cons);
                qede_rx_bd_ring_consume(rxq);
        }
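With the per-slot struct gone, the recycle path simplifies to match: qede_reuse_page() reposts the consumed mbuf with a single pointer store, and the DMA address no longer carries a page_offset correction. Condensed from the hunks above for side-by-side reading (not a standalone snippet):

        /* before: struct copy, then re-apply the stored offset */
        *curr_prod = *curr_cons;
        new_mapping = rte_mbuf_data_iova_default(curr_prod->mbuf) +
                      curr_prod->page_offset;

        /* after: one pointer store; the offset is always the default */
        rxq->sw_rx_ring[idx] = curr_cons;
        new_mapping = rte_mbuf_data_iova_default(curr_cons);

The remaining hunks below are the mechanical fallout: every sw_rx_ring[i].mbuf accessor becomes sw_rx_ring[i].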
@@ -1349,7 +1367,7 @@ qede_rx_process_tpa_cmn_cont_end_cqe(__rte_unused struct qede_dev *qdev,
        if (rte_le_to_cpu_16(len)) {
                tpa_info = &rxq->tpa_info[agg_index];
                cons_idx = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
-               curr_frag = rxq->sw_rx_ring[cons_idx].mbuf;
+               curr_frag = rxq->sw_rx_ring[cons_idx];
                assert(curr_frag);
                curr_frag->nb_segs = 1;
                curr_frag->pkt_len = rte_le_to_cpu_16(len);
@@ -1481,7 +1499,7 @@ qede_process_sg_pkts(void *p_rxq,  struct rte_mbuf *rx_mb,
                        return -EINVAL;
                }
                sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
-               seg2 = rxq->sw_rx_ring[sw_rx_index].mbuf;
+               seg2 = rxq->sw_rx_ring[sw_rx_index];
                qede_rx_bd_ring_consume(rxq);
                pkt_len -= cur_size;
                seg2->data_len = cur_size;
@@ -1600,7 +1618,7 @@ qede_recv_pkts_regular(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 
                /* Get the data from the SW ring */
                sw_rx_index = rxq->sw_rx_cons & num_rx_bds;
-               rx_mb = rxq->sw_rx_ring[sw_rx_index].mbuf;
+               rx_mb = rxq->sw_rx_ring[sw_rx_index];
                assert(rx_mb != NULL);
 
                parse_flag = rte_le_to_cpu_16(fp_cqe->pars_flags.flags);
@@ -1699,7 +1717,7 @@ qede_recv_pkts_regular(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 
                /* Prefetch next mbuf while processing current one. */
                preload_idx = rxq->sw_rx_cons & num_rx_bds;
-               rte_prefetch0(rxq->sw_rx_ring[preload_idx].mbuf);
+               rte_prefetch0(rxq->sw_rx_ring[preload_idx]);
 
                /* Update rest of the MBUF fields */
                rx_mb->data_off = offset + RTE_PKTMBUF_HEADROOM;
@@ -1857,7 +1875,7 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 
                /* Get the data from the SW ring */
                sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
-               rx_mb = rxq->sw_rx_ring[sw_rx_index].mbuf;
+               rx_mb = rxq->sw_rx_ring[sw_rx_index];
                assert(rx_mb != NULL);
 
                /* Handle regular CQE or TPA start CQE */
@@ -1988,7 +2006,7 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 
                /* Prefetch next mbuf while processing current one. */
                preload_idx = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
-               rte_prefetch0(rxq->sw_rx_ring[preload_idx].mbuf);
+               rte_prefetch0(rxq->sw_rx_ring[preload_idx]);
 
                /* Update rest of the MBUF fields */
                rx_mb->data_off = offset + RTE_PKTMBUF_HEADROOM;