diff --git a/drivers/net/fm10k/fm10k_rxtx_vec.c b/drivers/net/fm10k/fm10k_rxtx_vec.c
index b724486e9d..89cd95829e 100644
--- a/drivers/net/fm10k/fm10k_rxtx_vec.c
+++ b/drivers/net/fm10k/fm10k_rxtx_vec.c
@@ -324,9 +324,6 @@ fm10k_rxq_rearm(struct fm10k_rx_queue *rxq)
 
 		/* Flush mbuf with pkt template.
 		 * Data to be rearmed is 6 bytes long.
-		 * Though, RX will overwrite ol_flags that are coming next
-		 * anyway. So overwrite whole 8 bytes with one load:
-		 * 6 bytes of rearm_data plus first 2 bytes of ol_flags.
 		 */
 		p0 = (uintptr_t)&mb0->rearm_data;
 		*(uint64_t *)p0 = rxq->mbuf_initializer;
@@ -478,6 +475,7 @@ fm10k_recv_raw_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
 		/* Read desc statuses backwards to avoid race condition */
 		/* A.1 load 4 pkts desc */
 		descs0[3] = _mm_loadu_si128((__m128i *)(rxdp + 3));
+		rte_compiler_barrier();
 
 		/* B.2 copy 2 mbuf point into rx_pkts */
 		_mm_storeu_si128((__m128i *)&rx_pkts[pos], mbp1);
@@ -486,8 +484,10 @@ fm10k_recv_raw_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
 		mbp2 = _mm_loadu_si128((__m128i *)&mbufp[pos+2]);
 
 		descs0[2] = _mm_loadu_si128((__m128i *)(rxdp + 2));
+		rte_compiler_barrier();
 		/* B.1 load 2 mbuf point */
 		descs0[1] = _mm_loadu_si128((__m128i *)(rxdp + 1));
+		rte_compiler_barrier();
 		descs0[0] = _mm_loadu_si128((__m128i *)(rxdp));
 
 		/* B.2 copy 2 mbuf point into rx_pkts */
@@ -751,12 +751,12 @@ fm10k_tx_free_bufs(struct fm10k_tx_queue *txq)
 	 * next_dd - (rs_thresh-1)
 	 */
 	txep = &txq->sw_ring[txq->next_dd - (n - 1)];
-	m = __rte_pktmbuf_prefree_seg(txep[0]);
+	m = rte_pktmbuf_prefree_seg(txep[0]);
 	if (likely(m != NULL)) {
 		free[0] = m;
 		nb_free = 1;
 		for (i = 1; i < n; i++) {
-			m = __rte_pktmbuf_prefree_seg(txep[i]);
+			m = rte_pktmbuf_prefree_seg(txep[i]);
 			if (likely(m != NULL)) {
 				if (likely(m->pool == free[0]->pool))
 					free[nb_free++] = m;
@@ -771,7 +771,7 @@ fm10k_tx_free_bufs(struct fm10k_tx_queue *txq)
 		rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
 	} else {
 		for (i = 1; i < n; i++) {
-			m = __rte_pktmbuf_prefree_seg(txep[i]);
+			m = rte_pktmbuf_prefree_seg(txep[i]);
 			if (m != NULL)
 				rte_mempool_put(m->pool, m);
 		}
@@ -797,8 +797,8 @@ tx_backlog_entry(struct rte_mbuf **txep,
 }
 
 uint16_t
-fm10k_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
-		uint16_t nb_pkts)
+fm10k_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+			   uint16_t nb_pkts)
 {
 	struct fm10k_tx_queue *txq = (struct fm10k_tx_queue *)tx_queue;
 	volatile struct fm10k_tx_desc *txdp;
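
Note on the rte_compiler_barrier() additions in fm10k_recv_raw_pkts_vec: the
descriptor ring is written by the NIC via DMA while the CPU polls it, so the
four vector loads must be issued newest-to-oldest; a set DD bit in descriptor
N then guarantees descriptors 0..N-1 are complete. Without a barrier between
them, the compiler is free to reorder or coalesce the loads (they are plain
SSE loads, not volatile accesses) and could read a half-written descriptor.
Below is a minimal sketch of the pattern, assuming DPDK headers are on the
include path; load_descs_backwards and the bare __m128i descriptor array are
illustrative names, not driver symbols.

    #include <emmintrin.h>      /* SSE2: _mm_loadu_si128 */
    #include <rte_atomic.h>     /* rte_compiler_barrier() */

    /* Read four 16-byte RX descriptors, newest first. The barrier after
     * each load emits no instruction; it only stops the compiler from
     * hoisting the older (lower-index) reads above the newer ones while
     * the NIC may still be DMA-ing those entries.
     */
    static inline void
    load_descs_backwards(const __m128i *rxdp, __m128i descs[4])
    {
            descs[3] = _mm_loadu_si128(rxdp + 3);
            rte_compiler_barrier();
            descs[2] = _mm_loadu_si128(rxdp + 2);
            rte_compiler_barrier();
            descs[1] = _mm_loadu_si128(rxdp + 1);
            rte_compiler_barrier();
            descs[0] = _mm_loadu_si128(rxdp);
    }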
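
The s/__rte_pktmbuf_prefree_seg/rte_pktmbuf_prefree_seg/ changes track the
mbuf API cleanup that made the helper public. Its contract is unchanged: it
returns the mbuf when the caller held the last reference (so the segment can
be recycled into its pool) and NULL otherwise. The sketch below shows the
same per-pool batching idea used in fm10k_tx_free_bufs, assuming at most 64
mbufs per call; free_tx_mbufs is a hypothetical name, not a driver function.

    #include <rte_mbuf.h>
    #include <rte_mempool.h>

    /* Return transmitted mbufs to their mempools, batching consecutive
     * mbufs from the same pool into one rte_mempool_put_bulk() call.
     */
    static void
    free_tx_mbufs(struct rte_mbuf **txep, unsigned int n)
    {
            struct rte_mbuf *free[64];      /* assumes n <= 64 */
            struct rte_mbuf *m;
            unsigned int i, nb_free = 0;

            for (i = 0; i < n; i++) {
                    m = rte_pktmbuf_prefree_seg(txep[i]);
                    if (m == NULL)          /* still referenced elsewhere */
                            continue;
                    /* Flush the current batch when the pool changes. */
                    if (nb_free > 0 && m->pool != free[0]->pool) {
                            rte_mempool_put_bulk(free[0]->pool,
                                            (void **)free, nb_free);
                            nb_free = 0;
                    }
                    free[nb_free++] = m;
            }
            if (nb_free > 0)
                    rte_mempool_put_bulk(free[0]->pool, (void **)free,
                                    nb_free);
    }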