X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Ffm10k%2Ffm10k_rxtx_vec.c;h=eff3933b5c348ab2379d71de44735d9cbb8b157a;hb=3c334fcd9e565258667ec052550ab023d0fc9adb;hp=89cd95829e956ff3d713814b8c31e15466d80514;hpb=dc448dc4609fb82655283c8e51692e33a01d8fe7;p=dpdk.git

diff --git a/drivers/net/fm10k/fm10k_rxtx_vec.c b/drivers/net/fm10k/fm10k_rxtx_vec.c
index 89cd95829e..eff3933b5c 100644
--- a/drivers/net/fm10k/fm10k_rxtx_vec.c
+++ b/drivers/net/fm10k/fm10k_rxtx_vec.c
@@ -1,39 +1,10 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *   * Redistributions of source code must retain the above copyright
- *     notice, this list of conditions and the following disclaimer.
- *   * Redistributions in binary form must reproduce the above copyright
- *     notice, this list of conditions and the following disclaimer in
- *     the documentation and/or other materials provided with the
- *     distribution.
- *   * Neither the name of Intel Corporation nor the names of its
- *     contributors may be used to endorse or promote products derived
- *     from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2013-2015 Intel Corporation
  */
 
 #include <inttypes.h>
 
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
 #include <rte_common.h>
 #include "fm10k.h"
 #include "base/fm10k_type.h"
@@ -81,8 +52,10 @@ fm10k_desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
 
 	const __m128i pkttype_msk = _mm_set_epi16(
 			0x0000, 0x0000, 0x0000, 0x0000,
-			PKT_RX_VLAN_PKT, PKT_RX_VLAN_PKT,
-			PKT_RX_VLAN_PKT, PKT_RX_VLAN_PKT);
+			PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
+			PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
+			PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
+			PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED);
 
 	/* mask everything except rss type */
 	const __m128i rsstype_msk = _mm_set_epi16(
@@ -230,7 +203,7 @@ fm10k_desc_to_pktype_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
 #define fm10k_desc_to_pktype_v(desc, rx_pkts) do {} while (0)
 #endif
 
-int __attribute__((cold))
+int __rte_cold
 fm10k_rx_vec_condition_check(struct rte_eth_dev *dev)
 {
 #ifndef RTE_LIBRTE_IEEE1588
@@ -239,7 +212,7 @@ fm10k_rx_vec_condition_check(struct rte_eth_dev *dev)
 
 #ifndef RTE_FM10K_RX_OLFLAGS_ENABLE
 	/* whithout rx ol_flags, no VP flag report */
-	if (rxmode->hw_vlan_extend != 0)
+	if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
 		return -1;
 #endif
 
@@ -248,7 +221,7 @@ fm10k_rx_vec_condition_check(struct rte_eth_dev *dev)
 		return -1;
 
 	/* no header split support */
-	if (rxmode->header_split == 1)
+	if (rxmode->offloads & DEV_RX_OFFLOAD_HEADER_SPLIT)
 		return -1;
 
 	return 0;
@@ -258,7 +231,7 @@ fm10k_rx_vec_condition_check(struct rte_eth_dev *dev)
 #endif
 }
 
-int __attribute__((cold))
+int __rte_cold
 fm10k_rxq_vec_setup(struct fm10k_rx_queue *rxq)
 {
 	uintptr_t p;
@@ -330,7 +303,9 @@ fm10k_rxq_rearm(struct fm10k_rx_queue *rxq)
 		p1 = (uintptr_t)&mb1->rearm_data;
 		*(uint64_t *)p1 = rxq->mbuf_initializer;
 
-		/* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */
+		/* load buf_addr(lo 64bit) and buf_iova(hi 64bit) */
+		RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_iova) !=
+				offsetof(struct rte_mbuf, buf_addr) + 8);
 		vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
 		vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);
 
@@ -374,7 +349,7 @@ fm10k_rxq_rearm(struct fm10k_rx_queue *rxq)
 	FM10K_PCI_REG_WRITE(rxq->tail_ptr, rx_id);
 }
 
-void __attribute__((cold))
+void __rte_cold
 fm10k_rx_queue_release_mbufs_vec(struct fm10k_rx_queue *rxq)
 {
 	const unsigned mask = rxq->nb_desc - 1;
@@ -384,8 +359,15 @@ fm10k_rx_queue_release_mbufs_vec(struct fm10k_rx_queue *rxq)
 		return;
 
 	/* free all mbufs that are valid in the ring */
-	for (i = rxq->next_dd; i != rxq->rxrearm_start; i = (i + 1) & mask)
-		rte_pktmbuf_free_seg(rxq->sw_ring[i]);
+	if (rxq->rxrearm_nb == 0) {
+		for (i = 0; i < rxq->nb_desc; i++)
+			if (rxq->sw_ring[i] != NULL)
+				rte_pktmbuf_free_seg(rxq->sw_ring[i]);
+	} else {
+		for (i = rxq->next_dd; i != rxq->rxrearm_start;
+			i = (i + 1) & mask)
+			rte_pktmbuf_free_seg(rxq->sw_ring[i]);
+	}
 	rxq->rxrearm_nb = rxq->nb_desc;
 
 	/* set all entries to NULL */
@@ -448,6 +430,19 @@ fm10k_recv_raw_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
 		0xFF, 0xFF,	/* skip high 16 bits pkt_type */
 		0xFF, 0xFF	/* Skip pkt_type field in shuffle operation */
 		);
+	/*
+	 * Compile-time verify the shuffle mask
+	 * NOTE: some field positions already verified above, but duplicated
+	 * here for completeness in case of future modifications.
+	 */
+	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
+			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
+	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
+			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
+	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, vlan_tci) !=
+			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 10);
+	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) !=
+			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12);
 
 	/* Cache is empty -> need to scan the buffer rings, but first move
 	 * the next 'n' mbufs into the cache
@@ -467,9 +462,13 @@ fm10k_recv_raw_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
 		__m128i descs0[RTE_FM10K_DESCS_PER_LOOP];
 		__m128i pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
 		__m128i zero, staterr, sterr_tmp1, sterr_tmp2;
-		__m128i mbp1, mbp2; /* two mbuf pointer in one XMM reg. */
+		__m128i mbp1;
+		/* 2 64 bit or 4 32 bit mbuf pointers in one XMM reg. */
+#if defined(RTE_ARCH_X86_64)
+		__m128i mbp2;
+#endif
 
-		/* B.1 load 1 mbuf point */
+		/* B.1 load 2 (64 bit) or 4 (32 bit) mbuf points */
 		mbp1 = _mm_loadu_si128((__m128i *)&mbufp[pos]);
 
 		/* Read desc statuses backwards to avoid race condition */
@@ -477,11 +476,13 @@ fm10k_recv_raw_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
 		descs0[3] = _mm_loadu_si128((__m128i *)(rxdp + 3));
 		rte_compiler_barrier();
 
-		/* B.2 copy 2 mbuf point into rx_pkts */
+		/* B.2 copy 2 64 bit or 4 32 bit mbuf point into rx_pkts */
 		_mm_storeu_si128((__m128i *)&rx_pkts[pos], mbp1);
 
-		/* B.1 load 1 mbuf point */
+#if defined(RTE_ARCH_X86_64)
+		/* B.1 load 2 64 bit mbuf poitns */
 		mbp2 = _mm_loadu_si128((__m128i *)&mbufp[pos+2]);
+#endif
 
 		descs0[2] = _mm_loadu_si128((__m128i *)(rxdp + 2));
 		rte_compiler_barrier();
@@ -490,8 +491,10 @@ fm10k_recv_raw_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
 		rte_compiler_barrier();
 		descs0[0] = _mm_loadu_si128((__m128i *)(rxdp));
 
+#if defined(RTE_ARCH_X86_64)
 		/* B.2 copy 2 mbuf point into rx_pkts */
 		_mm_storeu_si128((__m128i *)&rx_pkts[pos+2], mbp2);
+#endif
 
 		/* avoid compiler reorder optimization */
 		rte_compiler_barrier();
@@ -682,6 +685,7 @@ fm10k_recv_scattered_pkts_vec(void *rx_queue,
 			i++;
 		if (i == nb_bufs)
 			return nb_bufs;
+		rxq->pkt_first_seg = rx_pkts[i];
 	}
 	return i + fm10k_reassemble_packets(rxq, &rx_pkts[i],
 		nb_bufs - i, &split_flags[i]);
@@ -691,17 +695,17 @@ static const struct fm10k_txq_ops vec_txq_ops = {
 	.reset = fm10k_reset_tx_queue,
 };
 
-void __attribute__((cold))
+void __rte_cold
 fm10k_txq_vec_setup(struct fm10k_tx_queue *txq)
 {
 	txq->ops = &vec_txq_ops;
 }
 
-int __attribute__((cold))
+int __rte_cold
 fm10k_tx_vec_condition_check(struct fm10k_tx_queue *txq)
 {
 	/* Vector TX can't offload any features yet */
-	if ((txq->txq_flags & FM10K_SIMPLE_TX_FLAG) != FM10K_SIMPLE_TX_FLAG)
+	if (txq->offloads != 0)
 		return -1;
 
 	if (txq->tx_ftag_en)
@@ -715,7 +719,7 @@ vtx1(volatile struct fm10k_tx_desc *txdp,
 		struct rte_mbuf *pkt, uint64_t flags)
 {
 	__m128i descriptor = _mm_set_epi64x(flags << 56 |
-			pkt->vlan_tci << 16 | pkt->data_len,
+			(uint64_t)pkt->vlan_tci << 16 | (uint64_t)pkt->data_len,
 			MBUF_DMA_ADDR(pkt));
 	_mm_store_si128((__m128i *)txdp, descriptor);
 }
@@ -730,7 +734,7 @@ vtx(volatile struct fm10k_tx_desc *txdp,
 		vtx1(txdp, *pkt, flags);
 }
 
-static inline int __attribute__((always_inline))
+static __rte_always_inline int
 fm10k_tx_free_bufs(struct fm10k_tx_queue *txq)
 {
 	struct rte_mbuf **txep;
@@ -786,7 +790,7 @@ fm10k_tx_free_bufs(struct fm10k_tx_queue *txq)
 	return txq->rs_thresh;
 }
 
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
 tx_backlog_entry(struct rte_mbuf **txep,
 		struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 {
@@ -860,7 +864,7 @@ fm10k_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 	return nb_pkts;
 }
 
-static void __attribute__((cold))
+static void __rte_cold
 fm10k_reset_tx_queue(struct fm10k_tx_queue *txq)
 {
 	static const struct fm10k_tx_desc zeroed_desc = {0};
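
The RTE_BUILD_BUG_ON() checks added in the rearm and receive paths pin down the mbuf layout that the 16-byte SIMD loads and the shuffle mask silently assume (buf_iova directly after buf_addr, and pkt_len/data_len/vlan_tci/hash at fixed offsets inside rx_descriptor_fields1). A minimal standalone sketch of the same technique, using a hypothetical struct and a locally defined BUILD_BUG_ON() rather than the real rte_mbuf and the DPDK macro:

#include <stddef.h>
#include <stdint.h>

/* Hypothetical metadata layout, standing in for struct rte_mbuf. */
struct meta {
	void *buf_addr;		/* low 64 bits of one 16-byte load */
	uint64_t buf_iova;	/* expected right after buf_addr */
};

/* One common compile-time assert for C: a negative array size
 * breaks the build when the condition is true. */
#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2 * !!(condition)]))

static void
meta_layout_check(void)
{
	/* Fails to compile if buf_iova ever moves away from buf_addr + 8,
	 * which would silently corrupt a vectorized rearm path. */
	BUILD_BUG_ON(offsetof(struct meta, buf_iova) !=
		     offsetof(struct meta, buf_addr) + 8);
}

int main(void)
{
	meta_layout_check();
	return 0;
}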
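The vtx1() change widens vlan_tci and data_len to 64 bits before shifting and OR-ing them into the descriptor word. Without the casts, the two uint16_t fields are promoted to 32-bit signed int, so a TCI with its top bit set can be sign-extended over the upper half of the 64-bit value (formally, the shift even overflows a 32-bit int), clobbering the flags byte built by flags << 56. A small, self-contained illustration with made-up values; it emulates the promotion explicitly so the snippet itself stays well defined:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t flags = 0x8;		/* made-up descriptor flag byte */
	uint16_t vlan_tci = 0x8100;	/* PCP bits set => top bit set */
	uint16_t data_len = 60;

	/* What "pkt->vlan_tci << 16 | pkt->data_len" effectively yields:
	 * the fields are combined as a 32-bit value that is negative when
	 * the TCI top bit is set, then sign-extended to 64 bits. */
	int32_t promoted = (int32_t)((uint32_t)vlan_tci << 16 | data_len);
	uint64_t without_casts = flags << 56 | (uint64_t)promoted;

	/* The patched form: widen first, then shift and combine. */
	uint64_t with_casts = flags << 56 |
			(uint64_t)vlan_tci << 16 | (uint64_t)data_len;

	printf("without casts: 0x%016" PRIx64 "\n", without_casts);
	printf("with casts:    0x%016" PRIx64 "\n", with_casts);
	return 0;
}

With these values the uncast form prints 0xffffffff8100003c, while the cast form keeps the flag byte intact as 0x080000008100003c.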