-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2013-2015 Intel Corporation
*/
#include <inttypes.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_common.h>
#include "fm10k.h"
#include "base/fm10k_type.h"
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif
+static void
+fm10k_reset_tx_queue(struct fm10k_tx_queue *txq);
+
/* Handling the offload flags (olflags) field takes computation
* time when receiving packets. Therefore we provide a flag to disable
* the processing of the olflags field when they are not needed. This
#define L3TYPE_SHIFT (4)
/* L4 type shift */
#define L4TYPE_SHIFT (7)
+/* HBO flag shift */
+#define HBOFLAG_SHIFT (10)
+/* RXE flag shift */
+#define RXEFLAG_SHIFT (13)
+/* IPE/L4E flag shift */
+#define L3L4EFLAG_SHIFT (14)
+/* shift PKT_RX_L4_CKSUM_GOOD right by 1 bit so it fits in one byte */
+#define CKSUM_SHIFT (1)
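+/* PKT_RX_L4_CKSUM_GOOD is defined one bit past the low byte of ol_flags,
+ * so the checksum lookup-table entries below are stored pre-shifted right
+ * by CKSUM_SHIFT and shifted back after the byte shuffle.
+ */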
static inline void
fm10k_desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
{
- __m128i ptype0, ptype1, vtag0, vtag1;
+ __m128i ptype0, ptype1, vtag0, vtag1, eflag0, eflag1, cksumflag;
union {
uint16_t e[4];
uint64_t dword;
const __m128i pkttype_msk = _mm_set_epi16(
0x0000, 0x0000, 0x0000, 0x0000,
- PKT_RX_VLAN_PKT, PKT_RX_VLAN_PKT,
- PKT_RX_VLAN_PKT, PKT_RX_VLAN_PKT);
+ PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
+ PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
+ PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
+ PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED);
/* mask everything except rss type */
const __m128i rsstype_msk = _mm_set_epi16(
0x0000, 0x0000, 0x0000, 0x0000,
0x000F, 0x000F, 0x000F, 0x000F);
+ /* mask for HBO and RXE flags */
+ const __m128i rxe_msk = _mm_set_epi16(
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0001, 0x0001, 0x0001, 0x0001);
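+ /* after the shifts below, the HBO and RXE bits sit at bit 0 of each
+ * 16-bit lane; this mask keeps only that bit as the shuffle index
+ */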
+
+ /* mask the lower byte of ol_flags */
+ const __m128i ol_flags_msk = _mm_set_epi16(
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x00FF, 0x00FF, 0x00FF, 0x00FF);
+
+ const __m128i l3l4cksum_flag = _mm_set_epi8(0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ (PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD) >> CKSUM_SHIFT,
+ (PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD) >> CKSUM_SHIFT,
+ (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD) >> CKSUM_SHIFT,
+ (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> CKSUM_SHIFT);
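+ /* after shifting right by L3L4EFLAG_SHIFT, the IPE/L4E error bits
+ * select one of the four precomputed IP/L4 GOOD/BAD combinations above
+ */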
+
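+ /* rxe_flag is all zeroes: the merged HBO/RXE bit selects byte 0 or
+ * byte 1 below, and neither currently maps to an ol_flags bit
+ */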
+ const __m128i rxe_flag = _mm_set_epi8(0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0);
+
/* map rss type to rss hash flag */
const __m128i rss_flags = _mm_set_epi8(0, 0, 0, 0,
0, 0, 0, PKT_RX_RSS_HASH,
PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH, 0,
PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, 0);
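+ /* entries holding PKT_RX_RSS_HASH mark the RSS type values that carry
+ * a valid hash; all other type values map to 0
+ */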
+ /* Calculate the RSS hash and VLAN fields */
ptype0 = _mm_unpacklo_epi16(descs[0], descs[1]);
ptype1 = _mm_unpacklo_epi16(descs[2], descs[3]);
vtag0 = _mm_unpackhi_epi16(descs[0], descs[1]);
ptype0 = _mm_shuffle_epi8(rss_flags, ptype0);
vtag1 = _mm_unpacklo_epi32(vtag0, vtag1);
+ eflag0 = vtag1;
+ cksumflag = vtag1;
vtag1 = _mm_srli_epi16(vtag1, VP_SHIFT);
vtag1 = _mm_and_si128(vtag1, pkttype_msk);
vtag1 = _mm_or_si128(ptype0, vtag1);
+
+ /* Process error flags: merge the HBO and RXE bits and map them through rxe_flag */
+ eflag1 = _mm_srli_epi16(eflag0, RXEFLAG_SHIFT);
+ eflag0 = _mm_srli_epi16(eflag0, HBOFLAG_SHIFT);
+ eflag0 = _mm_or_si128(eflag0, eflag1);
+ eflag0 = _mm_and_si128(eflag0, rxe_msk);
+ eflag0 = _mm_shuffle_epi8(rxe_flag, eflag0);
+
+ vtag1 = _mm_or_si128(eflag0, vtag1);
+
+ /* Process L4/L3 checksum error flags */
+ cksumflag = _mm_srli_epi16(cksumflag, L3L4EFLAG_SHIFT);
+ cksumflag = _mm_shuffle_epi8(l3l4cksum_flag, cksumflag);
+
+ /* clear the high byte and shift the flag bits back into place */
+ cksumflag = _mm_and_si128(cksumflag, ol_flags_msk);
+ cksumflag = _mm_slli_epi16(cksumflag, CKSUM_SHIFT);
+ vtag1 = _mm_or_si128(cksumflag, vtag1);
+
vol.dword = _mm_cvtsi128_si64(vtag1);
rx_pkts[0]->ol_flags = vol.e[0];
rx_pkts[3]->ol_flags = vol.e[3];
}
+/* @note: When this function is changed, make corresponding change to
+ * fm10k_dev_supported_ptypes_get().
+ */
static inline void
fm10k_desc_to_pktype_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
{
#ifndef RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE
/* without rx ol_flags, no VP flag report */
- if (rxmode->hw_vlan_extend != 0)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
return -1;
#endif
if (fconf->mode != RTE_FDIR_MODE_NONE)
return -1;
- /* - no csum error report support
- * - no header split support
- */
- if (rxmode->hw_ip_checksum == 1 ||
- rxmode->header_split == 1)
+ /* no header split support */
+ if (rxmode->offloads & DEV_RX_OFFLOAD_HEADER_SPLIT)
return -1;
return 0;
/* Flush mbuf with pkt template.
* Data to be rearmed is 6 bytes long.
- * Though, RX will overwrite ol_flags that are coming next
- * anyway. So overwrite whole 8 bytes with one load:
- * 6 bytes of rearm_data plus first 2 bytes of ol_flags.
*/
p0 = (uintptr_t)&mb0->rearm_data;
*(uint64_t *)p0 = rxq->mbuf_initializer;
p1 = (uintptr_t)&mb1->rearm_data;
*(uint64_t *)p1 = rxq->mbuf_initializer;
- /* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */
+ /* load buf_addr(lo 64bit) and buf_iova(hi 64bit) */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_iova) !=
+ offsetof(struct rte_mbuf, buf_addr) + 8);
vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);
*/
rxdp = rxq->hw_ring + next_dd;
- _mm_prefetch((const void *)rxdp, _MM_HINT_T0);
+ rte_prefetch0(rxdp);
/* See if we need to rearm the RX queue - gives the prefetch a bit
* of time to act
0xFF, 0xFF, /* skip high 16 bits pkt_type */
0xFF, 0xFF /* Skip pkt_type field in shuffle operation */
);
+ /*
+ * Compile-time verify the shuffle mask
+ * NOTE: some field positions already verified above, but duplicated
+ * here for completeness in case of future modifications.
+ */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, vlan_tci) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 10);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12);
/* Cache is empty -> need to scan the buffer rings, but first move
* the next 'n' mbufs into the cache
__m128i descs0[RTE_FM10K_DESCS_PER_LOOP];
__m128i pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
__m128i zero, staterr, sterr_tmp1, sterr_tmp2;
- __m128i mbp1, mbp2; /* two mbuf pointer in one XMM reg. */
+ __m128i mbp1;
+ /* 2 (64 bit) or 4 (32 bit) mbuf pointers in one XMM reg. */
+#if defined(RTE_ARCH_X86_64)
+ __m128i mbp2;
+#endif
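+ /* on a 32-bit build one XMM register already holds four mbuf
+ * pointers, so the second register is only needed on 64-bit
+ */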
- /* B.1 load 1 mbuf point */
+ /* B.1 load 2 (64 bit) or 4 (32 bit) mbuf pointers */
mbp1 = _mm_loadu_si128((__m128i *)&mbufp[pos]);
/* Read desc statuses backwards to avoid race condition */
/* A.1 load 4 pkts desc */
descs0[3] = _mm_loadu_si128((__m128i *)(rxdp + 3));
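+ /* compiler barriers after each load keep the descriptor reads in
+ * source order, preserving the backwards-read guarantee above
+ */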
+ rte_compiler_barrier();
- /* B.2 copy 2 mbuf point into rx_pkts */
+ /* B.2 copy 2 (64 bit) or 4 (32 bit) mbuf pointers into rx_pkts */
_mm_storeu_si128((__m128i *)&rx_pkts[pos], mbp1);
- /* B.1 load 1 mbuf point */
+#if defined(RTE_ARCH_X86_64)
+ /* B.1 load 2 (64 bit) mbuf pointers */
mbp2 = _mm_loadu_si128((__m128i *)&mbufp[pos+2]);
+#endif
descs0[2] = _mm_loadu_si128((__m128i *)(rxdp + 2));
+ rte_compiler_barrier();
/* B.1 load 2 mbuf point */
descs0[1] = _mm_loadu_si128((__m128i *)(rxdp + 1));
+ rte_compiler_barrier();
descs0[0] = _mm_loadu_si128((__m128i *)(rxdp));
+#if defined(RTE_ARCH_X86_64)
/* B.2 copy 2 mbuf point into rx_pkts */
_mm_storeu_si128((__m128i *)&rx_pkts[pos+2], mbp2);
+#endif
/* avoid compiler reorder optimization */
rte_compiler_barrier();
if (split_packet) {
- rte_prefetch0(&rx_pkts[pos]->cacheline1);
- rte_prefetch0(&rx_pkts[pos + 1]->cacheline1);
- rte_prefetch0(&rx_pkts[pos + 2]->cacheline1);
- rte_prefetch0(&rx_pkts[pos + 3]->cacheline1);
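+ /* reassembly of split packets writes mbuf fields such as next,
+ * which live in the second cache line, hence the part2 prefetch
+ */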
+ rte_mbuf_prefetch_part2(rx_pkts[pos]);
+ rte_mbuf_prefetch_part2(rx_pkts[pos + 1]);
+ rte_mbuf_prefetch_part2(rx_pkts[pos + 2]);
+ rte_mbuf_prefetch_part2(rx_pkts[pos + 3]);
}
/* D.1 pkt 3,4 convert format from desc to pktmbuf */
if (!split_flags[buf_idx]) {
/* it's the last packet of the set */
+#ifdef RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE
start->hash = end->hash;
start->ol_flags = end->ol_flags;
+ start->packet_type = end->packet_type;
+#endif
pkts[pkt_idx++] = start;
start = end = NULL;
}
i++;
if (i == nb_bufs)
return nb_bufs;
+ rxq->pkt_first_seg = rx_pkts[i];
}
return i + fm10k_reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
&split_flags[i]);
}
+
+static const struct fm10k_txq_ops vec_txq_ops = {
+ .reset = fm10k_reset_tx_queue,
+};
+
+void __attribute__((cold))
+fm10k_txq_vec_setup(struct fm10k_tx_queue *txq)
+{
+ txq->ops = &vec_txq_ops;
+}
+
+int __attribute__((cold))
+fm10k_tx_vec_condition_check(struct fm10k_tx_queue *txq)
+{
+ /* Vector TX can't offload any features yet */
+ if (txq->offloads != 0)
+ return -1;
+
+ if (txq->tx_ftag_en)
+ return -1;
+
+ return 0;
+}
+
+static inline void
+vtx1(volatile struct fm10k_tx_desc *txdp,
+ struct rte_mbuf *pkt, uint64_t flags)
+{
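+ /* high qword: flags in bits 63:56, vlan_tci in bits 31:16 and
+ * data_len in bits 15:0; low qword: the buffer DMA address
+ */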
+ __m128i descriptor = _mm_set_epi64x(flags << 56 |
+ (uint64_t)pkt->vlan_tci << 16 | (uint64_t)pkt->data_len,
+ MBUF_DMA_ADDR(pkt));
+ _mm_store_si128((__m128i *)txdp, descriptor);
+}
+
+static inline void
+vtx(volatile struct fm10k_tx_desc *txdp,
+ struct rte_mbuf **pkt, uint16_t nb_pkts, uint64_t flags)
+{
+ int i;
+
+ for (i = 0; i < nb_pkts; ++i, ++txdp, ++pkt)
+ vtx1(txdp, *pkt, flags);
+}
+
+static __rte_always_inline int
+fm10k_tx_free_bufs(struct fm10k_tx_queue *txq)
+{
+ struct rte_mbuf **txep;
+ uint8_t flags;
+ uint32_t n;
+ uint32_t i;
+ int nb_free = 0;
+ struct rte_mbuf *m, *free[RTE_FM10K_TX_MAX_FREE_BUF_SZ];
+
+ /* check DD bit on threshold descriptor */
+ flags = txq->hw_ring[txq->next_dd].flags;
+ if (!(flags & FM10K_TXD_FLAG_DONE))
+ return 0;
+
+ n = txq->rs_thresh;
+
+ /* First buffer to free from S/W ring is at index
+ * next_dd - (rs_thresh-1)
+ */
+ txep = &txq->sw_ring[txq->next_dd - (n - 1)];
+ m = rte_pktmbuf_prefree_seg(txep[0]);
+ if (likely(m != NULL)) {
+ free[0] = m;
+ nb_free = 1;
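+ /* group consecutive mbufs from the same mempool so each run can
+ * be returned with a single bulk operation
+ */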
+ for (i = 1; i < n; i++) {
+ m = rte_pktmbuf_prefree_seg(txep[i]);
+ if (likely(m != NULL)) {
+ if (likely(m->pool == free[0]->pool))
+ free[nb_free++] = m;
+ else {
+ rte_mempool_put_bulk(free[0]->pool,
+ (void *)free, nb_free);
+ free[0] = m;
+ nb_free = 1;
+ }
+ }
+ }
+ rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
+ } else {
+ for (i = 1; i < n; i++) {
+ m = rte_pktmbuf_prefree_seg(txep[i]);
+ if (m != NULL)
+ rte_mempool_put(m->pool, m);
+ }
+ }
+
+ /* buffers were freed, update counters */
+ txq->nb_free = (uint16_t)(txq->nb_free + txq->rs_thresh);
+ txq->next_dd = (uint16_t)(txq->next_dd + txq->rs_thresh);
+ if (txq->next_dd >= txq->nb_desc)
+ txq->next_dd = (uint16_t)(txq->rs_thresh - 1);
+
+ return txq->rs_thresh;
+}
+
+static __rte_always_inline void
+tx_backlog_entry(struct rte_mbuf **txep,
+ struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ int i;
+
+ for (i = 0; i < (int)nb_pkts; ++i)
+ txep[i] = tx_pkts[i];
+}
+
+uint16_t
+fm10k_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct fm10k_tx_queue *txq = (struct fm10k_tx_queue *)tx_queue;
+ volatile struct fm10k_tx_desc *txdp;
+ struct rte_mbuf **txep;
+ uint16_t n, nb_commit, tx_id;
+ uint64_t flags = FM10K_TXD_FLAG_LAST;
+ uint64_t rs = FM10K_TXD_FLAG_RS | FM10K_TXD_FLAG_LAST;
+ int i;
+
+ /* crossing the rs_thresh boundary is not allowed */
+ nb_pkts = RTE_MIN(nb_pkts, txq->rs_thresh);
+
+ if (txq->nb_free < txq->free_thresh)
+ fm10k_tx_free_bufs(txq);
+
+ nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_free, nb_pkts);
+ if (unlikely(nb_pkts == 0))
+ return 0;
+
+ tx_id = txq->next_free;
+ txdp = &txq->hw_ring[tx_id];
+ txep = &txq->sw_ring[tx_id];
+
+ txq->nb_free = (uint16_t)(txq->nb_free - nb_pkts);
+
+ n = (uint16_t)(txq->nb_desc - tx_id);
+ if (nb_commit >= n) {
+ tx_backlog_entry(txep, tx_pkts, n);
+
+ for (i = 0; i < n - 1; ++i, ++tx_pkts, ++txdp)
+ vtx1(txdp, *tx_pkts, flags);
+
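+ /* the last descriptor before the ring wrap carries RS so its
+ * write-back lets fm10k_tx_free_bufs() reclaim this batch later
+ */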
+ vtx1(txdp, *tx_pkts++, rs);
+
+ nb_commit = (uint16_t)(nb_commit - n);
+
+ tx_id = 0;
+ txq->next_rs = (uint16_t)(txq->rs_thresh - 1);
+
+ /* avoid reaching the end of the ring */
+ txdp = &(txq->hw_ring[tx_id]);
+ txep = &txq->sw_ring[tx_id];
+ }
+
+ tx_backlog_entry(txep, tx_pkts, nb_commit);
+
+ vtx(txdp, tx_pkts, nb_commit, flags);
+
+ tx_id = (uint16_t)(tx_id + nb_commit);
+ if (tx_id > txq->next_rs) {
+ txq->hw_ring[txq->next_rs].flags |= FM10K_TXD_FLAG_RS;
+ txq->next_rs = (uint16_t)(txq->next_rs + txq->rs_thresh);
+ }
+
+ txq->next_free = tx_id;
+
+ FM10K_PCI_REG_WRITE(txq->tail_ptr, txq->next_free);
+
+ return nb_pkts;
+}
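+
+/* Usage sketch (illustrative, not part of this patch): callers are
+ * expected to split bursts larger than rs_thresh, e.g. with a wrapper
+ * along these lines:
+ *
+ *	uint16_t
+ *	xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ *			uint16_t nb_pkts)
+ *	{
+ *		struct fm10k_tx_queue *txq = tx_queue;
+ *		uint16_t nb_tx = 0;
+ *
+ *		while (nb_pkts) {
+ *			uint16_t num = RTE_MIN(nb_pkts, txq->rs_thresh);
+ *			uint16_t ret = fm10k_xmit_fixed_burst_vec(tx_queue,
+ *					&tx_pkts[nb_tx], num);
+ *
+ *			nb_tx += ret;
+ *			nb_pkts -= ret;
+ *			if (ret < num)
+ *				break;
+ *		}
+ *		return nb_tx;
+ *	}
+ */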
+
+static void __attribute__((cold))
+fm10k_reset_tx_queue(struct fm10k_tx_queue *txq)
+{
+ static const struct fm10k_tx_desc zeroed_desc = {0};
+ struct rte_mbuf **txe = txq->sw_ring;
+ uint16_t i;
+
+ /* Zero out HW ring memory */
+ for (i = 0; i < txq->nb_desc; i++)
+ txq->hw_ring[i] = zeroed_desc;
+
+ /* Initialize SW ring entries */
+ for (i = 0; i < txq->nb_desc; i++)
+ txe[i] = NULL;
+
+ txq->next_dd = (uint16_t)(txq->rs_thresh - 1);
+ txq->next_rs = (uint16_t)(txq->rs_thresh - 1);
+
+ txq->next_free = 0;
+ txq->nb_used = 0;
+ /* Always allow 1 descriptor to be un-allocated to avoid
+ * a H/W race condition
+ */
+ txq->nb_free = (uint16_t)(txq->nb_desc - 1);
+ FM10K_PCI_REG_WRITE(txq->tail_ptr, 0);
+}