-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2013-2015 Intel Corporation
*/
#include <inttypes.h>
-#include <rte_ethdev.h>
+#include <ethdev_driver.h>
#include <rte_common.h>
#include "fm10k.h"
#include "base/fm10k_type.h"
#define RXEFLAG_SHIFT (13)
/* IPE/L4E flag shift */
#define L3L4EFLAG_SHIFT (14)
-/* shift PKT_RX_L4_CKSUM_GOOD into one byte by 1 bit */
+/* shift RTE_MBUF_F_RX_L4_CKSUM_GOOD into one byte by 1 bit */
#define CKSUM_SHIFT (1)
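These shift constants let each descriptor outcome live in a single byte of an SSSE3 shuffle table: RTE_MBUF_F_RX_L4_CKSUM_GOOD occupies bit 8 of ol_flags, so one right shift brings even the widest checksum combination below 0x100. A minimal standalone sketch of that invariant, assuming the DPDK >= 21.11 flag names (illustration only, not part of the patch):

#include <assert.h>
#include <stdint.h>
#include <rte_mbuf_core.h>

#define CKSUM_SHIFT (1)

int main(void)
{
	/* widest combination used in the l3l4cksum_flag table below */
	uint64_t worst = RTE_MBUF_F_RX_IP_CKSUM_GOOD |
			RTE_MBUF_F_RX_L4_CKSUM_GOOD;

	/* bit 8 becomes bit 7, so the value fits in a table byte */
	assert((worst >> CKSUM_SHIFT) <= UINT8_MAX);
	return 0;
}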
static inline void
const __m128i pkttype_msk = _mm_set_epi16(
0x0000, 0x0000, 0x0000, 0x0000,
- PKT_RX_VLAN_PKT, PKT_RX_VLAN_PKT,
- PKT_RX_VLAN_PKT, PKT_RX_VLAN_PKT);
+ RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED,
+ RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED,
+ RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED,
+ RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED);
/* mask everything except rss type */
const __m128i rsstype_msk = _mm_set_epi16(
const __m128i l3l4cksum_flag = _mm_set_epi8(0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
- (PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD) >> CKSUM_SHIFT,
- (PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD) >> CKSUM_SHIFT,
- (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD) >> CKSUM_SHIFT,
- (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> CKSUM_SHIFT);
+ (RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD) >> CKSUM_SHIFT,
+ (RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD) >> CKSUM_SHIFT,
+ (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD) >> CKSUM_SHIFT,
+ (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD) >> CKSUM_SHIFT);
const __m128i rxe_flag = _mm_set_epi8(0, 0, 0, 0,
0, 0, 0, 0,
/* map rss type to rss hash flag */
const __m128i rss_flags = _mm_set_epi8(0, 0, 0, 0,
- 0, 0, 0, PKT_RX_RSS_HASH,
- PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH, 0,
- PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, 0);
+ 0, 0, 0, RTE_MBUF_F_RX_RSS_HASH,
+ RTE_MBUF_F_RX_RSS_HASH, 0, RTE_MBUF_F_RX_RSS_HASH, 0,
+ RTE_MBUF_F_RX_RSS_HASH, RTE_MBUF_F_RX_RSS_HASH,
+ RTE_MBUF_F_RX_RSS_HASH, 0);
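Each of these _mm_set_epi8 tables is later consumed by _mm_shuffle_epi8, which uses the low nibble of every source byte as an index into the 16-byte table, i.e. sixteen parallel table lookups per instruction; that is why every flag value above must fit in one byte. A self-contained sketch of the primitive with made-up values (build with -mssse3):

#include <stdio.h>
#include <stdint.h>
#include <tmmintrin.h>	/* SSSE3: _mm_shuffle_epi8 */

int main(void)
{
	/* identity table: entry i holds the value i */
	const __m128i table = _mm_set_epi8(15, 14, 13, 12, 11, 10, 9, 8,
					7, 6, 5, 4, 3, 2, 1, 0);
	/* indices to look up in the low four lanes */
	const __m128i idx = _mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
					0, 0, 0, 0, 9, 4, 2, 7);
	uint8_t out[16];

	_mm_storeu_si128((__m128i *)out, _mm_shuffle_epi8(table, idx));
	printf("%u %u %u %u\n", out[0], out[1], out[2], out[3]); /* 7 2 4 9 */
	return 0;
}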
/* Calculate RSS hash and VLAN fields */
ptype0 = _mm_unpacklo_epi16(descs[0], descs[1]);
#define fm10k_desc_to_pktype_v(desc, rx_pkts) do {} while (0)
#endif
-int __attribute__((cold))
+int __rte_cold
fm10k_rx_vec_condition_check(struct rte_eth_dev *dev)
{
#ifndef RTE_LIBRTE_IEEE1588
struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
- struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
+ struct rte_eth_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
#ifndef RTE_FM10K_RX_OLFLAGS_ENABLE
- /* whithout rx ol_flags, no VP flag report */
- if (rxmode->hw_vlan_extend != 0)
+ /* without rx ol_flags, no VP flag report */
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
return -1;
#endif
return -1;
/* no header split support */
- if (rxmode->header_split == 1)
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_HEADER_SPLIT)
return -1;
return 0;
#endif
}
-int __attribute__((cold))
+int __rte_cold
fm10k_rxq_vec_setup(struct fm10k_rx_queue *rxq)
{
uintptr_t p;
struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */
mb_def.nb_segs = 1;
- /* data_off will be ajusted after new mbuf allocated for 512-byte
+ /* data_off will be adjusted after new mbuf allocated for 512-byte
* alignment.
*/
mb_def.data_off = RTE_PKTMBUF_HEADROOM;
/* Flush mbuf with pkt template.
* Data to be rearmed is 6 bytes long.
- * Though, RX will overwrite ol_flags that are coming next
- * anyway. So overwrite whole 8 bytes with one load:
- * 6 bytes of rearm_data plus first 2 bytes of ol_flags.
*/
p0 = (uintptr_t)&mb0->rearm_data;
*(uint64_t *)p0 = rxq->mbuf_initializer;
p1 = (uintptr_t)&mb1->rearm_data;
*(uint64_t *)p1 = rxq->mbuf_initializer;
- /* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */
+ /* load buf_addr(lo 64bit) and buf_iova(hi 64bit) */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_iova) !=
+ offsetof(struct rte_mbuf, buf_addr) + 8);
vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);
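The RTE_BUILD_BUG_ON above pins down why a single 16-byte load per mbuf works here: buf_iova must sit exactly 8 bytes after buf_addr so both land in one xmm register. A standalone sketch of the same pattern, using a hypothetical struct in place of rte_mbuf:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>
#include <inttypes.h>
#include <emmintrin.h>

/* hypothetical stand-ins for buf_addr (lo) and buf_iova (hi) */
struct two_fields {
	uint64_t lo;
	uint64_t hi;
};

int main(void)
{
	struct two_fields f = { .lo = 0x1111, .hi = 0x2222 };
	uint64_t out[2];

	/* same layout guarantee the driver asserts for rte_mbuf */
	_Static_assert(offsetof(struct two_fields, hi) ==
			offsetof(struct two_fields, lo) + 8,
			"fields must be adjacent");

	/* one unaligned 16-byte load picks up both fields */
	_mm_storeu_si128((__m128i *)out,
			_mm_loadu_si128((const __m128i *)&f.lo));
	printf("%#" PRIx64 " %#" PRIx64 "\n", out[0], out[1]);
	return 0;
}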
FM10K_PCI_REG_WRITE(rxq->tail_ptr, rx_id);
}
-void __attribute__((cold))
+void __rte_cold
fm10k_rx_queue_release_mbufs_vec(struct fm10k_rx_queue *rxq)
{
const unsigned mask = rxq->nb_desc - 1;
return;
/* free all mbufs that are valid in the ring */
- for (i = rxq->next_dd; i != rxq->rxrearm_start; i = (i + 1) & mask)
- rte_pktmbuf_free_seg(rxq->sw_ring[i]);
+ if (rxq->rxrearm_nb == 0) {
+ for (i = 0; i < rxq->nb_desc; i++)
+ if (rxq->sw_ring[i] != NULL)
+ rte_pktmbuf_free_seg(rxq->sw_ring[i]);
+ } else {
+ for (i = rxq->next_dd; i != rxq->rxrearm_start;
+ i = (i + 1) & mask)
+ rte_pktmbuf_free_seg(rxq->sw_ring[i]);
+ }
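The two-branch free matters because when rxrearm_nb is zero the software ring is fully populated and next_dd equals rxrearm_start, so the masked walk in the else branch would terminate immediately and free nothing; scanning all nb_desc entries and skipping NULL slots covers that case.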
rxq->rxrearm_nb = rxq->nb_desc;
/* set all entries to NULL */
if (!(rxdp->d.staterr & FM10K_RXD_STATUS_DD))
return 0;
- /* Vecotr RX will process 4 packets at a time, strip the unaligned
+ /* Vector RX will process 4 packets at a time, strip the unaligned
* tails in case it's not multiple of 4.
*/
nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_FM10K_DESCS_PER_LOOP);
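The vector loop consumes exactly RTE_FM10K_DESCS_PER_LOOP (4) descriptors per iteration, so the burst is first clipped to a multiple of four. RTE_ALIGN_FLOOR is the usual power-of-two round-down; a one-function sketch of the equivalent arithmetic:

#include <stdio.h>
#include <stdint.h>

/* round v down to a multiple of align (align must be a power of two) */
static uint16_t align_floor(uint16_t v, uint16_t align)
{
	return v & (uint16_t)~(align - 1);
}

int main(void)
{
	printf("%u\n", align_floor(35, 4));	/* prints 32 */
	return 0;
}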
0xFF, 0xFF, /* skip high 16 bits pkt_type */
0xFF, 0xFF /* Skip pkt_type field in shuffle operation */
);
+ /*
+ * Compile-time verify the shuffle mask
+ * NOTE: some field positions already verified above, but duplicated
+ * here for completeness in case of future modifications.
+ */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, vlan_tci) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 10);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12);
/* Cache is empty -> need to scan the buffer rings, but first move
* the next 'n' mbufs into the cache
__m128i descs0[RTE_FM10K_DESCS_PER_LOOP];
__m128i pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
__m128i zero, staterr, sterr_tmp1, sterr_tmp2;
- __m128i mbp1, mbp2; /* two mbuf pointer in one XMM reg. */
+ __m128i mbp1;
+ /* 2 64 bit or 4 32 bit mbuf pointers in one XMM reg. */
+#if defined(RTE_ARCH_X86_64)
+ __m128i mbp2;
+#endif
- /* B.1 load 1 mbuf point */
+ /* B.1 load 2 (64 bit) or 4 (32 bit) mbuf pointers */
mbp1 = _mm_loadu_si128((__m128i *)&mbufp[pos]);
/* Read desc statuses backwards to avoid race condition */
- /* A.1 load 4 pkts desc */
+ /* A.1 load desc[3] */
descs0[3] = _mm_loadu_si128((__m128i *)(rxdp + 3));
rte_compiler_barrier();
- /* B.2 copy 2 mbuf point into rx_pkts */
+ /* B.2 copy 2 (64 bit) or 4 (32 bit) mbuf pointers into rx_pkts */
_mm_storeu_si128((__m128i *)&rx_pkts[pos], mbp1);
- /* B.1 load 1 mbuf point */
+#if defined(RTE_ARCH_X86_64)
+ /* B.1 load 2 (64 bit) mbuf pointers */
mbp2 = _mm_loadu_si128((__m128i *)&mbufp[pos+2]);
+#endif
+ /* A.1 load desc[2-0] */
descs0[2] = _mm_loadu_si128((__m128i *)(rxdp + 2));
rte_compiler_barrier();
- /* B.1 load 2 mbuf point */
descs0[1] = _mm_loadu_si128((__m128i *)(rxdp + 1));
rte_compiler_barrier();
descs0[0] = _mm_loadu_si128((__m128i *)(rxdp));
+#if defined(RTE_ARCH_X86_64)
/* B.2 copy 2 mbuf pointers into rx_pkts */
_mm_storeu_si128((__m128i *)&rx_pkts[pos+2], mbp2);
+#endif
/* avoid compiler reorder optimization */
rte_compiler_barrier();
/* and with mask to extract bits, flipping 1-0 */
__m128i eop_bits = _mm_andnot_si128(staterr, eop_check);
/* the staterr values are not in order, as the count
- * count of dd bits doesn't care. However, for end of
+ * of dd bits does not depend on it. However, for end of
* packet tracking, we do care, so shuffle. This also
* compresses the 32-bit values to 8-bit
*/
fm10k_desc_to_pktype_v(descs0, &rx_pkts[pos]);
- /* C.4 calc avaialbe number of desc */
+ /* C.4 calc available number of desc */
var = __builtin_popcountll(_mm_cvtsi128_si64(staterr));
nb_pkts_recd += var;
if (likely(var != RTE_FM10K_DESCS_PER_LOOP))
return pkt_idx;
}
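After the shuffle, every completed descriptor contributes one set DD bit in the low 64 bits of staterr, so a population count yields the number of packets received in this iteration, and a value below RTE_FM10K_DESCS_PER_LOOP means the loop hit the end of the finished descriptors. A toy illustration with hypothetical lane values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* hypothetical: DD bit set in 3 of the 4 byte lanes */
	uint64_t staterr = 0x0000000000010101ULL;

	printf("%d descriptors done\n", __builtin_popcountll(staterr));
	return 0;
}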
-/*
- * vPMD receive routine that reassembles scattered packets
+/**
+ * vPMD receive routine that reassembles a single burst of 32 scattered packets
*
* Notice:
* - don't support ol_flags for rss and csum err
- * - nb_pkts > RTE_FM10K_MAX_RX_BURST, only scan RTE_FM10K_MAX_RX_BURST
- *   numbers of DD bit
*/
-uint16_t
-fm10k_recv_scattered_pkts_vec(void *rx_queue,
- struct rte_mbuf **rx_pkts,
- uint16_t nb_pkts)
+static uint16_t
+fm10k_recv_scattered_burst_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
{
struct fm10k_rx_queue *rxq = rx_queue;
uint8_t split_flags[RTE_FM10K_MAX_RX_BURST] = {0};
i++;
if (i == nb_bufs)
return nb_bufs;
+ rxq->pkt_first_seg = rx_pkts[i];
}
return i + fm10k_reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
&split_flags[i]);
}
+/**
+ * vPMD receive routine that reassembles scattered packets.
+ */
+uint16_t
+fm10k_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ uint16_t retval = 0;
+
+ while (nb_pkts > RTE_FM10K_MAX_RX_BURST) {
+ uint16_t burst;
+
+ burst = fm10k_recv_scattered_burst_vec(rx_queue,
+ rx_pkts + retval,
+ RTE_FM10K_MAX_RX_BURST);
+ retval += burst;
+ nb_pkts -= burst;
+ if (burst < RTE_FM10K_MAX_RX_BURST)
+ return retval;
+ }
+
+ return retval + fm10k_recv_scattered_burst_vec(rx_queue,
+ rx_pkts + retval,
+ nb_pkts);
+}
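Splitting the routine in two keeps the 32-entry split_flags array in the burst function sufficient: the public fm10k_recv_scattered_pkts_vec now feeds it at most RTE_FM10K_MAX_RX_BURST packets per call and stops as soon as a short burst indicates the ring is drained, which is why the old "only scan RTE_FM10K_MAX_RX_BURST" caveat could be dropped from the header comment.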
+
static const struct fm10k_txq_ops vec_txq_ops = {
.reset = fm10k_reset_tx_queue,
};
-void __attribute__((cold))
+void __rte_cold
fm10k_txq_vec_setup(struct fm10k_tx_queue *txq)
{
txq->ops = &vec_txq_ops;
}
-int __attribute__((cold))
+int __rte_cold
fm10k_tx_vec_condition_check(struct fm10k_tx_queue *txq)
{
/* Vector TX can't offload any features yet */
- if ((txq->txq_flags & FM10K_SIMPLE_TX_FLAG) != FM10K_SIMPLE_TX_FLAG)
+ if (txq->offloads != 0)
return -1;
if (txq->tx_ftag_en)
struct rte_mbuf *pkt, uint64_t flags)
{
__m128i descriptor = _mm_set_epi64x(flags << 56 |
- pkt->vlan_tci << 16 | pkt->data_len,
+ (uint64_t)pkt->vlan_tci << 16 | (uint64_t)pkt->data_len,
MBUF_DMA_ADDR(pkt));
_mm_store_si128((__m128i *)txdp, descriptor);
}
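The new casts in vtx1 are not cosmetic: vlan_tci is a uint16_t, so vlan_tci << 16 is evaluated after promotion to signed int, and a tag with bit 15 set would sign-extend when widened to 64 bits, clobbering the flags byte. A small standalone demonstration of the hazard (hypothetical values):

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

int main(void)
{
	uint16_t vlan_tci = 0x8100;	/* tag with bit 15 set */

	/* what the old code effectively did: shift in (signed) int,
	 * then widen to 64 bits, which sign-extends */
	int32_t promoted = (int32_t)((uint32_t)vlan_tci << 16);
	uint64_t bad = (uint64_t)promoted;

	/* what the patched code does: widen first, then shift */
	uint64_t good = (uint64_t)vlan_tci << 16;

	printf("bad  %#" PRIx64 "\ngood %#" PRIx64 "\n", bad, good);
	/* bad  0xffffffff81000000
	 * good 0x81000000 */
	return 0;
}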
vtx1(txdp, *pkt, flags);
}
-static inline int __attribute__((always_inline))
+static __rte_always_inline int
fm10k_tx_free_bufs(struct fm10k_tx_queue *txq)
{
struct rte_mbuf **txep;
return txq->rs_thresh;
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
tx_backlog_entry(struct rte_mbuf **txep,
struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
return nb_pkts;
}
-static void __attribute__((cold))
+static void __rte_cold
fm10k_reset_tx_queue(struct fm10k_tx_queue *txq)
{
static const struct fm10k_tx_desc zeroed_desc = {0};