void
cn9k_eth_set_rx_function(struct rte_eth_dev *eth_dev)
{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+
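+ /* Burst function table indexed by the MARK/CKSUM/PTYPE/RSS flag bits */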
const eth_rx_burst_t nix_eth_rx_burst[2][2][2][2] = {
#define R(name, f3, f2, f1, f0, flags) \
[f3][f2][f1][f0] = cn9k_nix_recv_pkts_##name,

NIX_RX_FASTPATH_MODES
#undef R
};
+ const eth_rx_burst_t nix_eth_rx_burst_mseg[2][2][2][2] = {
+#define R(name, f3, f2, f1, f0, flags) \
+ [f3][f2][f1][f0] = cn9k_nix_recv_pkts_mseg_##name,
+
+ NIX_RX_FASTPATH_MODES
+#undef R
+ };
+
pick_rx_func(eth_dev, nix_eth_rx_burst);
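+ /* With Rx scatter enabled, buffers may be chained; switch to the
+  * multi-seg capable burst variants.
+  */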
+ if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
+ pick_rx_func(eth_dev, nix_eth_rx_burst_mseg);
+
+ /* Copy the multi-seg version with no offloads for the teardown sequence */
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ dev->rx_pkt_burst_no_offload =
+ nix_eth_rx_burst_mseg[0][0][0][0];
rte_mb();
}
return ol_flags;
}
+static __rte_always_inline void
+nix_cqe_xtract_mseg(const union nix_rx_parse_u *rx, struct rte_mbuf *mbuf,
+ uint64_t rearm)
+{
+ const rte_iova_t *iova_list;
+ struct rte_mbuf *head;
+ const rte_iova_t *eol;
+ uint8_t nb_segs;
+ uint64_t sg;
+
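+ /* The 64-bit word following the Rx parse area is the NIX SG
+  * sub-descriptor: bits 49:48 hold the segment count and each
+  * 16-bit field holds one segment's byte length.
+  */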
+ sg = *(const uint64_t *)(rx + 1);
+ nb_segs = (sg >> 48) & 0x3;
+ mbuf->nb_segs = nb_segs;
+ mbuf->data_len = sg & 0xFFFF;
+ sg = sg >> 16;
+
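+ /* (desc_sizem1 + 1) << 1 converts the CQE descriptor size from 16B
+  * units to 8B iova words; eol marks the end of the SG area.
+  */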
+ eol = ((const rte_iova_t *)(rx + 1) +
+ ((rx->cn9k.desc_sizem1 + 1) << 1));
+ /* Skip SG_S and first IOVA */
+ iova_list = ((const rte_iova_t *)(rx + 1)) + 2;
+ nb_segs--;
+
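+ /* Clear data_off (low 16 bits of the rearm word); trailing
+  * segments start at the buffer base with no headroom.
+  */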
+ rearm = rearm & ~0xFFFF;
+
+ head = mbuf;
+ while (nb_segs) {
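+ /* The SG entry holds the buffer address; the mbuf header sits
+  * immediately before it, so step back one struct rte_mbuf.
+  */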
+ mbuf->next = ((struct rte_mbuf *)*iova_list) - 1;
+ mbuf = mbuf->next;
+
+ __mempool_check_cookies(mbuf->pool, (void **)&mbuf, 1, 1);
+
+ mbuf->data_len = sg & 0xFFFF;
+ sg = sg >> 16;
+ *(uint64_t *)(&mbuf->rearm_data) = rearm;
+ nb_segs--;
+ iova_list++;
+
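+ /* Out of entries in this SG word but not yet at the list end:
+  * the next 64-bit word is a further SG sub-descriptor; reload
+  * it and continue chaining.
+  */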
+ if (!nb_segs && (iova_list + 1 < eol)) {
+ sg = *(const uint64_t *)(iova_list);
+ nb_segs = (sg >> 48) & 0x3;
+ head->nb_segs += nb_segs;
+ iova_list = (const rte_iova_t *)(iova_list + 1);
+ }
+ }
+ mbuf->next = NULL;
+}
+
static __rte_always_inline void
cn9k_nix_cqe_to_mbuf(const struct nix_cqe_hdr_s *cq, const uint32_t tag,
struct rte_mbuf *mbuf, const void *lookup_mem,
*(uint64_t *)(&mbuf->rearm_data) = val;
mbuf->pkt_len = len;
- mbuf->data_len = len;
- mbuf->next = NULL;
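+ /* Multi-seg: per-segment lengths and next pointers come from the
+  * CQE SG list; otherwise the CQE carries a single segment.
+  */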
+ if (flag & NIX_RX_MULTI_SEG_F) {
+ nix_cqe_xtract_mseg(rx, mbuf, val);
+ } else {
+ mbuf->data_len = len;
+ mbuf->next = NULL;
+ }
}
static inline uint16_t
R(mark_cksum_ptype, 1, 1, 1, 0, MARK_F | CKSUM_F | PTYPE_F)\
R(mark_cksum_ptype_rss, 1, 1, 1, 1, MARK_F | CKSUM_F | PTYPE_F | RSS_F)
-#define R(name, f3, f2, f1, f0, flags) \
+#define R(name, f3, f2, f1, f0, flags) \
uint16_t __rte_noinline __rte_hot cn9k_nix_recv_pkts_##name( \
+ void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts); \
+ \
+ uint16_t __rte_noinline __rte_hot cn9k_nix_recv_pkts_mseg_##name( \
void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts);
NIX_RX_FASTPATH_MODES
--- /dev/null
+++ b/drivers/net/cnxk/cn9k_rx_mseg.c
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+
+#include "cn9k_ethdev.h"
+#include "cn9k_rx.h"
+
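+/*
+ * Expand NIX_RX_FASTPATH_MODES into one noinline multi-seg Rx burst
+ * function per offload combination, each calling the inline receive
+ * loop with NIX_RX_MULTI_SEG_F OR'ed into its compile-time flags.
+ */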
+#define R(name, f3, f2, f1, f0, flags) \
+ uint16_t __rte_noinline __rte_hot cn9k_nix_recv_pkts_mseg_##name( \
+ void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts) \
+ { \
+ return cn9k_nix_recv_pkts(rx_queue, rx_pkts, pkts, \
+ (flags) | NIX_RX_MULTI_SEG_F); \
+ }
+
+NIX_RX_FASTPATH_MODES
+#undef R
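+
+/*
+ * For illustration, assuming NIX_RX_FASTPATH_MODES contains an entry
+ * such as R(no_offload, 0, 0, 0, 0, 0), the template above expands to
+ * roughly:
+ *
+ *   uint16_t __rte_noinline __rte_hot cn9k_nix_recv_pkts_mseg_no_offload(
+ *           void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts)
+ *   {
+ *           return cn9k_nix_recv_pkts(rx_queue, rx_pkts, pkts,
+ *                                     NIX_RX_MULTI_SEG_F);
+ *   }
+ */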