Add base PTP timesync support for cn9k and cn10k platforms. Rx and Tx
fast-path timestamping is added behind the new NIX_RX_OFFLOAD_TSTAMP_F
and NIX_TX_OFFLOAD_TSTAMP_F flags, and the common timesync_enable and
timesync_disable callbacks are implemented.
Signed-off-by: Sunil Kumar Kori <skori@marvell.com>
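For reference, one way to exercise this support end to end is testpmd's
IEEE 1588 forwarding mode (hypothetical invocation; the device BDF and
EAL options depend on the platform):

    dpdk-testpmd -a 0002:02:00.0 -- -i
    testpmd> set fwd ieee1588
    testpmd> start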
if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
flags |= NIX_RX_MULTI_SEG_F;
+ if (dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP)
+ flags |= NIX_RX_OFFLOAD_TSTAMP_F;
+
if (!dev->ptype_disable)
flags |= NIX_RX_OFFLOAD_PTYPE_F;
flags |= (NIX_TX_OFFLOAD_TSO_F | NIX_TX_OFFLOAD_OL3_OL4_CSUM_F |
NIX_TX_OFFLOAD_L3_L4_CSUM_F);
+ if (dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP)
+ flags |= NIX_TX_OFFLOAD_TSTAMP_F;
+
return flags;
}
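A single application-visible offload, DEV_RX_OFFLOAD_TIMESTAMP, drives
both the Rx and Tx fast-path flags computed above. A minimal
configure-time sketch (port_id, nb_rxq and nb_txq are assumed; error
handling trimmed):

    struct rte_eth_conf conf = {0};

    /* Requesting Rx timestamping makes the driver set both
     * NIX_RX_OFFLOAD_TSTAMP_F and NIX_TX_OFFLOAD_TSTAMP_F.
     */
    conf.rxmode.offloads |= DEV_RX_OFFLOAD_TIMESTAMP;
    rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);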
{
struct nix_send_ext_s *send_hdr_ext;
union nix_send_hdr_w0_u send_hdr_w0;
+ struct nix_send_mem_s *send_mem;
union nix_send_sg_s sg_w0;
- RTE_SET_USED(dev);
send_hdr_ext = (struct nix_send_ext_s *)&txq->cmd[0];
send_hdr_ext->w0.subdc = NIX_SUBDC_EXT;
+ if (dev->tx_offload_flags & NIX_TX_OFFLOAD_TSTAMP_F) {
+ /* Default: one seg packet would have:
+ * 2(HDR) + 2(EXT) + 1(SG) + 1(IOVA) + 2(MEM)
+ * => 8/2 - 1 = 3
+ */
+ send_hdr_w0.sizem1 = 3;
+ send_hdr_ext->w0.tstmp = 1;
+
+ /* send_mem occupies the two dwords that follow the ext header
+ * in txq->cmd.
+ */
+ send_mem = (struct nix_send_mem_s *)(txq->cmd + 2);
+ send_mem->w0.subdc = NIX_SUBDC_MEM;
+ send_mem->w0.alg = NIX_SENDMEMALG_SETTSTMP;
+ send_mem->addr = dev->tstamp.tx_tstamp_iova;
+ }
} else {
/* 2(HDR) + 1(SG) + 1(IOVA) = 4/2 - 1 = 1 */
send_hdr_w0.sizem1 = 1;
rxq->wdata = cq->wdata;
rxq->head = cq->head;
rxq->qmask = cq->qmask;
+ rxq->tstamp = &dev->tstamp;
/* Data offset from data to start of mbuf is first_skip */
rxq->data_off = rq->first_skip;
cn10k_nix_dev_start(struct rte_eth_dev *eth_dev)
{
struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ struct roc_nix *nix = &dev->nix;
int rc;
/* Common eth dev start */
if (rc)
return rc;
+ /* Update the VF about the data offset shifted by 8 bytes if PTP
+ * is already enabled in the PF owning this VF
+ */
+ if (dev->ptp_en && !roc_nix_is_pf(nix) && !roc_nix_is_sdp(nix))
+ nix_ptp_enable_vf(eth_dev);
+
/* Setting up the rx[tx]_offload_flags due to change
* in rx[tx]_offloads.
*/
uint32_t available;
uint16_t data_off;
uint16_t rq;
+ struct cnxk_timesync_info *tstamp;
} __plt_cache_aligned;
/* Rx and Tx routines */
#include "cn10k_ethdev.h"
#include "cn10k_rx.h"
-#define R(name, f3, f2, f1, f0, flags) \
+#define R(name, f4, f3, f2, f1, f0, flags) \
uint16_t __rte_noinline __rte_hot cn10k_nix_recv_pkts_##name( \
void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts) \
{ \
static inline void
pick_rx_func(struct rte_eth_dev *eth_dev,
- const eth_rx_burst_t rx_burst[2][2][2][2])
+ const eth_rx_burst_t rx_burst[2][2][2][2][2])
{
struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
- /* [MARK] [CKSUM] [PTYPE] [RSS] */
+ /* [TS] [MARK] [CKSUM] [PTYPE] [RSS] */
eth_dev->rx_pkt_burst = rx_burst
+ [!!(dev->rx_offload_flags & NIX_RX_OFFLOAD_TSTAMP_F)]
[!!(dev->rx_offload_flags & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
[!!(dev->rx_offload_flags & NIX_RX_OFFLOAD_CHECKSUM_F)]
[!!(dev->rx_offload_flags & NIX_RX_OFFLOAD_PTYPE_F)]
{
struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
- const eth_rx_burst_t nix_eth_rx_burst[2][2][2][2] = {
-#define R(name, f3, f2, f1, f0, flags) \
- [f3][f2][f1][f0] = cn10k_nix_recv_pkts_##name,
+ const eth_rx_burst_t nix_eth_rx_burst[2][2][2][2][2] = {
+#define R(name, f4, f3, f2, f1, f0, flags) \
+ [f4][f3][f2][f1][f0] = cn10k_nix_recv_pkts_##name,
NIX_RX_FASTPATH_MODES
#undef R
};
- const eth_rx_burst_t nix_eth_rx_burst_mseg[2][2][2][2] = {
-#define R(name, f3, f2, f1, f0, flags) \
- [f3][f2][f1][f0] = cn10k_nix_recv_pkts_mseg_##name,
+ const eth_rx_burst_t nix_eth_rx_burst_mseg[2][2][2][2][2] = {
+#define R(name, f4, f3, f2, f1, f0, flags) \
+ [f4][f3][f2][f1][f0] = cn10k_nix_recv_pkts_mseg_##name,
NIX_RX_FASTPATH_MODES
#undef R
};
- const eth_rx_burst_t nix_eth_rx_vec_burst[2][2][2][2] = {
-#define R(name, f3, f2, f1, f0, flags) \
- [f3][f2][f1][f0] = cn10k_nix_recv_pkts_vec_##name,
+ const eth_rx_burst_t nix_eth_rx_vec_burst[2][2][2][2][2] = {
+#define R(name, f4, f3, f2, f1, f0, flags) \
+ [f4][f3][f2][f1][f0] = cn10k_nix_recv_pkts_vec_##name,
NIX_RX_FASTPATH_MODES
#undef R
};
- if (dev->scalar_ena)
+ /* When PTP is enabled, the scalar rx function should be chosen as
+ * most PTP applications receive a single packet per rx burst.
+ */
+ if (dev->scalar_ena || dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP)
pick_rx_func(eth_dev, nix_eth_rx_burst);
else
pick_rx_func(eth_dev, nix_eth_rx_vec_burst);
/* Copy multi seg version with no offload for tear down sequence */
if (rte_eal_process_type() == RTE_PROC_PRIMARY)
dev->rx_pkt_burst_no_offload =
- nix_eth_rx_burst_mseg[0][0][0][0];
+ nix_eth_rx_burst_mseg[0][0][0][0][0];
rte_mb();
}
#include <rte_ether.h>
#include <rte_vect.h>
+#include <cnxk_ethdev.h>
+
#define NIX_RX_OFFLOAD_NONE (0)
#define NIX_RX_OFFLOAD_RSS_F BIT(0)
#define NIX_RX_OFFLOAD_PTYPE_F BIT(1)
cn10k_nix_cqe_to_mbuf(cq, cq->tag, mbuf, lookup_mem, mbuf_init,
flags);
+ cnxk_nix_mbuf_to_tstamp(mbuf, rxq->tstamp,
+ (flags & NIX_RX_OFFLOAD_TSTAMP_F),
+ (uint64_t *)((uint8_t *)mbuf + data_off));
rx_pkts[packets++] = mbuf;
roc_prefetch_store_keep(mbuf);
head++;
#define PTYPE_F NIX_RX_OFFLOAD_PTYPE_F
#define CKSUM_F NIX_RX_OFFLOAD_CHECKSUM_F
#define MARK_F NIX_RX_OFFLOAD_MARK_UPDATE_F
-
-/* [MARK] [CKSUM] [PTYPE] [RSS] */
-#define NIX_RX_FASTPATH_MODES \
-R(no_offload, 0, 0, 0, 0, NIX_RX_OFFLOAD_NONE) \
-R(rss, 0, 0, 0, 1, RSS_F) \
-R(ptype, 0, 0, 1, 0, PTYPE_F) \
-R(ptype_rss, 0, 0, 1, 1, PTYPE_F | RSS_F) \
-R(cksum, 0, 1, 0, 0, CKSUM_F) \
-R(cksum_rss, 0, 1, 0, 1, CKSUM_F | RSS_F) \
-R(cksum_ptype, 0, 1, 1, 0, CKSUM_F | PTYPE_F) \
-R(cksum_ptype_rss, 0, 1, 1, 1, CKSUM_F | PTYPE_F | RSS_F) \
-R(mark, 1, 0, 0, 0, MARK_F) \
-R(mark_rss, 1, 0, 0, 1, MARK_F | RSS_F) \
-R(mark_ptype, 1, 0, 1, 0, MARK_F | PTYPE_F) \
-R(mark_ptype_rss, 1, 0, 1, 1, MARK_F | PTYPE_F | RSS_F) \
-R(mark_cksum, 1, 1, 0, 0, MARK_F | CKSUM_F) \
-R(mark_cksum_rss, 1, 1, 0, 1, MARK_F | CKSUM_F | RSS_F) \
-R(mark_cksum_ptype, 1, 1, 1, 0, MARK_F | CKSUM_F | PTYPE_F)\
-R(mark_cksum_ptype_rss, 1, 1, 1, 1, MARK_F | CKSUM_F | PTYPE_F | RSS_F)
-
-#define R(name, f3, f2, f1, f0, flags) \
+#define TS_F NIX_RX_OFFLOAD_TSTAMP_F
+
+/* [TS] [MARK] [CKSUM] [PTYPE] [RSS] */
+#define NIX_RX_FASTPATH_MODES \
+R(no_offload, 0, 0, 0, 0, 0, NIX_RX_OFFLOAD_NONE) \
+R(rss, 0, 0, 0, 0, 1, RSS_F) \
+R(ptype, 0, 0, 0, 1, 0, PTYPE_F) \
+R(ptype_rss, 0, 0, 0, 1, 1, PTYPE_F | RSS_F) \
+R(cksum, 0, 0, 1, 0, 0, CKSUM_F) \
+R(cksum_rss, 0, 0, 1, 0, 1, CKSUM_F | RSS_F) \
+R(cksum_ptype, 0, 0, 1, 1, 0, CKSUM_F | PTYPE_F) \
+R(cksum_ptype_rss, 0, 0, 1, 1, 1, CKSUM_F | PTYPE_F | RSS_F) \
+R(mark, 0, 1, 0, 0, 0, MARK_F) \
+R(mark_rss, 0, 1, 0, 0, 1, MARK_F | RSS_F) \
+R(mark_ptype, 0, 1, 0, 1, 0, MARK_F | PTYPE_F) \
+R(mark_ptype_rss, 0, 1, 0, 1, 1, MARK_F | PTYPE_F | RSS_F) \
+R(mark_cksum, 0, 1, 1, 0, 0, MARK_F | CKSUM_F) \
+R(mark_cksum_rss, 0, 1, 1, 0, 1, MARK_F | CKSUM_F | RSS_F) \
+R(mark_cksum_ptype, 0, 1, 1, 1, 0, MARK_F | CKSUM_F | PTYPE_F) \
+R(mark_cksum_ptype_rss, 0, 1, 1, 1, 1, MARK_F | CKSUM_F | PTYPE_F | RSS_F)\
+R(ts, 1, 0, 0, 0, 0, TS_F) \
+R(ts_rss, 1, 0, 0, 0, 1, TS_F | RSS_F) \
+R(ts_ptype, 1, 0, 0, 1, 0, TS_F | PTYPE_F) \
+R(ts_ptype_rss, 1, 0, 0, 1, 1, TS_F | PTYPE_F | RSS_F) \
+R(ts_cksum, 1, 0, 1, 0, 0, TS_F | CKSUM_F) \
+R(ts_cksum_rss, 1, 0, 1, 0, 1, TS_F | CKSUM_F | RSS_F) \
+R(ts_cksum_ptype, 1, 0, 1, 1, 0, TS_F | CKSUM_F | PTYPE_F) \
+R(ts_cksum_ptype_rss, 1, 0, 1, 1, 1, TS_F | CKSUM_F | PTYPE_F | RSS_F)\
+R(ts_mark, 1, 1, 0, 0, 0, TS_F | MARK_F) \
+R(ts_mark_rss, 1, 1, 0, 0, 1, TS_F | MARK_F | RSS_F) \
+R(ts_mark_ptype, 1, 1, 0, 1, 0, TS_F | MARK_F | PTYPE_F) \
+R(ts_mark_ptype_rss, 1, 1, 0, 1, 1, TS_F | MARK_F | PTYPE_F | RSS_F)\
+R(ts_mark_cksum, 1, 1, 1, 0, 0, TS_F | MARK_F | CKSUM_F) \
+R(ts_mark_cksum_rss, 1, 1, 1, 0, 1, TS_F | MARK_F | CKSUM_F | RSS_F)\
+R(ts_mark_cksum_ptype, 1, 1, 1, 1, 0, TS_F | MARK_F | CKSUM_F | PTYPE_F)\
+R(ts_mark_cksum_ptype_rss, 1, 1, 1, 1, 1, TS_F | MARK_F | CKSUM_F | PTYPE_F | RSS_F)
+
+#define R(name, f4, f3, f2, f1, f0, flags) \
uint16_t __rte_noinline __rte_hot cn10k_nix_recv_pkts_##name( \
void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts); \
\
#include "cn10k_ethdev.h"
#include "cn10k_rx.h"
-#define R(name, f3, f2, f1, f0, flags) \
+#define R(name, f4, f3, f2, f1, f0, flags) \
uint16_t __rte_noinline __rte_hot cn10k_nix_recv_pkts_mseg_##name( \
void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts) \
{ \
#include "cn10k_ethdev.h"
#include "cn10k_rx.h"
-#define R(name, f3, f2, f1, f0, flags) \
+#define R(name, f4, f3, f2, f1, f0, flags) \
uint16_t __rte_noinline __rte_hot \
cn10k_nix_recv_pkts_vec_##name(void *rx_queue, \
struct rte_mbuf **rx_pkts, \
uint16_t pkts) \
{ \
+ /* TSTMP is not supported by vector */ \
+ if ((flags) & NIX_RX_OFFLOAD_TSTAMP_F) \
+ return 0; \
return cn10k_nix_recv_pkts_vector(rx_queue, rx_pkts, pkts, \
(flags)); \
}
#include "cn10k_ethdev.h"
#include "cn10k_tx.h"
-#define T(name, f4, f3, f2, f1, f0, sz, flags) \
+#define T(name, f5, f4, f3, f2, f1, f0, sz, flags) \
uint16_t __rte_noinline __rte_hot cn10k_nix_xmit_pkts_##name( \
void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t pkts) \
{ \
static inline void
pick_tx_func(struct rte_eth_dev *eth_dev,
- const eth_tx_burst_t tx_burst[2][2][2][2][2])
+ const eth_tx_burst_t tx_burst[2][2][2][2][2][2])
{
struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
- /* [TSO] [NOFF] [VLAN] [OL3_OL4_CSUM] [IL3_IL4_CSUM] */
+ /* [TSP] [TSO] [NOFF] [VLAN] [OL3_OL4_CSUM] [IL3_IL4_CSUM] */
eth_dev->tx_pkt_burst = tx_burst
+ [!!(dev->tx_offload_flags & NIX_TX_OFFLOAD_TSTAMP_F)]
[!!(dev->tx_offload_flags & NIX_TX_OFFLOAD_TSO_F)]
[!!(dev->tx_offload_flags & NIX_TX_OFFLOAD_MBUF_NOFF_F)]
[!!(dev->tx_offload_flags & NIX_TX_OFFLOAD_VLAN_QINQ_F)]
{
struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
- const eth_tx_burst_t nix_eth_tx_burst[2][2][2][2][2] = {
-#define T(name, f4, f3, f2, f1, f0, sz, flags) \
- [f4][f3][f2][f1][f0] = cn10k_nix_xmit_pkts_##name,
+ const eth_tx_burst_t nix_eth_tx_burst[2][2][2][2][2][2] = {
+#define T(name, f5, f4, f3, f2, f1, f0, sz, flags) \
+ [f5][f4][f3][f2][f1][f0] = cn10k_nix_xmit_pkts_##name,
NIX_TX_FASTPATH_MODES
#undef T
};
- const eth_tx_burst_t nix_eth_tx_burst_mseg[2][2][2][2][2] = {
-#define T(name, f4, f3, f2, f1, f0, sz, flags) \
- [f4][f3][f2][f1][f0] = cn10k_nix_xmit_pkts_mseg_##name,
+ const eth_tx_burst_t nix_eth_tx_burst_mseg[2][2][2][2][2][2] = {
+#define T(name, f5, f4, f3, f2, f1, f0, sz, flags) \
+ [f5][f4][f3][f2][f1][f0] = cn10k_nix_xmit_pkts_mseg_##name,
NIX_TX_FASTPATH_MODES
#undef T
};
- const eth_tx_burst_t nix_eth_tx_vec_burst[2][2][2][2][2] = {
-#define T(name, f4, f3, f2, f1, f0, sz, flags) \
- [f4][f3][f2][f1][f0] = cn10k_nix_xmit_pkts_vec_##name,
+ const eth_tx_burst_t nix_eth_tx_vec_burst[2][2][2][2][2][2] = {
+#define T(name, f5, f4, f3, f2, f1, f0, sz, flags) \
+ [f5][f4][f3][f2][f1][f0] = cn10k_nix_xmit_pkts_vec_##name,
NIX_TX_FASTPATH_MODES
#undef T
if (dev->scalar_ena ||
(dev->tx_offload_flags &
- (NIX_TX_OFFLOAD_VLAN_QINQ_F | NIX_TX_OFFLOAD_TSO_F)))
+ (NIX_TX_OFFLOAD_VLAN_QINQ_F | NIX_TX_OFFLOAD_TSTAMP_F |
+ NIX_TX_OFFLOAD_TSO_F)))
pick_tx_func(eth_dev, nix_eth_tx_burst);
else
pick_tx_func(eth_dev, nix_eth_tx_vec_burst);
#define NIX_TX_OFFLOAD_VLAN_QINQ_F BIT(2)
#define NIX_TX_OFFLOAD_MBUF_NOFF_F BIT(3)
#define NIX_TX_OFFLOAD_TSO_F BIT(4)
+#define NIX_TX_OFFLOAD_TSTAMP_F BIT(5)
/* Flags to control xmit_prepare function.
* Defining it from backwards to denote its been
NIX_TX_OFFLOAD_VLAN_QINQ_F | NIX_TX_OFFLOAD_TSO_F)
#define NIX_TX_NEED_EXT_HDR \
- (NIX_TX_OFFLOAD_VLAN_QINQ_F | NIX_TX_OFFLOAD_TSO_F)
+ (NIX_TX_OFFLOAD_VLAN_QINQ_F | NIX_TX_OFFLOAD_TSTAMP_F | \
+ NIX_TX_OFFLOAD_TSO_F)
#define NIX_XMIT_FC_OR_RETURN(txq, pkts) \
do { \
static __rte_always_inline int
cn10k_nix_tx_ext_subs(const uint16_t flags)
{
- return (flags &
- (NIX_TX_OFFLOAD_VLAN_QINQ_F | NIX_TX_OFFLOAD_TSO_F)) ? 1 : 0;
+ return (flags & NIX_TX_OFFLOAD_TSTAMP_F)
+ ? 2
+ : ((flags &
+ (NIX_TX_OFFLOAD_VLAN_QINQ_F | NIX_TX_OFFLOAD_TSO_F))
+ ? 1
+ : 0);
}
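The count returned above maps directly onto the per-packet command
size, i.e. the sz column in NIX_TX_FASTPATH_MODES below. A
hypothetical self-check helper, assuming the single-seg baseline of
2 (HDR) + 1 (SG) + 1 (IOVA) words plus 2 words per extension
sub-descriptor:

    static inline uint16_t
    cn10k_nix_tx_cmd_words(const uint16_t flags) /* illustrative only */
    {
            /* none -> 4, VLAN/TSO (EXT) -> 6, TSTAMP (EXT + MEM) -> 8 */
            return 4 + (cn10k_nix_tx_ext_subs(flags) << 1);
    }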
static __rte_always_inline uint8_t
*(rte_iova_t *)(lmt_addr + 8) = *(rte_iova_t *)(sg + 1);
}
+static __rte_always_inline void
+cn10k_nix_xmit_prepare_tstamp(uintptr_t lmt_addr, const uint64_t *cmd,
+ const uint64_t ol_flags, const uint16_t no_segdw,
+ const uint16_t flags)
+{
+ if (flags & NIX_TX_OFFLOAD_TSTAMP_F) {
+ const uint8_t is_ol_tstamp = !(ol_flags & PKT_TX_IEEE1588_TMST);
+ struct nix_send_ext_s *send_hdr_ext =
+ (struct nix_send_ext_s *)(lmt_addr + 16);
+ uint64_t *lmt = (uint64_t *)lmt_addr;
+ uint16_t off = (no_segdw - 1) << 1;
+ struct nix_send_mem_s *send_mem;
+
+ send_mem = (struct nix_send_mem_s *)(lmt + off);
+ send_hdr_ext->w0.subdc = NIX_SUBDC_EXT;
+ send_hdr_ext->w0.tstmp = 1;
+ if (flags & NIX_TX_MULTI_SEG_F) {
+ /* Retrieving the default desc values */
+ lmt[off] = cmd[2];
+
+ /* Use a compiler barrier to avoid violation of C
+ * aliasing rules.
+ */
+ rte_compiler_barrier();
+ }
+
+ /* For packets without PKT_TX_IEEE1588_TMST set, the tx tstamp
+ * should not be recorded; hence change the alg type to
+ * NIX_SENDMEMALG_SET and advance the send mem addr field by
+ * 8 bytes so that the actual registered tx tstamp address is
+ * not corrupted.
+ */
+ send_mem->w0.subdc = NIX_SUBDC_MEM;
+ send_mem->w0.alg = NIX_SENDMEMALG_SETTSTMP - (is_ol_tstamp);
+ send_mem->addr =
+ (rte_iova_t)(((uint64_t *)cmd[3]) + is_ol_tstamp);
+ }
+}
+
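The alg/address adjustment above is branchless: subtracting
is_ol_tstamp from NIX_SENDMEMALG_SETTSTMP falls back to the plain SET
algorithm, and bumping the dword pointer by is_ol_tstamp redirects the
write 8 bytes past the registered timestamp address. A standalone
sketch of the selection; the 0x0/0x1 algorithm encodings are
assumptions for illustration only:

    #include <inttypes.h>
    #include <stdio.h>

    #define ALG_SET      0x0 /* assumed NIX_SENDMEMALG_SET encoding */
    #define ALG_SETTSTMP 0x1 /* assumed NIX_SENDMEMALG_SETTSTMP encoding */

    int main(void)
    {
            uint64_t tstamp_iova = 0x1000; /* registered Tx tstamp addr */
            int tmst;

            for (tmst = 0; tmst <= 1; tmst++) {
                    /* Mirrors !(ol_flags & PKT_TX_IEEE1588_TMST) */
                    int is_ol_tstamp = !tmst;
                    int alg = ALG_SETTSTMP - is_ol_tstamp;

                    printf("TMST=%d -> alg=%s addr=0x%" PRIx64 "\n", tmst,
                           alg == ALG_SET ? "SET" : "SETTSTMP",
                           tstamp_iova + 8 * is_ol_tstamp);
            }
            return 0;
    }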
static __rte_always_inline uint16_t
cn10k_nix_prepare_mseg(struct rte_mbuf *m, uint64_t *cmd, const uint16_t flags)
{
/* Roundup extra dwords to multiple of 2 */
segdw = (segdw >> 1) + (segdw & 0x1);
/* Default dwords */
- segdw += (off >> 1) + 1;
+ segdw += (off >> 1) + 1 + !!(flags & NIX_TX_OFFLOAD_TSTAMP_F);
send_hdr->w0.sizem1 = segdw - 1;
return segdw;
cn10k_nix_xmit_prepare(tx_pkts[i], cmd, lmt_addr, flags,
lso_tun_fmt);
+ cn10k_nix_xmit_prepare_tstamp(lmt_addr, &txq->cmd[0],
+ tx_pkts[i]->ol_flags, 4, flags);
lmt_addr += (1ULL << ROC_LMT_LINE_SIZE_LOG2);
}
/* Store sg list directly on lmt line */
segdw = cn10k_nix_prepare_mseg(tx_pkts[i], (uint64_t *)lmt_addr,
flags);
+ cn10k_nix_xmit_prepare_tstamp(lmt_addr, &txq->cmd[0],
+ tx_pkts[i]->ol_flags, segdw,
+ flags);
lmt_addr += (1ULL << ROC_LMT_LINE_SIZE_LOG2);
data128 |= (((__uint128_t)(segdw - 1)) << shft);
shft += 3;
#define VLAN_F NIX_TX_OFFLOAD_VLAN_QINQ_F
#define NOFF_F NIX_TX_OFFLOAD_MBUF_NOFF_F
#define TSO_F NIX_TX_OFFLOAD_TSO_F
+#define TSP_F NIX_TX_OFFLOAD_TSTAMP_F
-/* [TSO] [NOFF] [VLAN] [OL3OL4CSUM] [L3L4CSUM] */
+/* [TSP] [TSO] [NOFF] [VLAN] [OL3OL4CSUM] [L3L4CSUM] */
#define NIX_TX_FASTPATH_MODES \
-T(no_offload, 0, 0, 0, 0, 0, 4, \
+T(no_offload, 0, 0, 0, 0, 0, 0, 4, \
NIX_TX_OFFLOAD_NONE) \
-T(l3l4csum, 0, 0, 0, 0, 1, 4, \
+T(l3l4csum, 0, 0, 0, 0, 0, 1, 4, \
L3L4CSUM_F) \
-T(ol3ol4csum, 0, 0, 0, 1, 0, 4, \
+T(ol3ol4csum, 0, 0, 0, 0, 1, 0, 4, \
OL3OL4CSUM_F) \
-T(ol3ol4csum_l3l4csum, 0, 0, 0, 1, 1, 4, \
+T(ol3ol4csum_l3l4csum, 0, 0, 0, 0, 1, 1, 4, \
OL3OL4CSUM_F | L3L4CSUM_F) \
-T(vlan, 0, 0, 1, 0, 0, 6, \
+T(vlan, 0, 0, 0, 1, 0, 0, 6, \
VLAN_F) \
-T(vlan_l3l4csum, 0, 0, 1, 0, 1, 6, \
+T(vlan_l3l4csum, 0, 0, 0, 1, 0, 1, 6, \
VLAN_F | L3L4CSUM_F) \
-T(vlan_ol3ol4csum, 0, 0, 1, 1, 0, 6, \
+T(vlan_ol3ol4csum, 0, 0, 0, 1, 1, 0, 6, \
VLAN_F | OL3OL4CSUM_F) \
-T(vlan_ol3ol4csum_l3l4csum, 0, 0, 1, 1, 1, 6, \
+T(vlan_ol3ol4csum_l3l4csum, 0, 0, 0, 1, 1, 1, 6, \
VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F) \
-T(noff, 0, 1, 0, 0, 0, 4, \
+T(noff, 0, 0, 1, 0, 0, 0, 4, \
NOFF_F) \
-T(noff_l3l4csum, 0, 1, 0, 0, 1, 4, \
+T(noff_l3l4csum, 0, 0, 1, 0, 0, 1, 4, \
NOFF_F | L3L4CSUM_F) \
-T(noff_ol3ol4csum, 0, 1, 0, 1, 0, 4, \
+T(noff_ol3ol4csum, 0, 0, 1, 0, 1, 0, 4, \
NOFF_F | OL3OL4CSUM_F) \
-T(noff_ol3ol4csum_l3l4csum, 0, 1, 0, 1, 1, 4, \
+T(noff_ol3ol4csum_l3l4csum, 0, 0, 1, 0, 1, 1, 4, \
NOFF_F | OL3OL4CSUM_F | L3L4CSUM_F) \
-T(noff_vlan, 0, 1, 1, 0, 0, 6, \
+T(noff_vlan, 0, 0, 1, 1, 0, 0, 6, \
NOFF_F | VLAN_F) \
-T(noff_vlan_l3l4csum, 0, 1, 1, 0, 1, 6, \
+T(noff_vlan_l3l4csum, 0, 0, 1, 1, 0, 1, 6, \
NOFF_F | VLAN_F | L3L4CSUM_F) \
-T(noff_vlan_ol3ol4csum, 0, 1, 1, 1, 0, 6, \
+T(noff_vlan_ol3ol4csum, 0, 0, 1, 1, 1, 0, 6, \
NOFF_F | VLAN_F | OL3OL4CSUM_F) \
-T(noff_vlan_ol3ol4csum_l3l4csum, 0, 1, 1, 1, 1, 6, \
+T(noff_vlan_ol3ol4csum_l3l4csum, 0, 0, 1, 1, 1, 1, 6, \
NOFF_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F) \
-T(tso, 1, 0, 0, 0, 0, 6, \
+T(tso, 0, 1, 0, 0, 0, 0, 6, \
TSO_F) \
-T(tso_l3l4csum, 1, 0, 0, 0, 1, 6, \
+T(tso_l3l4csum, 0, 1, 0, 0, 0, 1, 6, \
TSO_F | L3L4CSUM_F) \
-T(tso_ol3ol4csum, 1, 0, 0, 1, 0, 6, \
+T(tso_ol3ol4csum, 0, 1, 0, 0, 1, 0, 6, \
TSO_F | OL3OL4CSUM_F) \
-T(tso_ol3ol4csum_l3l4csum, 1, 0, 0, 1, 1, 6, \
+T(tso_ol3ol4csum_l3l4csum, 0, 1, 0, 0, 1, 1, 6, \
TSO_F | OL3OL4CSUM_F | L3L4CSUM_F) \
-T(tso_vlan, 1, 0, 1, 0, 0, 6, \
+T(tso_vlan, 0, 1, 0, 1, 0, 0, 6, \
TSO_F | VLAN_F) \
-T(tso_vlan_l3l4csum, 1, 0, 1, 0, 1, 6, \
+T(tso_vlan_l3l4csum, 0, 1, 0, 1, 0, 1, 6, \
TSO_F | VLAN_F | L3L4CSUM_F) \
-T(tso_vlan_ol3ol4csum, 1, 0, 1, 1, 0, 6, \
+T(tso_vlan_ol3ol4csum, 0, 1, 0, 1, 1, 0, 6, \
TSO_F | VLAN_F | OL3OL4CSUM_F) \
-T(tso_vlan_ol3ol4csum_l3l4csum, 1, 0, 1, 1, 1, 6, \
+T(tso_vlan_ol3ol4csum_l3l4csum, 0, 1, 0, 1, 1, 1, 6, \
TSO_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F) \
-T(tso_noff, 1, 1, 0, 0, 0, 6, \
+T(tso_noff, 0, 1, 1, 0, 0, 0, 6, \
TSO_F | NOFF_F) \
-T(tso_noff_l3l4csum, 1, 1, 0, 0, 1, 6, \
+T(tso_noff_l3l4csum, 0, 1, 1, 0, 0, 1, 6, \
TSO_F | NOFF_F | L3L4CSUM_F) \
-T(tso_noff_ol3ol4csum, 1, 1, 0, 1, 0, 6, \
+T(tso_noff_ol3ol4csum, 0, 1, 1, 0, 1, 0, 6, \
TSO_F | NOFF_F | OL3OL4CSUM_F) \
-T(tso_noff_ol3ol4csum_l3l4csum, 1, 1, 0, 1, 1, 6, \
+T(tso_noff_ol3ol4csum_l3l4csum, 0, 1, 1, 0, 1, 1, 6, \
TSO_F | NOFF_F | OL3OL4CSUM_F | L3L4CSUM_F) \
-T(tso_noff_vlan, 1, 1, 1, 0, 0, 6, \
+T(tso_noff_vlan, 0, 1, 1, 1, 0, 0, 6, \
TSO_F | NOFF_F | VLAN_F) \
-T(tso_noff_vlan_l3l4csum, 1, 1, 1, 0, 1, 6, \
+T(tso_noff_vlan_l3l4csum, 0, 1, 1, 1, 0, 1, 6, \
TSO_F | NOFF_F | VLAN_F | L3L4CSUM_F) \
-T(tso_noff_vlan_ol3ol4csum, 1, 1, 1, 1, 0, 6, \
+T(tso_noff_vlan_ol3ol4csum, 0, 1, 1, 1, 1, 0, 6, \
TSO_F | NOFF_F | VLAN_F | OL3OL4CSUM_F) \
-T(tso_noff_vlan_ol3ol4csum_l3l4csum, 1, 1, 1, 1, 1, 6, \
- TSO_F | NOFF_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F)
-
-#define T(name, f4, f3, f2, f1, f0, sz, flags) \
+T(tso_noff_vlan_ol3ol4csum_l3l4csum, 0, 1, 1, 1, 1, 1, 6, \
+ TSO_F | NOFF_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F) \
+T(ts, 1, 0, 0, 0, 0, 0, 8, \
+ TSP_F) \
+T(ts_l3l4csum, 1, 0, 0, 0, 0, 1, 8, \
+ TSP_F | L3L4CSUM_F) \
+T(ts_ol3ol4csum, 1, 0, 0, 0, 1, 0, 8, \
+ TSP_F | OL3OL4CSUM_F) \
+T(ts_ol3ol4csum_l3l4csum, 1, 0, 0, 0, 1, 1, 8, \
+ TSP_F | OL3OL4CSUM_F | L3L4CSUM_F) \
+T(ts_vlan, 1, 0, 0, 1, 0, 0, 8, \
+ TSP_F | VLAN_F) \
+T(ts_vlan_l3l4csum, 1, 0, 0, 1, 0, 1, 8, \
+ TSP_F | VLAN_F | L3L4CSUM_F) \
+T(ts_vlan_ol3ol4csum, 1, 0, 0, 1, 1, 0, 8, \
+ TSP_F | VLAN_F | OL3OL4CSUM_F) \
+T(ts_vlan_ol3ol4csum_l3l4csum, 1, 0, 0, 1, 1, 1, 8, \
+ TSP_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F) \
+T(ts_noff, 1, 0, 1, 0, 0, 0, 8, \
+ TSP_F | NOFF_F) \
+T(ts_noff_l3l4csum, 1, 0, 1, 0, 0, 1, 8, \
+ TSP_F | NOFF_F | L3L4CSUM_F) \
+T(ts_noff_ol3ol4csum, 1, 0, 1, 0, 1, 0, 8, \
+ TSP_F | NOFF_F | OL3OL4CSUM_F) \
+T(ts_noff_ol3ol4csum_l3l4csum, 1, 0, 1, 0, 1, 1, 8, \
+ TSP_F | NOFF_F | OL3OL4CSUM_F | L3L4CSUM_F) \
+T(ts_noff_vlan, 1, 0, 1, 1, 0, 0, 8, \
+ TSP_F | NOFF_F | VLAN_F) \
+T(ts_noff_vlan_l3l4csum, 1, 0, 1, 1, 0, 1, 8, \
+ TSP_F | NOFF_F | VLAN_F | L3L4CSUM_F) \
+T(ts_noff_vlan_ol3ol4csum, 1, 0, 1, 1, 1, 0, 8, \
+ TSP_F | NOFF_F | VLAN_F | OL3OL4CSUM_F) \
+T(ts_noff_vlan_ol3ol4csum_l3l4csum, 1, 0, 1, 1, 1, 1, 8, \
+ TSP_F | NOFF_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F) \
+T(ts_tso, 1, 1, 0, 0, 0, 0, 8, \
+ TSP_F | TSO_F) \
+T(ts_tso_l3l4csum, 1, 1, 0, 0, 0, 1, 8, \
+ TSP_F | TSO_F | L3L4CSUM_F) \
+T(ts_tso_ol3ol4csum, 1, 1, 0, 0, 1, 0, 8, \
+ TSP_F | TSO_F | OL3OL4CSUM_F) \
+T(ts_tso_ol3ol4csum_l3l4csum, 1, 1, 0, 0, 1, 1, 8, \
+ TSP_F | TSO_F | OL3OL4CSUM_F | L3L4CSUM_F) \
+T(ts_tso_vlan, 1, 1, 0, 1, 0, 0, 8, \
+ TSP_F | TSO_F | VLAN_F) \
+T(ts_tso_vlan_l3l4csum, 1, 1, 0, 1, 0, 1, 8, \
+ TSP_F | TSO_F | VLAN_F | L3L4CSUM_F) \
+T(ts_tso_vlan_ol3ol4csum, 1, 1, 0, 1, 1, 0, 8, \
+ TSP_F | TSO_F | VLAN_F | OL3OL4CSUM_F) \
+T(ts_tso_vlan_ol3ol4csum_l3l4csum, 1, 1, 0, 1, 1, 1, 8, \
+ TSP_F | TSO_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F) \
+T(ts_tso_noff, 1, 1, 1, 0, 0, 0, 8, \
+ TSP_F | TSO_F | NOFF_F) \
+T(ts_tso_noff_l3l4csum, 1, 1, 1, 0, 0, 1, 8, \
+ TSP_F | TSO_F | NOFF_F | L3L4CSUM_F) \
+T(ts_tso_noff_ol3ol4csum, 1, 1, 1, 0, 1, 0, 8, \
+ TSP_F | TSO_F | NOFF_F | OL3OL4CSUM_F) \
+T(ts_tso_noff_ol3ol4csum_l3l4csum, 1, 1, 1, 0, 1, 1, 8, \
+ TSP_F | TSO_F | NOFF_F | OL3OL4CSUM_F | L3L4CSUM_F) \
+T(ts_tso_noff_vlan, 1, 1, 1, 1, 0, 0, 8, \
+ TSP_F | TSO_F | NOFF_F | VLAN_F) \
+T(ts_tso_noff_vlan_l3l4csum, 1, 1, 1, 1, 0, 1, 8, \
+ TSP_F | TSO_F | NOFF_F | VLAN_F | L3L4CSUM_F) \
+T(ts_tso_noff_vlan_ol3ol4csum, 1, 1, 1, 1, 1, 0, 8, \
+ TSP_F | TSO_F | NOFF_F | VLAN_F | OL3OL4CSUM_F) \
+T(ts_tso_noff_vlan_ol3ol4csum_l3l4csum, 1, 1, 1, 1, 1, 1, 8, \
+ TSP_F | TSO_F | NOFF_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F)
+
+#define T(name, f5, f4, f3, f2, f1, f0, sz, flags) \
uint16_t __rte_noinline __rte_hot cn10k_nix_xmit_pkts_##name( \
void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t pkts); \
\
#include "cn10k_ethdev.h"
#include "cn10k_tx.h"
-#define T(name, f4, f3, f2, f1, f0, sz, flags) \
+#define T(name, f5, f4, f3, f2, f1, f0, sz, flags) \
uint16_t __rte_noinline __rte_hot \
cn10k_nix_xmit_pkts_mseg_##name(void *tx_queue, \
struct rte_mbuf **tx_pkts, \
#include "cn10k_ethdev.h"
#include "cn10k_tx.h"
-#define T(name, f4, f3, f2, f1, f0, sz, flags) \
+#define T(name, f5, f4, f3, f2, f1, f0, sz, flags) \
uint16_t __rte_noinline __rte_hot \
cn10k_nix_xmit_pkts_vec_##name(void *tx_queue, \
struct rte_mbuf **tx_pkts, \
\
/* VLAN, TSTMP, TSO is not supported by vec */ \
if ((flags) & NIX_TX_OFFLOAD_VLAN_QINQ_F || \
+ (flags) & NIX_TX_OFFLOAD_TSTAMP_F || \
(flags) & NIX_TX_OFFLOAD_TSO_F) \
return 0; \
return cn10k_nix_xmit_pkts_vector(tx_queue, tx_pkts, pkts, cmd,\
if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
flags |= NIX_RX_MULTI_SEG_F;
+ if (dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP)
+ flags |= NIX_RX_OFFLOAD_TSTAMP_F;
+
if (!dev->ptype_disable)
flags |= NIX_RX_OFFLOAD_PTYPE_F;
flags |= (NIX_TX_OFFLOAD_TSO_F | NIX_TX_OFFLOAD_OL3_OL4_CSUM_F |
NIX_TX_OFFLOAD_L3_L4_CSUM_F);
+ if (dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP)
+ flags |= NIX_TX_OFFLOAD_TSTAMP_F;
+
return flags;
}
{
struct nix_send_ext_s *send_hdr_ext;
struct nix_send_hdr_s *send_hdr;
+ struct nix_send_mem_s *send_mem;
union nix_send_sg_s *sg;
- RTE_SET_USED(dev);
-
/* Initialize the fields based on basic single segment packet */
memset(&txq->cmd, 0, sizeof(txq->cmd));
send_hdr_ext = (struct nix_send_ext_s *)&txq->cmd[2];
send_hdr_ext->w0.subdc = NIX_SUBDC_EXT;
+ if (dev->tx_offload_flags & NIX_TX_OFFLOAD_TSTAMP_F) {
+ /* Default: one seg packet would have:
+ * 2(HDR) + 2(EXT) + 1(SG) + 1(IOVA) + 2(MEM)
+ * => 8/2 - 1 = 3
+ */
+ send_hdr->w0.sizem1 = 3;
+ send_hdr_ext->w0.tstmp = 1;
+
+ /* To calculate the offset for send_mem,
+ * send_hdr->w0.sizem1 * 2
+ */
+ send_mem = (struct nix_send_mem_s *)
+ (txq->cmd + (send_hdr->w0.sizem1 << 1));
+ send_mem->w0.cn9k.subdc = NIX_SUBDC_MEM;
+ send_mem->w0.cn9k.alg = NIX_SENDMEMALG_SETTSTMP;
+ send_mem->addr = dev->tstamp.tx_tstamp_iova;
+ }
sg = (union nix_send_sg_s *)&txq->cmd[4];
} else {
send_hdr = (struct nix_send_hdr_s *)&txq->cmd[0];
rxq->wdata = cq->wdata;
rxq->head = cq->head;
rxq->qmask = cq->qmask;
+ rxq->tstamp = &dev->tstamp;
/* Data offset from data to start of mbuf is first_skip */
rxq->data_off = rq->first_skip;
cn9k_nix_dev_start(struct rte_eth_dev *eth_dev)
{
struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ struct roc_nix *nix = &dev->nix;
int rc;
/* Common eth dev start */
if (rc)
return rc;
+ /* Update the VF about the data offset shifted by 8 bytes if PTP
+ * is already enabled in the PF owning this VF
+ */
+ if (dev->ptp_en && !roc_nix_is_pf(nix) && !roc_nix_is_sdp(nix))
+ nix_ptp_enable_vf(eth_dev);
+
/* Setting up the rx[tx]_offload_flags due to change
* in rx[tx]_offloads.
*/
uint32_t qmask;
uint32_t available;
uint16_t rq;
+ struct cnxk_timesync_info *tstamp;
} __plt_cache_aligned;
/* Rx and Tx routines */
#include "cn9k_ethdev.h"
#include "cn9k_rx.h"
-#define R(name, f3, f2, f1, f0, flags) \
+#define R(name, f4, f3, f2, f1, f0, flags) \
uint16_t __rte_noinline __rte_hot cn9k_nix_recv_pkts_##name( \
void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts) \
{ \
static inline void
pick_rx_func(struct rte_eth_dev *eth_dev,
- const eth_rx_burst_t rx_burst[2][2][2][2])
+ const eth_rx_burst_t rx_burst[2][2][2][2][2])
{
struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
- /* [MARK] [CKSUM] [PTYPE] [RSS] */
+ /* [TS] [MARK] [CKSUM] [PTYPE] [RSS] */
eth_dev->rx_pkt_burst = rx_burst
+ [!!(dev->rx_offload_flags & NIX_RX_OFFLOAD_TSTAMP_F)]
[!!(dev->rx_offload_flags & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
[!!(dev->rx_offload_flags & NIX_RX_OFFLOAD_CHECKSUM_F)]
[!!(dev->rx_offload_flags & NIX_RX_OFFLOAD_PTYPE_F)]
{
struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
- const eth_rx_burst_t nix_eth_rx_burst[2][2][2][2] = {
-#define R(name, f3, f2, f1, f0, flags) \
- [f3][f2][f1][f0] = cn9k_nix_recv_pkts_##name,
+ const eth_rx_burst_t nix_eth_rx_burst[2][2][2][2][2] = {
+#define R(name, f4, f3, f2, f1, f0, flags) \
+ [f4][f3][f2][f1][f0] = cn9k_nix_recv_pkts_##name,
NIX_RX_FASTPATH_MODES
#undef R
};
- const eth_rx_burst_t nix_eth_rx_burst_mseg[2][2][2][2] = {
-#define R(name, f3, f2, f1, f0, flags) \
- [f3][f2][f1][f0] = cn9k_nix_recv_pkts_mseg_##name,
+ const eth_rx_burst_t nix_eth_rx_burst_mseg[2][2][2][2][2] = {
+#define R(name, f4, f3, f2, f1, f0, flags) \
+ [f4][f3][f2][f1][f0] = cn9k_nix_recv_pkts_mseg_##name,
NIX_RX_FASTPATH_MODES
#undef R
};
- const eth_rx_burst_t nix_eth_rx_vec_burst[2][2][2][2] = {
-#define R(name, f3, f2, f1, f0, flags) \
- [f3][f2][f1][f0] = cn9k_nix_recv_pkts_vec_##name,
+ const eth_rx_burst_t nix_eth_rx_vec_burst[2][2][2][2][2] = {
+#define R(name, f4, f3, f2, f1, f0, flags) \
+ [f4][f3][f2][f1][f0] = cn9k_nix_recv_pkts_vec_##name,
NIX_RX_FASTPATH_MODES
#undef R
};
- if (dev->scalar_ena)
+ /* When PTP is enabled, the scalar rx function should be chosen as
+ * most PTP applications receive a single packet per rx burst.
+ */
+ if (dev->scalar_ena || dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP)
pick_rx_func(eth_dev, nix_eth_rx_burst);
else
pick_rx_func(eth_dev, nix_eth_rx_vec_burst);
/* Copy multi seg version with no offload for tear down sequence */
if (rte_eal_process_type() == RTE_PROC_PRIMARY)
dev->rx_pkt_burst_no_offload =
- nix_eth_rx_burst_mseg[0][0][0][0];
+ nix_eth_rx_burst_mseg[0][0][0][0][0];
rte_mb();
}
#include <rte_ether.h>
#include <rte_vect.h>
+#include <cnxk_ethdev.h>
+
#define NIX_RX_OFFLOAD_NONE (0)
#define NIX_RX_OFFLOAD_RSS_F BIT(0)
#define NIX_RX_OFFLOAD_PTYPE_F BIT(1)
cn9k_nix_cqe_to_mbuf(cq, cq->tag, mbuf, lookup_mem, mbuf_init,
flags);
+ cnxk_nix_mbuf_to_tstamp(mbuf, rxq->tstamp,
+ (flags & NIX_RX_OFFLOAD_TSTAMP_F),
+ (uint64_t *)((uint8_t *)mbuf + data_off));
rx_pkts[packets++] = mbuf;
roc_prefetch_store_keep(mbuf);
head++;
#define PTYPE_F NIX_RX_OFFLOAD_PTYPE_F
#define CKSUM_F NIX_RX_OFFLOAD_CHECKSUM_F
#define MARK_F NIX_RX_OFFLOAD_MARK_UPDATE_F
-
-/* [MARK] [CKSUM] [PTYPE] [RSS] */
-#define NIX_RX_FASTPATH_MODES \
-R(no_offload, 0, 0, 0, 0, NIX_RX_OFFLOAD_NONE) \
-R(rss, 0, 0, 0, 1, RSS_F) \
-R(ptype, 0, 0, 1, 0, PTYPE_F) \
-R(ptype_rss, 0, 0, 1, 1, PTYPE_F | RSS_F) \
-R(cksum, 0, 1, 0, 0, CKSUM_F) \
-R(cksum_rss, 0, 1, 0, 1, CKSUM_F | RSS_F) \
-R(cksum_ptype, 0, 1, 1, 0, CKSUM_F | PTYPE_F) \
-R(cksum_ptype_rss, 0, 1, 1, 1, CKSUM_F | PTYPE_F | RSS_F) \
-R(mark, 1, 0, 0, 0, MARK_F) \
-R(mark_rss, 1, 0, 0, 1, MARK_F | RSS_F) \
-R(mark_ptype, 1, 0, 1, 0, MARK_F | PTYPE_F) \
-R(mark_ptype_rss, 1, 0, 1, 1, MARK_F | PTYPE_F | RSS_F) \
-R(mark_cksum, 1, 1, 0, 0, MARK_F | CKSUM_F) \
-R(mark_cksum_rss, 1, 1, 0, 1, MARK_F | CKSUM_F | RSS_F) \
-R(mark_cksum_ptype, 1, 1, 1, 0, MARK_F | CKSUM_F | PTYPE_F)\
-R(mark_cksum_ptype_rss, 1, 1, 1, 1, MARK_F | CKSUM_F | PTYPE_F | RSS_F)
-
-#define R(name, f3, f2, f1, f0, flags) \
+#define TS_F NIX_RX_OFFLOAD_TSTAMP_F
+
+/* [TS] [MARK] [CKSUM] [PTYPE] [RSS] */
+#define NIX_RX_FASTPATH_MODES \
+R(no_offload, 0, 0, 0, 0, 0, NIX_RX_OFFLOAD_NONE) \
+R(rss, 0, 0, 0, 0, 1, RSS_F) \
+R(ptype, 0, 0, 0, 1, 0, PTYPE_F) \
+R(ptype_rss, 0, 0, 0, 1, 1, PTYPE_F | RSS_F) \
+R(cksum, 0, 0, 1, 0, 0, CKSUM_F) \
+R(cksum_rss, 0, 0, 1, 0, 1, CKSUM_F | RSS_F) \
+R(cksum_ptype, 0, 0, 1, 1, 0, CKSUM_F | PTYPE_F) \
+R(cksum_ptype_rss, 0, 0, 1, 1, 1, CKSUM_F | PTYPE_F | RSS_F) \
+R(mark, 0, 1, 0, 0, 0, MARK_F) \
+R(mark_rss, 0, 1, 0, 0, 1, MARK_F | RSS_F) \
+R(mark_ptype, 0, 1, 0, 1, 0, MARK_F | PTYPE_F) \
+R(mark_ptype_rss, 0, 1, 0, 1, 1, MARK_F | PTYPE_F | RSS_F) \
+R(mark_cksum, 0, 1, 1, 0, 0, MARK_F | CKSUM_F) \
+R(mark_cksum_rss, 0, 1, 1, 0, 1, MARK_F | CKSUM_F | RSS_F) \
+R(mark_cksum_ptype, 0, 1, 1, 1, 0, MARK_F | CKSUM_F | PTYPE_F) \
+R(mark_cksum_ptype_rss, 0, 1, 1, 1, 1, MARK_F | CKSUM_F | PTYPE_F | RSS_F)\
+R(ts, 1, 0, 0, 0, 0, TS_F) \
+R(ts_rss, 1, 0, 0, 0, 1, TS_F | RSS_F) \
+R(ts_ptype, 1, 0, 0, 1, 0, TS_F | PTYPE_F) \
+R(ts_ptype_rss, 1, 0, 0, 1, 1, TS_F | PTYPE_F | RSS_F) \
+R(ts_cksum, 1, 0, 1, 0, 0, TS_F | CKSUM_F) \
+R(ts_cksum_rss, 1, 0, 1, 0, 1, TS_F | CKSUM_F | RSS_F) \
+R(ts_cksum_ptype, 1, 0, 1, 1, 0, TS_F | CKSUM_F | PTYPE_F) \
+R(ts_cksum_ptype_rss, 1, 0, 1, 1, 1, TS_F | CKSUM_F | PTYPE_F | RSS_F)\
+R(ts_mark, 1, 1, 0, 0, 0, TS_F | MARK_F) \
+R(ts_mark_rss, 1, 1, 0, 0, 1, TS_F | MARK_F | RSS_F) \
+R(ts_mark_ptype, 1, 1, 0, 1, 0, TS_F | MARK_F | PTYPE_F) \
+R(ts_mark_ptype_rss, 1, 1, 0, 1, 1, TS_F | MARK_F | PTYPE_F | RSS_F)\
+R(ts_mark_cksum, 1, 1, 1, 0, 0, TS_F | MARK_F | CKSUM_F) \
+R(ts_mark_cksum_rss, 1, 1, 1, 0, 1, TS_F | MARK_F | CKSUM_F | RSS_F)\
+R(ts_mark_cksum_ptype, 1, 1, 1, 1, 0, TS_F | MARK_F | CKSUM_F | PTYPE_F)\
+R(ts_mark_cksum_ptype_rss, 1, 1, 1, 1, 1, TS_F | MARK_F | CKSUM_F | PTYPE_F | RSS_F)
+
+#define R(name, f4, f3, f2, f1, f0, flags) \
uint16_t __rte_noinline __rte_hot cn9k_nix_recv_pkts_##name( \
void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts); \
\
#include "cn9k_ethdev.h"
#include "cn9k_rx.h"
-#define R(name, f3, f2, f1, f0, flags) \
+#define R(name, f4, f3, f2, f1, f0, flags) \
uint16_t __rte_noinline __rte_hot cn9k_nix_recv_pkts_mseg_##name( \
void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts) \
{ \
#include "cn9k_ethdev.h"
#include "cn9k_rx.h"
-#define R(name, f3, f2, f1, f0, flags) \
+#define R(name, f4, f3, f2, f1, f0, flags) \
uint16_t __rte_noinline __rte_hot cn9k_nix_recv_pkts_vec_##name( \
void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts) \
{ \
+ /* TSTMP is not supported by vector */ \
+ if ((flags) & NIX_RX_OFFLOAD_TSTAMP_F) \
+ return 0; \
return cn9k_nix_recv_pkts_vector(rx_queue, rx_pkts, pkts, \
(flags)); \
}
#include "cn9k_ethdev.h"
#include "cn9k_tx.h"
-#define T(name, f4, f3, f2, f1, f0, sz, flags) \
+#define T(name, f5, f4, f3, f2, f1, f0, sz, flags) \
uint16_t __rte_noinline __rte_hot cn9k_nix_xmit_pkts_##name( \
void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t pkts) \
{ \
static inline void
pick_tx_func(struct rte_eth_dev *eth_dev,
- const eth_tx_burst_t tx_burst[2][2][2][2][2])
+ const eth_tx_burst_t tx_burst[2][2][2][2][2][2])
{
struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
- /* [TSO] [NOFF] [VLAN] [OL3_OL4_CSUM] [IL3_IL4_CSUM] */
+ /* [TSP] [TSO] [NOFF] [VLAN] [OL3_OL4_CSUM] [IL3_IL4_CSUM] */
eth_dev->tx_pkt_burst = tx_burst
+ [!!(dev->tx_offload_flags & NIX_TX_OFFLOAD_TSTAMP_F)]
[!!(dev->tx_offload_flags & NIX_TX_OFFLOAD_TSO_F)]
[!!(dev->tx_offload_flags & NIX_TX_OFFLOAD_MBUF_NOFF_F)]
[!!(dev->tx_offload_flags & NIX_TX_OFFLOAD_VLAN_QINQ_F)]
{
struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
- const eth_tx_burst_t nix_eth_tx_burst[2][2][2][2][2] = {
-#define T(name, f4, f3, f2, f1, f0, sz, flags) \
- [f4][f3][f2][f1][f0] = cn9k_nix_xmit_pkts_##name,
+ const eth_tx_burst_t nix_eth_tx_burst[2][2][2][2][2][2] = {
+#define T(name, f5, f4, f3, f2, f1, f0, sz, flags) \
+ [f5][f4][f3][f2][f1][f0] = cn9k_nix_xmit_pkts_##name,
NIX_TX_FASTPATH_MODES
#undef T
};
- const eth_tx_burst_t nix_eth_tx_burst_mseg[2][2][2][2][2] = {
-#define T(name, f4, f3, f2, f1, f0, sz, flags) \
- [f4][f3][f2][f1][f0] = cn9k_nix_xmit_pkts_mseg_##name,
+ const eth_tx_burst_t nix_eth_tx_burst_mseg[2][2][2][2][2][2] = {
+#define T(name, f5, f4, f3, f2, f1, f0, sz, flags) \
+ [f5][f4][f3][f2][f1][f0] = cn9k_nix_xmit_pkts_mseg_##name,
NIX_TX_FASTPATH_MODES
#undef T
};
- const eth_tx_burst_t nix_eth_tx_vec_burst[2][2][2][2][2] = {
-#define T(name, f4, f3, f2, f1, f0, sz, flags) \
- [f4][f3][f2][f1][f0] = cn9k_nix_xmit_pkts_vec_##name,
+ const eth_tx_burst_t nix_eth_tx_vec_burst[2][2][2][2][2][2] = {
+#define T(name, f5, f4, f3, f2, f1, f0, sz, flags) \
+ [f5][f4][f3][f2][f1][f0] = cn9k_nix_xmit_pkts_vec_##name,
NIX_TX_FASTPATH_MODES
#undef T
if (dev->scalar_ena ||
(dev->tx_offload_flags &
- (NIX_TX_OFFLOAD_VLAN_QINQ_F | NIX_TX_OFFLOAD_TSO_F)))
+ (NIX_TX_OFFLOAD_VLAN_QINQ_F | NIX_TX_OFFLOAD_TSTAMP_F |
+ NIX_TX_OFFLOAD_TSO_F)))
pick_tx_func(eth_dev, nix_eth_tx_burst);
else
pick_tx_func(eth_dev, nix_eth_tx_vec_burst);
#define NIX_TX_OFFLOAD_VLAN_QINQ_F BIT(2)
#define NIX_TX_OFFLOAD_MBUF_NOFF_F BIT(3)
#define NIX_TX_OFFLOAD_TSO_F BIT(4)
+#define NIX_TX_OFFLOAD_TSTAMP_F BIT(5)
/* Flags to control xmit_prepare function.
* Defining it from backwards to denote its been
NIX_TX_OFFLOAD_VLAN_QINQ_F | NIX_TX_OFFLOAD_TSO_F)
#define NIX_TX_NEED_EXT_HDR \
- (NIX_TX_OFFLOAD_VLAN_QINQ_F | NIX_TX_OFFLOAD_TSO_F)
+ (NIX_TX_OFFLOAD_VLAN_QINQ_F | NIX_TX_OFFLOAD_TSTAMP_F | \
+ NIX_TX_OFFLOAD_TSO_F)
#define NIX_XMIT_FC_OR_RETURN(txq, pkts) \
do { \
static __rte_always_inline int
cn9k_nix_tx_ext_subs(const uint16_t flags)
{
- return (flags &
- (NIX_TX_OFFLOAD_VLAN_QINQ_F | NIX_TX_OFFLOAD_TSO_F)) ? 1 : 0;
+ return (flags & NIX_TX_OFFLOAD_TSTAMP_F)
+ ? 2
+ : ((flags &
+ (NIX_TX_OFFLOAD_VLAN_QINQ_F | NIX_TX_OFFLOAD_TSO_F))
+ ? 1
+ : 0);
}
static __rte_always_inline void
}
}
+static __rte_always_inline void
+cn9k_nix_xmit_prepare_tstamp(uint64_t *cmd, const uint64_t *send_mem_desc,
+ const uint64_t ol_flags, const uint16_t no_segdw,
+ const uint16_t flags)
+{
+ if (flags & NIX_TX_OFFLOAD_TSTAMP_F) {
+ struct nix_send_mem_s *send_mem;
+ uint16_t off = (no_segdw - 1) << 1;
+ const uint8_t is_ol_tstamp = !(ol_flags & PKT_TX_IEEE1588_TMST);
+
+ send_mem = (struct nix_send_mem_s *)(cmd + off);
+ if (flags & NIX_TX_MULTI_SEG_F) {
+ /* Retrieving the default desc values */
+ cmd[off] = send_mem_desc[6];
+
+ /* Use a compiler barrier to avoid violation of C
+ * aliasing rules.
+ */
+ rte_compiler_barrier();
+ }
+
+ /* For packets without PKT_TX_IEEE1588_TMST set, the tx tstamp
+ * should not be recorded; hence change the alg type to
+ * NIX_SENDMEMALG_SET and advance the send mem addr field by
+ * 8 bytes so that the actual registered tx tstamp address is
+ * not corrupted.
+ */
+ send_mem->w0.cn9k.alg =
+ NIX_SENDMEMALG_SETTSTMP - (is_ol_tstamp);
+
+ send_mem->addr = (rte_iova_t)((uint64_t *)send_mem_desc[7] +
+ (is_ol_tstamp));
+ }
+}
+
static __rte_always_inline void
cn9k_nix_xmit_one(uint64_t *cmd, void *lmt_addr, const rte_iova_t io_addr,
const uint32_t flags)
/* Roundup extra dwords to multiple of 2 */
segdw = (segdw >> 1) + (segdw & 0x1);
/* Default dwords */
- segdw += (off >> 1) + 1;
+ segdw += (off >> 1) + 1 + !!(flags & NIX_TX_OFFLOAD_TSTAMP_F);
send_hdr->w0.sizem1 = segdw - 1;
return segdw;
for (i = 0; i < pkts; i++) {
cn9k_nix_xmit_prepare(tx_pkts[i], cmd, flags, lso_tun_fmt);
+ cn9k_nix_xmit_prepare_tstamp(cmd, &txq->cmd[0],
+ tx_pkts[i]->ol_flags, 4, flags);
cn9k_nix_xmit_one(cmd, lmt_addr, io_addr, flags);
}
for (i = 0; i < pkts; i++) {
cn9k_nix_xmit_prepare(tx_pkts[i], cmd, flags, lso_tun_fmt);
segdw = cn9k_nix_prepare_mseg(tx_pkts[i], cmd, flags);
+ cn9k_nix_xmit_prepare_tstamp(cmd, &txq->cmd[0],
+ tx_pkts[i]->ol_flags, segdw,
+ flags);
cn9k_nix_xmit_mseg_one(cmd, lmt_addr, io_addr, segdw);
}
#define VLAN_F NIX_TX_OFFLOAD_VLAN_QINQ_F
#define NOFF_F NIX_TX_OFFLOAD_MBUF_NOFF_F
#define TSO_F NIX_TX_OFFLOAD_TSO_F
-
-/* [TSO] [NOFF] [VLAN] [OL3OL4CSUM] [L3L4CSUM] */
-#define NIX_TX_FASTPATH_MODES \
-T(no_offload, 0, 0, 0, 0, 0, 4, \
- NIX_TX_OFFLOAD_NONE) \
-T(l3l4csum, 0, 0, 0, 0, 1, 4, \
- L3L4CSUM_F) \
-T(ol3ol4csum, 0, 0, 0, 1, 0, 4, \
- OL3OL4CSUM_F) \
-T(ol3ol4csum_l3l4csum, 0, 0, 0, 1, 1, 4, \
- OL3OL4CSUM_F | L3L4CSUM_F) \
-T(vlan, 0, 0, 1, 0, 0, 6, \
- VLAN_F) \
-T(vlan_l3l4csum, 0, 0, 1, 0, 1, 6, \
- VLAN_F | L3L4CSUM_F) \
-T(vlan_ol3ol4csum, 0, 0, 1, 1, 0, 6, \
- VLAN_F | OL3OL4CSUM_F) \
-T(vlan_ol3ol4csum_l3l4csum, 0, 0, 1, 1, 1, 6, \
- VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F) \
-T(noff, 0, 1, 0, 0, 0, 4, \
- NOFF_F) \
-T(noff_l3l4csum, 0, 1, 0, 0, 1, 4, \
- NOFF_F | L3L4CSUM_F) \
-T(noff_ol3ol4csum, 0, 1, 0, 1, 0, 4, \
- NOFF_F | OL3OL4CSUM_F) \
-T(noff_ol3ol4csum_l3l4csum, 0, 1, 0, 1, 1, 4, \
- NOFF_F | OL3OL4CSUM_F | L3L4CSUM_F) \
-T(noff_vlan, 0, 1, 1, 0, 0, 6, \
- NOFF_F | VLAN_F) \
-T(noff_vlan_l3l4csum, 0, 1, 1, 0, 1, 6, \
- NOFF_F | VLAN_F | L3L4CSUM_F) \
-T(noff_vlan_ol3ol4csum, 0, 1, 1, 1, 0, 6, \
- NOFF_F | VLAN_F | OL3OL4CSUM_F) \
-T(noff_vlan_ol3ol4csum_l3l4csum, 0, 1, 1, 1, 1, 6, \
- NOFF_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F) \
-T(tso, 1, 0, 0, 0, 0, 6, \
- TSO_F) \
-T(tso_l3l4csum, 1, 0, 0, 0, 1, 6, \
- TSO_F | L3L4CSUM_F) \
-T(tso_ol3ol4csum, 1, 0, 0, 1, 0, 6, \
- TSO_F | OL3OL4CSUM_F) \
-T(tso_ol3ol4csum_l3l4csum, 1, 0, 0, 1, 1, 6, \
- TSO_F | OL3OL4CSUM_F | L3L4CSUM_F) \
-T(tso_vlan, 1, 0, 1, 0, 0, 6, \
- TSO_F | VLAN_F) \
-T(tso_vlan_l3l4csum, 1, 0, 1, 0, 1, 6, \
- TSO_F | VLAN_F | L3L4CSUM_F) \
-T(tso_vlan_ol3ol4csum, 1, 0, 1, 1, 0, 6, \
- TSO_F | VLAN_F | OL3OL4CSUM_F) \
-T(tso_vlan_ol3ol4csum_l3l4csum, 1, 0, 1, 1, 1, 6, \
- TSO_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F) \
-T(tso_noff, 1, 1, 0, 0, 0, 6, \
- TSO_F | NOFF_F) \
-T(tso_noff_l3l4csum, 1, 1, 0, 0, 1, 6, \
- TSO_F | NOFF_F | L3L4CSUM_F) \
-T(tso_noff_ol3ol4csum, 1, 1, 0, 1, 0, 6, \
- TSO_F | NOFF_F | OL3OL4CSUM_F) \
-T(tso_noff_ol3ol4csum_l3l4csum, 1, 1, 0, 1, 1, 6, \
- TSO_F | NOFF_F | OL3OL4CSUM_F | L3L4CSUM_F) \
-T(tso_noff_vlan, 1, 1, 1, 0, 0, 6, \
- TSO_F | NOFF_F | VLAN_F) \
-T(tso_noff_vlan_l3l4csum, 1, 1, 1, 0, 1, 6, \
- TSO_F | NOFF_F | VLAN_F | L3L4CSUM_F) \
-T(tso_noff_vlan_ol3ol4csum, 1, 1, 1, 1, 0, 6, \
- TSO_F | NOFF_F | VLAN_F | OL3OL4CSUM_F) \
-T(tso_noff_vlan_ol3ol4csum_l3l4csum, 1, 1, 1, 1, 1, 6, \
- TSO_F | NOFF_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F)
-
-#define T(name, f4, f3, f2, f1, f0, sz, flags) \
+#define TSP_F NIX_TX_OFFLOAD_TSTAMP_F
+
+/* [TSP] [TSO] [NOFF] [VLAN] [OL3OL4CSUM] [L3L4CSUM] */
+#define NIX_TX_FASTPATH_MODES \
+T(no_offload, 0, 0, 0, 0, 0, 0, 4, \
+ NIX_TX_OFFLOAD_NONE) \
+T(l3l4csum, 0, 0, 0, 0, 0, 1, 4, \
+ L3L4CSUM_F) \
+T(ol3ol4csum, 0, 0, 0, 0, 1, 0, 4, \
+ OL3OL4CSUM_F) \
+T(ol3ol4csum_l3l4csum, 0, 0, 0, 0, 1, 1, 4, \
+ OL3OL4CSUM_F | L3L4CSUM_F) \
+T(vlan, 0, 0, 0, 1, 0, 0, 6, \
+ VLAN_F) \
+T(vlan_l3l4csum, 0, 0, 0, 1, 0, 1, 6, \
+ VLAN_F | L3L4CSUM_F) \
+T(vlan_ol3ol4csum, 0, 0, 0, 1, 1, 0, 6, \
+ VLAN_F | OL3OL4CSUM_F) \
+T(vlan_ol3ol4csum_l3l4csum, 0, 0, 0, 1, 1, 1, 6, \
+ VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F) \
+T(noff, 0, 0, 1, 0, 0, 0, 4, \
+ NOFF_F) \
+T(noff_l3l4csum, 0, 0, 1, 0, 0, 1, 4, \
+ NOFF_F | L3L4CSUM_F) \
+T(noff_ol3ol4csum, 0, 0, 1, 0, 1, 0, 4, \
+ NOFF_F | OL3OL4CSUM_F) \
+T(noff_ol3ol4csum_l3l4csum, 0, 0, 1, 0, 1, 1, 4, \
+ NOFF_F | OL3OL4CSUM_F | L3L4CSUM_F) \
+T(noff_vlan, 0, 0, 1, 1, 0, 0, 6, \
+ NOFF_F | VLAN_F) \
+T(noff_vlan_l3l4csum, 0, 0, 1, 1, 0, 1, 6, \
+ NOFF_F | VLAN_F | L3L4CSUM_F) \
+T(noff_vlan_ol3ol4csum, 0, 0, 1, 1, 1, 0, 6, \
+ NOFF_F | VLAN_F | OL3OL4CSUM_F) \
+T(noff_vlan_ol3ol4csum_l3l4csum, 0, 0, 1, 1, 1, 1, 6, \
+ NOFF_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F) \
+T(tso, 0, 1, 0, 0, 0, 0, 6, \
+ TSO_F) \
+T(tso_l3l4csum, 0, 1, 0, 0, 0, 1, 6, \
+ TSO_F | L3L4CSUM_F) \
+T(tso_ol3ol4csum, 0, 1, 0, 0, 1, 0, 6, \
+ TSO_F | OL3OL4CSUM_F) \
+T(tso_ol3ol4csum_l3l4csum, 0, 1, 0, 0, 1, 1, 6, \
+ TSO_F | OL3OL4CSUM_F | L3L4CSUM_F) \
+T(tso_vlan, 0, 1, 0, 1, 0, 0, 6, \
+ TSO_F | VLAN_F) \
+T(tso_vlan_l3l4csum, 0, 1, 0, 1, 0, 1, 6, \
+ TSO_F | VLAN_F | L3L4CSUM_F) \
+T(tso_vlan_ol3ol4csum, 0, 1, 0, 1, 1, 0, 6, \
+ TSO_F | VLAN_F | OL3OL4CSUM_F) \
+T(tso_vlan_ol3ol4csum_l3l4csum, 0, 1, 0, 1, 1, 1, 6, \
+ TSO_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F) \
+T(tso_noff, 0, 1, 1, 0, 0, 0, 6, \
+ TSO_F | NOFF_F) \
+T(tso_noff_l3l4csum, 0, 1, 1, 0, 0, 1, 6, \
+ TSO_F | NOFF_F | L3L4CSUM_F) \
+T(tso_noff_ol3ol4csum, 0, 1, 1, 0, 1, 0, 6, \
+ TSO_F | NOFF_F | OL3OL4CSUM_F) \
+T(tso_noff_ol3ol4csum_l3l4csum, 0, 1, 1, 0, 1, 1, 6, \
+ TSO_F | NOFF_F | OL3OL4CSUM_F | L3L4CSUM_F) \
+T(tso_noff_vlan, 0, 1, 1, 1, 0, 0, 6, \
+ TSO_F | NOFF_F | VLAN_F) \
+T(tso_noff_vlan_l3l4csum, 0, 1, 1, 1, 0, 1, 6, \
+ TSO_F | NOFF_F | VLAN_F | L3L4CSUM_F) \
+T(tso_noff_vlan_ol3ol4csum, 0, 1, 1, 1, 1, 0, 6, \
+ TSO_F | NOFF_F | VLAN_F | OL3OL4CSUM_F) \
+T(tso_noff_vlan_ol3ol4csum_l3l4csum, 0, 1, 1, 1, 1, 1, 6, \
+ TSO_F | NOFF_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F) \
+T(ts, 1, 0, 0, 0, 0, 0, 8, \
+ TSP_F) \
+T(ts_l3l4csum, 1, 0, 0, 0, 0, 1, 8, \
+ TSP_F | L3L4CSUM_F) \
+T(ts_ol3ol4csum, 1, 0, 0, 0, 1, 0, 8, \
+ TSP_F | OL3OL4CSUM_F) \
+T(ts_ol3ol4csum_l3l4csum, 1, 0, 0, 0, 1, 1, 8, \
+ TSP_F | OL3OL4CSUM_F | L3L4CSUM_F) \
+T(ts_vlan, 1, 0, 0, 1, 0, 0, 8, \
+ TSP_F | VLAN_F) \
+T(ts_vlan_l3l4csum, 1, 0, 0, 1, 0, 1, 8, \
+ TSP_F | VLAN_F | L3L4CSUM_F) \
+T(ts_vlan_ol3ol4csum, 1, 0, 0, 1, 1, 0, 8, \
+ TSP_F | VLAN_F | OL3OL4CSUM_F) \
+T(ts_vlan_ol3ol4csum_l3l4csum, 1, 0, 0, 1, 1, 1, 8, \
+ TSP_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F) \
+T(ts_noff, 1, 0, 1, 0, 0, 0, 8, \
+ TSP_F | NOFF_F) \
+T(ts_noff_l3l4csum, 1, 0, 1, 0, 0, 1, 8, \
+ TSP_F | NOFF_F | L3L4CSUM_F) \
+T(ts_noff_ol3ol4csum, 1, 0, 1, 0, 1, 0, 8, \
+ TSP_F | NOFF_F | OL3OL4CSUM_F) \
+T(ts_noff_ol3ol4csum_l3l4csum, 1, 0, 1, 0, 1, 1, 8, \
+ TSP_F | NOFF_F | OL3OL4CSUM_F | L3L4CSUM_F) \
+T(ts_noff_vlan, 1, 0, 1, 1, 0, 0, 8, \
+ TSP_F | NOFF_F | VLAN_F) \
+T(ts_noff_vlan_l3l4csum, 1, 0, 1, 1, 0, 1, 8, \
+ TSP_F | NOFF_F | VLAN_F | L3L4CSUM_F) \
+T(ts_noff_vlan_ol3ol4csum, 1, 0, 1, 1, 1, 0, 8, \
+ TSP_F | NOFF_F | VLAN_F | OL3OL4CSUM_F) \
+T(ts_noff_vlan_ol3ol4csum_l3l4csum, 1, 0, 1, 1, 1, 1, 8, \
+ TSP_F | NOFF_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F) \
+T(ts_tso, 1, 1, 0, 0, 0, 0, 8, \
+ TSP_F | TSO_F) \
+T(ts_tso_l3l4csum, 1, 1, 0, 0, 0, 1, 8, \
+ TSP_F | TSO_F | L3L4CSUM_F) \
+T(ts_tso_ol3ol4csum, 1, 1, 0, 0, 1, 0, 8, \
+ TSP_F | TSO_F | OL3OL4CSUM_F) \
+T(ts_tso_ol3ol4csum_l3l4csum, 1, 1, 0, 0, 1, 1, 8, \
+ TSP_F | TSO_F | OL3OL4CSUM_F | L3L4CSUM_F) \
+T(ts_tso_vlan, 1, 1, 0, 1, 0, 0, 8, \
+ TSP_F | TSO_F | VLAN_F) \
+T(ts_tso_vlan_l3l4csum, 1, 1, 0, 1, 0, 1, 8, \
+ TSP_F | TSO_F | VLAN_F | L3L4CSUM_F) \
+T(ts_tso_vlan_ol3ol4csum, 1, 1, 0, 1, 1, 0, 8, \
+ TSP_F | TSO_F | VLAN_F | OL3OL4CSUM_F) \
+T(ts_tso_vlan_ol3ol4csum_l3l4csum, 1, 1, 0, 1, 1, 1, 8, \
+ TSP_F | TSO_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F) \
+T(ts_tso_noff, 1, 1, 1, 0, 0, 0, 8, \
+ TSP_F | TSO_F | NOFF_F) \
+T(ts_tso_noff_l3l4csum, 1, 1, 1, 0, 0, 1, 8, \
+ TSP_F | TSO_F | NOFF_F | L3L4CSUM_F) \
+T(ts_tso_noff_ol3ol4csum, 1, 1, 1, 0, 1, 0, 8, \
+ TSP_F | TSO_F | NOFF_F | OL3OL4CSUM_F) \
+T(ts_tso_noff_ol3ol4csum_l3l4csum, 1, 1, 1, 0, 1, 1, 8, \
+ TSP_F | TSO_F | NOFF_F | OL3OL4CSUM_F | L3L4CSUM_F) \
+T(ts_tso_noff_vlan, 1, 1, 1, 1, 0, 0, 8, \
+ TSP_F | TSO_F | NOFF_F | VLAN_F) \
+T(ts_tso_noff_vlan_l3l4csum, 1, 1, 1, 1, 0, 1, 8, \
+ TSP_F | TSO_F | NOFF_F | VLAN_F | L3L4CSUM_F) \
+T(ts_tso_noff_vlan_ol3ol4csum, 1, 1, 1, 1, 1, 0, 8, \
+ TSP_F | TSO_F | NOFF_F | VLAN_F | OL3OL4CSUM_F) \
+T(ts_tso_noff_vlan_ol3ol4csum_l3l4csum, 1, 1, 1, 1, 1, 1, 8, \
+ TSP_F | TSO_F | NOFF_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F)
+
+#define T(name, f5, f4, f3, f2, f1, f0, sz, flags) \
uint16_t __rte_noinline __rte_hot cn9k_nix_xmit_pkts_##name( \
void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t pkts); \
\
#include "cn9k_ethdev.h"
#include "cn9k_tx.h"
-#define T(name, f4, f3, f2, f1, f0, sz, flags) \
+#define T(name, f5, f4, f3, f2, f1, f0, sz, flags) \
uint16_t __rte_noinline __rte_hot \
cn9k_nix_xmit_pkts_mseg_##name(void *tx_queue, \
struct rte_mbuf **tx_pkts, \
#include "cn9k_ethdev.h"
#include "cn9k_tx.h"
-#define T(name, f4, f3, f2, f1, f0, sz, flags) \
+#define T(name, f5, f4, f3, f2, f1, f0, sz, flags) \
uint16_t __rte_noinline __rte_hot \
cn9k_nix_xmit_pkts_vec_##name(void *tx_queue, \
struct rte_mbuf **tx_pkts, \
\
/* VLAN, TSTMP, TSO is not supported by vec */ \
if ((flags) & NIX_TX_OFFLOAD_VLAN_QINQ_F || \
+ (flags) & NIX_TX_OFFLOAD_TSTAMP_F || \
(flags) & NIX_TX_OFFLOAD_TSO_F) \
return 0; \
return cn9k_nix_xmit_pkts_vector(tx_queue, tx_pkts, pkts, cmd, \
offsetof(struct rte_mbuf, data_off) !=
6);
mb_def.nb_segs = 1;
- mb_def.data_off = RTE_PKTMBUF_HEADROOM;
+ mb_def.data_off = RTE_PKTMBUF_HEADROOM +
+ (dev->ptp_en * CNXK_NIX_TIMESYNC_RX_OFFSET);
mb_def.port = port_id;
rte_mbuf_refcnt_set(&mb_def, 1);
eth_dev->data->rx_queues[qid] = rxq_sp + 1;
eth_dev->data->rx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED;
+ /* Calculate the delta and frequency multiplier between the PTP
+ * HI clock and tsc. These are needed to derive the raw clock
+ * value from the tsc counter; the read_clock eth op returns that
+ * raw clock value.
+ */
+ if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) || dev->ptp_en) {
+ rc = cnxk_nix_tsc_convert(dev);
+ if (rc) {
+ plt_err("Failed to calculate delta and freq mult");
+ goto rq_fini;
+ }
+ }
+
return 0;
rq_fini:
rc |= roc_nix_rq_fini(rq);
struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
int rc, i;
- if (eth_dev->data->nb_rx_queues != 0) {
+ if (eth_dev->data->nb_rx_queues != 0 && !dev->ptp_en) {
rc = nix_recalc_mtu(eth_dev);
if (rc)
return rc;
}
}
+ /* Enable PTP if it is requested by the user or already
+ * enabled in the PF owning this VF
+ */
+ memset(&dev->tstamp, 0, sizeof(struct cnxk_timesync_info));
+ if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) || dev->ptp_en)
+ cnxk_eth_dev_ops.timesync_enable(eth_dev);
+ else
+ cnxk_eth_dev_ops.timesync_disable(eth_dev);
+
+ if (dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
+ rc = rte_mbuf_dyn_rx_timestamp_register
+ (&dev->tstamp.tstamp_dynfield_offset,
+ &dev->tstamp.rx_tstamp_dynflag);
+ if (rc != 0) {
+ plt_err("Failed to register Rx timestamp field/flag");
+ goto rx_disable;
+ }
+ }
+
cnxk_nix_toggle_flag_link_cfg(dev, false);
return 0;
#include <rte_mbuf.h>
#include <rte_mbuf_pool_ops.h>
#include <rte_mempool.h>
+#include <rte_time.h>
#include "roc_api.h"
(DEV_RX_OFFLOAD_CHECKSUM | DEV_RX_OFFLOAD_SCTP_CKSUM | \
DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | DEV_RX_OFFLOAD_SCATTER | \
DEV_RX_OFFLOAD_JUMBO_FRAME | DEV_RX_OFFLOAD_OUTER_UDP_CKSUM | \
- DEV_RX_OFFLOAD_RSS_HASH)
+ DEV_RX_OFFLOAD_RSS_HASH | DEV_RX_OFFLOAD_TIMESTAMP)
#define RSS_IPV4_ENABLE \
(ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | ETH_RSS_NONFRAG_IPV4_UDP | \
/* Default mark value used when none is provided. */
#define CNXK_FLOW_ACTION_FLAG_DEFAULT 0xffff
+/* Default cycle counter mask */
+#define CNXK_CYCLECOUNTER_MASK 0xffffffffffffffffULL
#define CNXK_NIX_TIMESYNC_RX_OFFSET 8
+
#define PTYPE_NON_TUNNEL_WIDTH 16
#define PTYPE_TUNNEL_WIDTH 12
#define PTYPE_NON_TUNNEL_ARRAY_SZ BIT(PTYPE_NON_TUNNEL_WIDTH)
uint8_t valid;
};
+struct cnxk_timesync_info {
+ uint64_t rx_tstamp_dynflag;
+ rte_iova_t tx_tstamp_iova;
+ uint64_t *tx_tstamp;
+ uint64_t rx_tstamp;
+ int tstamp_dynfield_offset;
+ uint8_t tx_ready;
+ uint8_t rx_ready;
+} __plt_cache_aligned;
+
struct cnxk_eth_dev {
/* ROC NIX */
struct roc_nix nix;
/* Flow control configuration */
struct cnxk_fc_cfg fc_cfg;
+ /* PTP Counters */
+ struct cnxk_timesync_info tstamp;
+ struct rte_timecounter systime_tc;
+ struct rte_timecounter rx_tstamp_tc;
+ struct rte_timecounter tx_tstamp_tc;
+ double clk_freq_mult;
+ uint64_t clk_delta;
+
/* Rx burst for cleanup(Only Primary) */
eth_rx_burst_t rx_pkt_burst_no_offload;
int cnxk_nix_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qid);
int cnxk_nix_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qid);
int cnxk_nix_dev_start(struct rte_eth_dev *eth_dev);
+int cnxk_nix_timesync_enable(struct rte_eth_dev *eth_dev);
+int cnxk_nix_timesync_disable(struct rte_eth_dev *eth_dev);
+int cnxk_nix_tsc_convert(struct cnxk_eth_dev *dev);
uint64_t cnxk_nix_rxq_mbuf_setup(struct cnxk_eth_dev *dev);
return 1;
}
+static inline rte_mbuf_timestamp_t *
+cnxk_nix_timestamp_dynfield(struct rte_mbuf *mbuf,
+ struct cnxk_timesync_info *info)
+{
+ return RTE_MBUF_DYNFIELD(mbuf, info->tstamp_dynfield_offset,
+ rte_mbuf_timestamp_t *);
+}
+
+static __rte_always_inline void
+cnxk_nix_mbuf_to_tstamp(struct rte_mbuf *mbuf,
+ struct cnxk_timesync_info *tstamp, bool ts_enable,
+ uint64_t *tstamp_ptr)
+{
+ if (ts_enable &&
+ (mbuf->data_off ==
+ RTE_PKTMBUF_HEADROOM + CNXK_NIX_TIMESYNC_RX_OFFSET)) {
+ mbuf->pkt_len -= CNXK_NIX_TIMESYNC_RX_OFFSET;
+
+ /* Read the rx timestamp inserted by CGX at the start of
+ * the packet data.
+ */
+ *cnxk_nix_timestamp_dynfield(mbuf, tstamp) =
+ rte_be_to_cpu_64(*tstamp_ptr);
+ /* The PKT_RX_IEEE1588_TMST flag needs to be set only when
+ * PTP packets are received.
+ */
+ if (mbuf->packet_type == RTE_PTYPE_L2_ETHER_TIMESYNC) {
+ tstamp->rx_tstamp =
+ *cnxk_nix_timestamp_dynfield(mbuf, tstamp);
+ tstamp->rx_ready = 1;
+ mbuf->ol_flags |= PKT_RX_IEEE1588_PTP |
+ PKT_RX_IEEE1588_TMST |
+ tstamp->rx_tstamp_dynflag;
+ }
+ }
+}
+
#endif /* __CNXK_ETHDEV_H__ */
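On the application side, the timestamp stored by
cnxk_nix_mbuf_to_tstamp() is read back through the same mbuf dynamic
field. A minimal sketch, assuming dynfield_offset came from
rte_mbuf_dyn_rx_timestamp_register() (rte_mbuf_dyn.h):

    static inline rte_mbuf_timestamp_t
    app_read_rx_timestamp(struct rte_mbuf *m, int dynfield_offset)
    {
            /* Same accessor the driver uses when storing the stamp */
            return *RTE_MBUF_DYNFIELD(m, dynfield_offset,
                                      rte_mbuf_timestamp_t *);
    }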
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+
+#include "cnxk_ethdev.h"
+
+/* This function calculates two parameters, "clk_freq_mult" and
+ * "clk_delta", which are useful in deriving the PTP HI clock from
+ * the timestamp counter (tsc) value.
+ */
+int
+cnxk_nix_tsc_convert(struct cnxk_eth_dev *dev)
+{
+ uint64_t ticks_base = 0, ticks = 0, tsc = 0, t_freq;
+ struct roc_nix *nix = &dev->nix;
+ int rc, val;
+
+ /* Calculating the frequency at which PTP HI clock is running */
+ rc = roc_nix_ptp_clock_read(nix, &ticks_base, &tsc, false);
+ if (rc) {
+ plt_err("Failed to read the raw clock value: %d", rc);
+ goto fail;
+ }
+
+ rte_delay_ms(100);
+
+ rc = roc_nix_ptp_clock_read(nix, &ticks, &tsc, false);
+ if (rc) {
+ plt_err("Failed to read the raw clock value: %d", rc);
+ goto fail;
+ }
+
+ t_freq = (ticks - ticks_base) * 10;
+
+ /* Calculate the frequency multiplier, i.e. the ratio between
+ * the frequency at which the PTP HI clock works and that at
+ * which the tsc runs.
+ */
+ dev->clk_freq_mult =
+ (double)pow(10, floor(log10(t_freq))) / rte_get_timer_hz();
+
+ val = false;
+#ifdef RTE_ARM_EAL_RDTSC_USE_PMU
+ val = true;
+#endif
+ rc = roc_nix_ptp_clock_read(nix, &ticks, &tsc, val);
+ if (rc) {
+ plt_err("Failed to read the raw clock value: %d", rc);
+ goto fail;
+ }
+
+ /* Calculating delta between PTP HI clock and tsc */
+ dev->clk_delta = ((uint64_t)(ticks / dev->clk_freq_mult) - tsc);
+
+fail:
+ return rc;
+}
+
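With these two parameters, a later tsc sample can be turned back into
a raw PTP HI clock value by inverting the delta computation above; the
read_clock eth op is expected to do the equivalent of this
illustrative helper:

    static inline uint64_t
    cnxk_tsc_to_raw_clock(const struct cnxk_eth_dev *dev, uint64_t tsc)
    {
            /* clk_delta = ticks / clk_freq_mult - tsc
             * => ticks  = (tsc + clk_delta) * clk_freq_mult
             */
            return (uint64_t)((tsc + dev->clk_delta) * dev->clk_freq_mult);
    }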
+int
+cnxk_nix_timesync_enable(struct rte_eth_dev *eth_dev)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ struct cnxk_timesync_info *tstamp = &dev->tstamp;
+ struct roc_nix *nix = &dev->nix;
+ const struct rte_memzone *ts;
+ int rc = 0;
+
+ /* If we are VF/SDP/LBK, ptp cannot be enabled */
+ if (roc_nix_is_vf_or_sdp(nix) || roc_nix_is_lbk(nix)) {
+ plt_err("PTP cannot be enabled for VF/SDP/LBK");
+ return -EINVAL;
+ }
+
+ if (dev->ptp_en)
+ return rc;
+
+ if (dev->ptype_disable) {
+ plt_err("Ptype offload is disabled, it should be enabled");
+ return -EINVAL;
+ }
+
+ if (dev->npc.switch_header_type == ROC_PRIV_FLAGS_HIGIG) {
+ plt_err("Both PTP and switch header cannot be enabled");
+ return -EINVAL;
+ }
+
+ /* Allocate an IOVA address for the tx tstamp */
+ ts = rte_eth_dma_zone_reserve(eth_dev, "cnxk_ts", 0, 128, 128, 0);
+ if (ts == NULL) {
+ plt_err("Failed to allocate mem for tx tstamp addr");
+ return -ENOMEM;
+ }
+
+ tstamp->tx_tstamp_iova = ts->iova;
+ tstamp->tx_tstamp = ts->addr;
+
+ rc = rte_mbuf_dyn_rx_timestamp_register(&tstamp->tstamp_dynfield_offset,
+ &tstamp->rx_tstamp_dynflag);
+ if (rc) {
+ plt_err("Failed to register Rx timestamp field/flag");
+ goto error;
+ }
+
+ /* System time should be already on by default */
+ memset(&dev->systime_tc, 0, sizeof(struct rte_timecounter));
+ memset(&dev->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
+ memset(&dev->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
+
+ dev->systime_tc.cc_mask = CNXK_CYCLECOUNTER_MASK;
+ dev->rx_tstamp_tc.cc_mask = CNXK_CYCLECOUNTER_MASK;
+ dev->tx_tstamp_tc.cc_mask = CNXK_CYCLECOUNTER_MASK;
+
+ dev->rx_offloads |= DEV_RX_OFFLOAD_TIMESTAMP;
+
+ rc = roc_nix_ptp_rx_ena_dis(nix, true);
+ if (!rc) {
+ rc = roc_nix_ptp_tx_ena_dis(nix, true);
+ if (rc) {
+ roc_nix_ptp_rx_ena_dis(nix, false);
+ goto error;
+ }
+ }
+
+ rc = nix_recalc_mtu(eth_dev);
+ if (rc) {
+ plt_err("Failed to set MTU size for ptp");
+ goto error;
+ }
+
+ return rc;
+
+error:
+ rte_eth_dma_zone_free(eth_dev, "cnxk_ts", 0);
+ dev->tstamp.tx_tstamp_iova = 0;
+ dev->tstamp.tx_tstamp = NULL;
+ return rc;
+}
+
+int
+cnxk_nix_timesync_disable(struct rte_eth_dev *eth_dev)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ uint64_t rx_offloads = DEV_RX_OFFLOAD_TIMESTAMP;
+ struct roc_nix *nix = &dev->nix;
+ int rc = 0;
+
+ /* If we are VF/SDP/LBK, ptp cannot be disabled */
+ if (roc_nix_is_vf_or_sdp(nix) || roc_nix_is_lbk(nix))
+ return -EINVAL;
+
+ if (!dev->ptp_en)
+ return rc;
+
+ dev->rx_offloads &= ~rx_offloads;
+
+ rc = roc_nix_ptp_rx_ena_dis(nix, false);
+ if (!rc) {
+ rc = roc_nix_ptp_tx_ena_dis(nix, false);
+ if (rc) {
+ roc_nix_ptp_rx_ena_dis(nix, true);
+ return rc;
+ }
+ }
+
+ rc = nix_recalc_mtu(eth_dev);
+ if (rc)
+ plt_err("Failed to set MTU size for ptp");
+
+ return rc;
+}
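These callbacks back the generic ethdev timesync API. A minimal
application-side sketch (error handling trimmed; the timestamp read
ops are assumed to be wired up by follow-up patches):

    struct timespec ts;

    rte_eth_timesync_enable(port_id);
    /* ... Rx burst that saw a packet with PKT_RX_IEEE1588_TMST set ... */
    rte_eth_timesync_read_rx_timestamp(port_id, &ts, 0);
    /* ... Tx of a packet with PKT_TX_IEEE1588_TMST set ... */
    rte_eth_timesync_read_tx_timestamp(port_id, &ts);
    rte_eth_timesync_disable(port_id);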
'cnxk_ethdev_ops.c',
'cnxk_link.c',
'cnxk_lookup.c',
+ 'cnxk_ptp.c',
'cnxk_rte_flow.c',
'cnxk_stats.c',
)