#define OTX2_SSO_SQB_LIMIT (0x180)
#define OTX2_SSO_XAQ_SLACK (8)
#define OTX2_SSO_XAQ_CACHE_CNT (0x7)
+#define OTX2_SSO_WQE_SG_PTR (9)
/* SSO LF register offsets (BAR2) */
#define SSO_LF_GGRP_OP_ADD_WORK0 (0x0ull)
const void * const lookup_mem)
{
struct nix_wqe_hdr_s *wqe = (struct nix_wqe_hdr_s *)get_work1;
+ uint64_t val = mbuf_init.value | (uint64_t)port_id << 48;
+
+ if (flags & NIX_RX_OFFLOAD_TSTAMP_F)
+ val |= NIX_TIMESYNC_RX_OFFSET;
otx2_nix_cqe_to_mbuf((struct nix_cqe_hdr_s *)wqe, tag,
(struct rte_mbuf *)mbuf, lookup_mem,
- mbuf_init.value | (uint64_t)port_id << 48, flags);
+ val, flags);
}
const uint32_t flags, const void * const lookup_mem)
{
union otx2_sso_event event;
+ uint64_t tstamp_ptr;
uint64_t get_work1;
uint64_t mbuf;
otx2_wqe_to_mbuf(get_work1, mbuf, event.sub_event_type,
(uint32_t) event.get_work0, flags, lookup_mem);
/* Extracting tstamp, if PTP enabled*/
+ tstamp_ptr = *(uint64_t *)(((struct nix_wqe_hdr_s *)get_work1)
+ + OTX2_SSO_WQE_SG_PTR);
otx2_nix_mbuf_to_tstamp((struct rte_mbuf *)mbuf, ws->tstamp,
- flags);
+ flags, (uint64_t *)tstamp_ptr);
get_work1 = mbuf;
}
const uint32_t flags)
{
union otx2_sso_event event;
+ uint64_t tstamp_ptr;
uint64_t get_work1;
uint64_t mbuf;
otx2_wqe_to_mbuf(get_work1, mbuf, event.sub_event_type,
(uint32_t) event.get_work0, flags, NULL);
/* Extracting tstamp, if PTP enabled*/
+ tstamp_ptr = *(uint64_t *)(((struct nix_wqe_hdr_s *)get_work1)
+ + OTX2_SSO_WQE_SG_PTR);
otx2_nix_mbuf_to_tstamp((struct rte_mbuf *)mbuf, ws->tstamp,
- flags);
+ flags, (uint64_t *)tstamp_ptr);
get_work1 = mbuf;
}
{
const uint64_t set_gw = BIT_ULL(16) | 1;
union otx2_sso_event event;
+ uint64_t tstamp_ptr;
uint64_t get_work1;
uint64_t mbuf;
event.event_type == RTE_EVENT_TYPE_ETHDEV) {
otx2_wqe_to_mbuf(get_work1, mbuf, event.sub_event_type,
(uint32_t) event.get_work0, flags, lookup_mem);
- /* Extracting tstamp, if PTP enabled*/
- otx2_nix_mbuf_to_tstamp((struct rte_mbuf *)mbuf, tstamp, flags);
+		/* Extracting tstamp, if PTP is enabled. CGX will prepend the
+		 * timestamp at the start of the packet data, and it can be
+		 * derived from WQE dword 9, which corresponds to the SG iova.
+ * rte_pktmbuf_mtod_offset can be used for this purpose but it
+ * brings down the performance as it reads mbuf->buf_addr which
+ * is not part of cache in general fast path.
+ */
+ tstamp_ptr = *(uint64_t *)(((struct nix_wqe_hdr_s *)get_work1)
+ + OTX2_SSO_WQE_SG_PTR);
+ otx2_nix_mbuf_to_tstamp((struct rte_mbuf *)mbuf, tstamp, flags,
+ (uint64_t *)tstamp_ptr);
get_work1 = mbuf;
}
send_mem = (struct nix_send_mem_s *)(txq->cmd +
(send_hdr->w0.sizem1 << 1));
send_mem->subdc = NIX_SUBDC_MEM;
- send_mem->dsz = 0x0;
- send_mem->wmem = 0x1;
send_mem->alg = NIX_SENDMEMALG_SETTSTMP;
send_mem->addr = txq->dev->tstamp.tx_tstamp_iova;
}
otx2_nix_cqe_to_mbuf(cq, cq->tag, mbuf, lookup_mem, mbuf_init,
flags);
- otx2_nix_mbuf_to_tstamp(mbuf, rxq->tstamp, flags);
+ otx2_nix_mbuf_to_tstamp(mbuf, rxq->tstamp, flags,
+ (uint64_t *)((uint8_t *)mbuf + data_off));
rx_pkts[packets++] = mbuf;
otx2_prefetch_store_keep(mbuf);
head++;
static __rte_always_inline void
otx2_nix_mbuf_to_tstamp(struct rte_mbuf *mbuf,
- struct otx2_timesync_info *tstamp, const uint16_t flag)
+ struct otx2_timesync_info *tstamp, const uint16_t flag,
+ uint64_t *tstamp_ptr)
{
if ((flag & NIX_RX_OFFLOAD_TSTAMP_F) &&
- mbuf->packet_type == RTE_PTYPE_L2_ETHER_TIMESYNC &&
(mbuf->data_off == RTE_PKTMBUF_HEADROOM +
NIX_TIMESYNC_RX_OFFSET)) {
- uint64_t *tstamp_ptr;
- /* Deal with rx timestamp */
- tstamp_ptr = rte_pktmbuf_mtod_offset(mbuf, uint64_t *,
- -NIX_TIMESYNC_RX_OFFSET);
+		/* Read the rx timestamp inserted by CGX, which is at the
+		 * start of the packet data.
+		 */
mbuf->timestamp = rte_be_to_cpu_64(*tstamp_ptr);
- tstamp->rx_tstamp = mbuf->timestamp;
- tstamp->rx_ready = 1;
- mbuf->ol_flags |= PKT_RX_IEEE1588_PTP | PKT_RX_IEEE1588_TMST
- | PKT_RX_TIMESTAMP;
+		/* The PKT_RX_IEEE1588_TMST flag needs to be set only when
+		 * PTP packets are received.
+		 */
+ if (mbuf->packet_type == RTE_PTYPE_L2_ETHER_TIMESYNC) {
+ tstamp->rx_tstamp = mbuf->timestamp;
+ tstamp->rx_ready = 1;
+ mbuf->ol_flags |= PKT_RX_IEEE1588_PTP |
+ PKT_RX_IEEE1588_TMST | PKT_RX_TIMESTAMP;
+ }
}
}
if (flags & NIX_TX_OFFLOAD_TSTAMP_F) {
struct nix_send_mem_s *send_mem;
uint16_t off = (no_segdw - 1) << 1;
+ const uint8_t is_ol_tstamp = !(ol_flags & PKT_TX_IEEE1588_TMST);
send_mem = (struct nix_send_mem_s *)(cmd + off);
- if (flags & NIX_TX_MULTI_SEG_F)
+ if (flags & NIX_TX_MULTI_SEG_F) {
/* Retrieving the default desc values */
cmd[off] = send_mem_desc[6];
+			/* Use a compiler barrier to avoid violating the C
+			 * aliasing rules.
+			 */
+ rte_compiler_barrier();
+ }
+
/* Packets for which PKT_TX_IEEE1588_TMST is not set, tx tstamp
- * should not be updated at tx tstamp registered address, rather
- * a dummy address which is eight bytes ahead would be updated
+		 * should not be recorded; hence change the alg type to
+		 * NIX_SENDMEMALG_SET and also change the send mem addr field
+		 * to the next 8 bytes, as it would otherwise corrupt the
+		 * actual tx tstamp registered address.
*/
+ send_mem->alg = NIX_SENDMEMALG_SETTSTMP - (is_ol_tstamp);
+
send_mem->addr = (rte_iova_t)((uint64_t *)send_mem_desc[7] +
- !(ol_flags & PKT_TX_IEEE1588_TMST));
+ (is_ol_tstamp));
}
}