if (!(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)))
return 0;
+ if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
+ rxq->hw_register_set = 1;
+
/**
* Scan LOOK_AHEAD descriptors at a time to determine which
* descriptors reference packets that are ready to be received.
ice_rxd_to_vlan_tci(mb, &rxdp[j]);
rxd_to_pkt_fields_ops[rxq->rxdid](rxq, mb, &rxdp[j]);
#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
- if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
- ts_ns = ice_tstamp_convert_32b_64b(hw,
+ if (ice_timestamp_dynflag > 0) {
+ ts_ns = ice_tstamp_convert_32b_64b(hw, ad,
+ rxq->hw_register_set,
rte_le_to_cpu_32(rxdp[j].wb.flex_ts.ts_high));
- if (ice_timestamp_dynflag > 0) {
- *RTE_MBUF_DYNFIELD(mb,
- ice_timestamp_dynfield_offset,
- rte_mbuf_timestamp_t *) = ts_ns;
- mb->ol_flags |= ice_timestamp_dynflag;
- }
+ rxq->hw_register_set = 0;
+ *RTE_MBUF_DYNFIELD(mb,
+ ice_timestamp_dynfield_offset,
+ rte_mbuf_timestamp_t *) = ts_ns;
+ mb->ol_flags |= ice_timestamp_dynflag;
}
if (ad->ptp_ena && ((mb->packet_type &
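The pattern above, repeated in the two Rx paths below, reduces to a one-shot flag: the queue arms hw_register_set at the start of each burst, the first timestamped packet consumes it to refresh the cached 64-bit time from the GLTSYN registers, and the rest of the burst extends its 32-bit stamps from the cache. The point is to touch the registers once per burst instead of once per packet. A minimal sketch of that flow follows; apart from hw_register_set, every name in it is a hypothetical stand-in, not the driver's real API.

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical stand-ins; apart from hw_register_set, none of these
 * names are the driver's real structures, registers, or helpers. */
struct demo_rxq {
	bool timestamp_offload;     /* RTE_ETH_RX_OFFLOAD_TIMESTAMP enabled? */
	uint32_t hw_register_set;   /* 1 = refresh the cached time */
};

static uint64_t time_hw;            /* stands in for ad->time_hw */

static uint64_t read_hw_time(void)
{
	/* placeholder for the two GLTSYN_TIME_H/L register reads */
	return 0x123456789ULL;
}

static uint64_t convert_ts(struct demo_rxq *q, uint32_t ts32)
{
	if (q->hw_register_set) {
		time_hw = read_hw_time();   /* hit the registers once... */
		q->hw_register_set = 0;     /* ...then reuse for the burst */
	}
	/* simplified; the real helper does wrap-aware extension */
	return (time_hw & ~(uint64_t)UINT32_MAX) | ts32;
}

void rx_burst(struct demo_rxq *q, const uint32_t *ts32, int n)
{
	if (q->timestamp_offload)
		q->hw_register_set = 1;     /* arm one refresh per burst */

	for (int i = 0; i < n; i++)
		(void)convert_ts(q, ts32[i]);
}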
uint64_t ts_ns;
struct ice_adapter *ad = rxq->vsi->adapter;
#endif
+
+ if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
+ rxq->hw_register_set = 1;
+
while (nb_rx < nb_pkts) {
rxdp = &rx_ring[rx_id];
rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
rxd_to_pkt_fields_ops[rxq->rxdid](rxq, first_seg, &rxd);
pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
- if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
- ts_ns = ice_tstamp_convert_32b_64b(hw,
+ if (ice_timestamp_dynflag > 0) {
+ ts_ns = ice_tstamp_convert_32b_64b(hw, ad,
+ rxq->hw_register_set,
rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high));
- if (ice_timestamp_dynflag > 0) {
- *RTE_MBUF_DYNFIELD(first_seg,
- ice_timestamp_dynfield_offset,
- rte_mbuf_timestamp_t *) = ts_ns;
- first_seg->ol_flags |= ice_timestamp_dynflag;
- }
+ rxq->hw_register_set = 0;
+ *RTE_MBUF_DYNFIELD(first_seg,
+ ice_timestamp_dynfield_offset,
+ rte_mbuf_timestamp_t *) = ts_ns;
+ first_seg->ol_flags |= ice_timestamp_dynflag;
}
if (ad->ptp_ena && ((first_seg->packet_type & RTE_PTYPE_L2_MASK)
uint64_t ts_ns;
struct ice_adapter *ad = rxq->vsi->adapter;
#endif
+
+ if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
+ rxq->hw_register_set = 1;
+
while (nb_rx < nb_pkts) {
rxdp = &rx_ring[rx_id];
rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
rxd_to_pkt_fields_ops[rxq->rxdid](rxq, rxm, &rxd);
pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
- if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
- ts_ns = ice_tstamp_convert_32b_64b(hw,
+ if (ice_timestamp_dynflag > 0) {
+ ts_ns = ice_tstamp_convert_32b_64b(hw, ad,
+ rxq->hw_register_set,
rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high));
- if (ice_timestamp_dynflag > 0) {
- *RTE_MBUF_DYNFIELD(rxm,
- ice_timestamp_dynfield_offset,
- rte_mbuf_timestamp_t *) = ts_ns;
- rxm->ol_flags |= ice_timestamp_dynflag;
- }
+ rxq->hw_register_set = 0;
+ *RTE_MBUF_DYNFIELD(rxm,
+ ice_timestamp_dynfield_offset,
+ rte_mbuf_timestamp_t *) = ts_ns;
+ rxm->ol_flags |= ice_timestamp_dynflag;
}
if (ad->ptp_ena && ((rxm->packet_type & RTE_PTYPE_L2_MASK) ==
ice_rx_release_mbufs_t rx_rel_mbufs;
uint64_t offloads;
uint32_t time_high;
+ uint32_t hw_register_set;
const struct rte_memzone *mz;
};
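Note the split in state here: the one-shot hw_register_set flag is per Rx queue, while the cached time it refreshes (ad->time_hw, used by the helper below and presumably added to struct ice_adapter elsewhere in this patch) is shared adapter-wide. A toy sketch of that layout, with invented names except for the two fields just mentioned:

#include <stdint.h>

/* Illustrative only; everything except hw_register_set and time_hw
 * is an invented name, not the real ice structures. */
struct demo_adapter {
	uint64_t time_hw;          /* cached 64-bit time, shared by all queues */
};

struct demo_rx_queue {
	struct demo_adapter *ad;   /* every queue points at the one adapter */
	uint32_t hw_register_set;  /* per queue: refresh time_hw on next packet */
};

Since every queue re-arms its own flag per burst, each queue's first timestamped packet refreshes the shared cache, so time_hw never goes longer than one burst without a refresh on an active queue.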
}
}
+#define ICE_TIMESYNC_REG_WRAP_GUARD_BAND 10000
+
/* Helper function to convert a 32b nanoseconds timestamp to 64b. */
static inline
-uint64_t ice_tstamp_convert_32b_64b(struct ice_hw *hw, uint32_t in_timestamp)
+uint64_t ice_tstamp_convert_32b_64b(struct ice_hw *hw, struct ice_adapter *ad,
+ uint32_t flag, uint32_t in_timestamp)
{
const uint64_t mask = 0xFFFFFFFF;
uint32_t hi, lo, lo2, delta;
- uint64_t time, ns;
-
- lo = ICE_READ_REG(hw, GLTSYN_TIME_L(0));
- hi = ICE_READ_REG(hw, GLTSYN_TIME_H(0));
- lo2 = ICE_READ_REG(hw, GLTSYN_TIME_L(0));
- if (lo2 < lo) {
+ uint64_t ns;
+
+ if (flag) {
lo = ICE_READ_REG(hw, GLTSYN_TIME_L(0));
hi = ICE_READ_REG(hw, GLTSYN_TIME_H(0));
- }
- time = ((uint64_t)hi << 32) | lo;
+ /*
+ * On a typical system, the delta between lo and lo2 is ~1000 ns,
+ * so 10000 is a large enough but not overly big guard band.
+ */
+ if (lo > (UINT32_MAX - ICE_TIMESYNC_REG_WRAP_GUARD_BAND))
+ lo2 = ICE_READ_REG(hw, GLTSYN_TIME_L(0));
+ else
+ lo2 = lo;
+
+ if (lo2 < lo) {
+ lo = ICE_READ_REG(hw, GLTSYN_TIME_L(0));
+ hi = ICE_READ_REG(hw, GLTSYN_TIME_H(0));
+ }
+
+ ad->time_hw = ((uint64_t)hi << 32) | lo;
+ }
- delta = (in_timestamp - (uint32_t)(time & mask));
+ delta = (in_timestamp - (uint32_t)(ad->time_hw & mask));
if (delta > (mask / 2)) {
- delta = ((uint32_t)(time & mask) - in_timestamp);
- ns = time - delta;
+ delta = ((uint32_t)(ad->time_hw & mask) - in_timestamp);
+ ns = ad->time_hw - delta;
} else {
- ns = time + delta;
+ ns = ad->time_hw + delta;
}
return ns;
}
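The delta test at the end encodes the wrap handling: deltas are computed modulo 2^32, so a value above half the range means the packet's stamp is behind the cached time rather than ahead of it, and the helper subtracts instead of adding. Below is a self-contained sketch of just that arithmetic with concrete values around a low-word wrap; extend_32b is a made-up name, and only the math mirrors the helper above.

#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as the helper, extracted so it can be exercised
 * with fixed inputs instead of live register reads. */
static uint64_t extend_32b(uint64_t time_hw, uint32_t in_timestamp)
{
	const uint64_t mask = 0xFFFFFFFF;
	uint32_t delta = in_timestamp - (uint32_t)(time_hw & mask);

	if (delta > (mask / 2)) {
		/* stamp is older than the cached time */
		delta = (uint32_t)(time_hw & mask) - in_timestamp;
		return time_hw - delta;
	}
	return time_hw + delta;
}

int main(void)
{
	/* Cached time taken just after the low word wrapped ... */
	uint64_t time_hw = 0x500000010ULL;

	/* ... for a packet stamped just before the wrap: the result
	 * must borrow from the high word (prints 4fffffff0). */
	printf("%llx\n",
	       (unsigned long long)extend_32b(time_hw, 0xFFFFFFF0u));

	/* A stamp slightly ahead of the cached time (prints 500000020). */
	printf("%llx\n",
	       (unsigned long long)extend_32b(time_hw, 0x00000020u));
	return 0;
}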