*RTE_NET_ICE_DYNF_PROTO_XTR_METADATA(mb) = metadata;
}
}
+#else
+ RTE_SET_USED(rxq);
#endif
}
*RTE_NET_ICE_DYNF_PROTO_XTR_METADATA(mb) = metadata;
}
}
+#else
+ RTE_SET_USED(rxq);
#endif
}
return 0;
if (likely(!(stat_err0 & ICE_RX_FLEX_ERR0_BITS))) {
- flags |= (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD);
+ flags |= (RTE_MBUF_F_RX_IP_CKSUM_GOOD |
+ RTE_MBUF_F_RX_L4_CKSUM_GOOD |
+ RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD);
return flags;
}
#if (ICE_LOOK_AHEAD != 8)
#error "PMD ICE: ICE_LOOK_AHEAD must be 8\n"
#endif
+
+#define ICE_PTP_TS_VALID 0x1
+
static inline int
ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
{
uint64_t pkt_flags = 0;
uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
+ bool is_tsinit = false;
+ uint64_t ts_ns;
struct ice_vsi *vsi = rxq->vsi;
struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
- uint64_t ts_ns;
struct ice_adapter *ad = rxq->vsi->adapter;
#endif
rxdp = &rxq->rx_ring[rxq->rx_tail];
if (!(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)))
return 0;
- if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
- rxq->hw_register_set = 1;
+#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
+ if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
+ uint64_t sw_cur_time = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
+
+ if (unlikely(sw_cur_time - rxq->hw_time_update > 4))
+			is_tsinit = true;
+ }
+#endif
/**
* Scan LOOK_AHEAD descriptors at a time to determine which
rxd_to_pkt_fields_ops[rxq->rxdid](rxq, mb, &rxdp[j]);
#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
if (ice_timestamp_dynflag > 0) {
- ts_ns = ice_tstamp_convert_32b_64b(hw, ad,
- rxq->hw_register_set,
- rte_le_to_cpu_32(rxdp[j].wb.flex_ts.ts_high));
- rxq->hw_register_set = 0;
+ rxq->time_high =
+ rte_le_to_cpu_32(rxdp[j].wb.flex_ts.ts_high);
+ if (unlikely(is_tsinit)) {
+ ts_ns = ice_tstamp_convert_32b_64b(hw, ad, 1,
+ rxq->time_high);
+ rxq->hw_time_low = (uint32_t)ts_ns;
+ rxq->hw_time_high = (uint32_t)(ts_ns >> 32);
+ is_tsinit = false;
+ } else {
+ if (rxq->time_high < rxq->hw_time_low)
+ rxq->hw_time_high += 1;
+ ts_ns = (uint64_t)rxq->hw_time_high << 32 | rxq->time_high;
+ rxq->hw_time_low = rxq->time_high;
+ }
+ rxq->hw_time_update = rte_get_timer_cycles() /
+ (rte_get_timer_hz() / 1000);
*RTE_MBUF_DYNFIELD(mb,
- ice_timestamp_dynfield_offset,
- rte_mbuf_timestamp_t *) = ts_ns;
- mb->ol_flags |= ice_timestamp_dynflag;
+ ice_timestamp_dynfield_offset,
+ rte_mbuf_timestamp_t *) = ts_ns;
+ pkt_flags |= ice_timestamp_dynflag;
}
if (ad->ptp_ena && ((mb->packet_type &
rte_le_to_cpu_32(rxdp[j].wb.flex_ts.ts_high);
mb->timesync = rxq->queue_id;
pkt_flags |= RTE_MBUF_F_RX_IEEE1588_PTP;
+ if (rxdp[j].wb.time_stamp_low &
+ ICE_PTP_TS_VALID)
+ pkt_flags |=
+ RTE_MBUF_F_RX_IEEE1588_TMST;
}
#endif
mb->ol_flags |= pkt_flags;
uint64_t pkt_flags;
uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
+ bool is_tsinit = false;
+ uint64_t ts_ns;
struct ice_vsi *vsi = rxq->vsi;
struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
- uint64_t ts_ns;
struct ice_adapter *ad = rxq->vsi->adapter;
-#endif
- if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
- rxq->hw_register_set = 1;
+ if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
+ uint64_t sw_cur_time = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
+
+ if (unlikely(sw_cur_time - rxq->hw_time_update > 4))
+ is_tsinit = true;
+ }
+#endif
while (nb_rx < nb_pkts) {
rxdp = &rx_ring[rx_id];
pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
if (ice_timestamp_dynflag > 0) {
- ts_ns = ice_tstamp_convert_32b_64b(hw, ad,
- rxq->hw_register_set,
- rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high));
- rxq->hw_register_set = 0;
- *RTE_MBUF_DYNFIELD(first_seg,
- ice_timestamp_dynfield_offset,
- rte_mbuf_timestamp_t *) = ts_ns;
- first_seg->ol_flags |= ice_timestamp_dynflag;
+ rxq->time_high =
+ rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high);
+ if (unlikely(is_tsinit)) {
+ ts_ns = ice_tstamp_convert_32b_64b(hw, ad, 1, rxq->time_high);
+ rxq->hw_time_low = (uint32_t)ts_ns;
+ rxq->hw_time_high = (uint32_t)(ts_ns >> 32);
+ is_tsinit = false;
+ } else {
+ if (rxq->time_high < rxq->hw_time_low)
+ rxq->hw_time_high += 1;
+ ts_ns = (uint64_t)rxq->hw_time_high << 32 | rxq->time_high;
+ rxq->hw_time_low = rxq->time_high;
+ }
+ rxq->hw_time_update = rte_get_timer_cycles() /
+ (rte_get_timer_hz() / 1000);
+ *RTE_MBUF_DYNFIELD(rxm,
+					ice_timestamp_dynfield_offset,
+ rte_mbuf_timestamp_t *) = ts_ns;
+ pkt_flags |= ice_timestamp_dynflag;
}
if (ad->ptp_ena && ((first_seg->packet_type & RTE_PTYPE_L2_MASK)
uint64_t pkt_flags;
uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
+ bool is_tsinit = false;
+ uint64_t ts_ns;
struct ice_vsi *vsi = rxq->vsi;
struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
- uint64_t ts_ns;
struct ice_adapter *ad = rxq->vsi->adapter;
-#endif
- if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
- rxq->hw_register_set = 1;
+ if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
+ uint64_t sw_cur_time = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
+
+ if (unlikely(sw_cur_time - rxq->hw_time_update > 4))
+			is_tsinit = true;
+ }
+#endif
while (nb_rx < nb_pkts) {
rxdp = &rx_ring[rx_id];
pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
if (ice_timestamp_dynflag > 0) {
- ts_ns = ice_tstamp_convert_32b_64b(hw, ad,
- rxq->hw_register_set,
- rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high));
- rxq->hw_register_set = 0;
+ rxq->time_high =
+ rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high);
+ if (unlikely(is_tsinit)) {
+ ts_ns = ice_tstamp_convert_32b_64b(hw, ad, 1, rxq->time_high);
+ rxq->hw_time_low = (uint32_t)ts_ns;
+ rxq->hw_time_high = (uint32_t)(ts_ns >> 32);
+ is_tsinit = false;
+ } else {
+ if (rxq->time_high < rxq->hw_time_low)
+ rxq->hw_time_high += 1;
+ ts_ns = (uint64_t)rxq->hw_time_high << 32 | rxq->time_high;
+ rxq->hw_time_low = rxq->time_high;
+ }
+ rxq->hw_time_update = rte_get_timer_cycles() /
+ (rte_get_timer_hz() / 1000);
*RTE_MBUF_DYNFIELD(rxm,
- ice_timestamp_dynfield_offset,
- rte_mbuf_timestamp_t *) = ts_ns;
- rxm->ol_flags |= ice_timestamp_dynflag;
+					ice_timestamp_dynfield_offset,
+ rte_mbuf_timestamp_t *) = ts_ns;
+ pkt_flags |= ice_timestamp_dynflag;
}
if (ad->ptp_ena && ((rxm->packet_type & RTE_PTYPE_L2_MASK) ==
/* copy old mbuf to rx_pkts */
rx_pkts[nb_rx++] = rxm;
}
+
rxq->rx_tail = rx_id;
/**
* If the number of free RX descriptors is greater than the RX free
if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
*td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
*td_offset |= (tx_offload.l3_len >> 2) <<
- ICE_TX_DESC_LEN_IPLEN_S;
+ ICE_TX_DESC_LEN_IPLEN_S;
} else if (ol_flags & RTE_MBUF_F_TX_IPV4) {
*td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;
*td_offset |= (tx_offload.l3_len >> 2) <<
- ICE_TX_DESC_LEN_IPLEN_S;
+ ICE_TX_DESC_LEN_IPLEN_S;
} else if (ol_flags & RTE_MBUF_F_TX_IPV6) {
*td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
*td_offset |= (tx_offload.l3_len >> 2) <<
- ICE_TX_DESC_LEN_IPLEN_S;
+ ICE_TX_DESC_LEN_IPLEN_S;
}
if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
{ ice_xmit_pkts_vec_avx512, "Vector AVX512" },
{ ice_xmit_pkts_vec_avx512_offload, "Offload Vector AVX512" },
#endif
- { ice_xmit_pkts_vec_avx2, "Vector AVX2" },
- { ice_xmit_pkts_vec, "Vector SSE" },
+ { ice_xmit_pkts_vec_avx2, "Vector AVX2" },
+ { ice_xmit_pkts_vec_avx2_offload, "Offload Vector AVX2" },
+ { ice_xmit_pkts_vec, "Vector SSE" },
#endif
};