#define ICE_ONE_PPS_OUT_ARG "pps_out"
#define ICE_RX_LOW_LATENCY_ARG "rx_low_latency"
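+/* Offset of the dynamic mbuf field and value of the dynamic flag used to
+ * carry Rx HW timestamps; filled in by rte_mbuf_dyn_rx_timestamp_register().
+ */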
+uint64_t ice_timestamp_dynflag;
+int ice_timestamp_dynfield_offset = -1;
+
static const char * const ice_valid_args[] = {
ICE_SAFE_MODE_SUPPORT_ARG,
ICE_PIPELINE_MODE_SUPPORT_ARG,
DEV_RX_OFFLOAD_QINQ_STRIP |
DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
DEV_RX_OFFLOAD_VLAN_EXTEND |
- DEV_RX_OFFLOAD_RSS_HASH;
+ DEV_RX_OFFLOAD_RSS_HASH |
+ DEV_RX_OFFLOAD_TIMESTAMP;
dev_info->tx_offload_capa |=
DEV_TX_OFFLOAD_QINQ_INSERT |
DEV_TX_OFFLOAD_IPV4_CKSUM |
}
}
+ if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
+ /* Register mbuf field and flag for Rx timestamp */
+ err = rte_mbuf_dyn_rx_timestamp_register(
+ &ice_timestamp_dynfield_offset,
+ &ice_timestamp_dynflag);
+ if (err) {
+ PMD_DRV_LOG(ERR,
+ "Cannot register mbuf field/flag for timestamp");
+ return -EINVAL;
+ }
+ }
+
memset(&rx_ctx, 0, sizeof(rx_ctx));
rx_ctx.base = rxq->rx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
QRXFLXP_CNTXT_RXDID_PRIO_M;
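+	/* Enable HW timestamp reporting for this queue in the Rx flex
+	 * descriptor context.
+	 */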
+ if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP)
+ regval |= QRXFLXP_CNTXT_TS_M;
+
ICE_WRITE_REG(hw, QRXFLXP_CNTXT(rxq->reg_idx), regval);
err = ice_clear_rxq_ctx(hw, rxq->reg_idx);
int32_t i, j, nb_rx = 0;
uint64_t pkt_flags = 0;
uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
+ struct ice_vsi *vsi = rxq->vsi;
+ struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
+ uint64_t ts_ns;
rxdp = &rxq->rx_ring[rxq->rx_tail];
rxep = &rxq->sw_ring[rxq->rx_tail];
ice_rxd_to_vlan_tci(mb, &rxdp[j]);
rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);
+ if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
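+				/* Extend the 32-bit descriptor timestamp to
+				 * 64 bits and store it in the dynamic field.
+				 */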
+ ts_ns = ice_tstamp_convert_32b_64b(hw,
+ rte_le_to_cpu_32(rxdp[j].wb.flex_ts.ts_high));
+ if (ice_timestamp_dynflag > 0) {
+ *RTE_MBUF_DYNFIELD(mb,
+ ice_timestamp_dynfield_offset,
+ rte_mbuf_timestamp_t *) = ts_ns;
+ mb->ol_flags |= ice_timestamp_dynflag;
+ }
+ }
+
mb->ol_flags |= pkt_flags;
}
uint64_t dma_addr;
uint64_t pkt_flags;
uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
+ struct ice_vsi *vsi = rxq->vsi;
+ struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
+ uint64_t ts_ns;
while (nb_rx < nb_pkts) {
rxdp = &rx_ring[rx_id];
ice_rxd_to_vlan_tci(first_seg, &rxd);
rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);
pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
+
+ if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
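+			/* Attach the HW Rx timestamp to the first segment. */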
+ ts_ns = ice_tstamp_convert_32b_64b(hw,
+ rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high));
+ if (ice_timestamp_dynflag > 0) {
+ *RTE_MBUF_DYNFIELD(first_seg,
+ ice_timestamp_dynfield_offset,
+ rte_mbuf_timestamp_t *) = ts_ns;
+ first_seg->ol_flags |= ice_timestamp_dynflag;
+ }
+ }
+
first_seg->ol_flags |= pkt_flags;
/* Prefetch data of first segment, if configured to do so. */
rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
uint64_t dma_addr;
uint64_t pkt_flags;
uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
+ struct ice_vsi *vsi = rxq->vsi;
+ struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
+ uint64_t ts_ns;
while (nb_rx < nb_pkts) {
rxdp = &rx_ring[rx_id];
ice_rxd_to_vlan_tci(rxm, &rxd);
rxq->rxd_to_pkt_fields(rxq, rxm, &rxd);
pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
+
+ if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
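+			/* Extend and attach the HW Rx timestamp to the mbuf. */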
+ ts_ns = ice_tstamp_convert_32b_64b(hw,
+ rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high));
+ if (ice_timestamp_dynflag > 0) {
+ *RTE_MBUF_DYNFIELD(rxm,
+ ice_timestamp_dynfield_offset,
+ rte_mbuf_timestamp_t *) = ts_ns;
+ rxm->ol_flags |= ice_timestamp_dynflag;
+ }
+ }
+
rxm->ol_flags |= pkt_flags;
/* copy old mbuf to rx_pkts */
rx_pkts[nb_rx++] = rxm;
#define ICE_RXDID_COMMS_OVS 22
+extern uint64_t ice_timestamp_dynflag;
+extern int ice_timestamp_dynfield_offset;
+
typedef void (*ice_rx_release_mbufs_t)(struct ice_rx_queue *rxq);
typedef void (*ice_tx_release_mbufs_t)(struct ice_tx_queue *txq);
typedef void (*ice_rxd_to_pkt_fields_t)(struct ice_rx_queue *rxq,
}
}
+/* Helper function to extend a 32-bit nanosecond timestamp to 64 bits. */
+static inline
+uint64_t ice_tstamp_convert_32b_64b(struct ice_hw *hw, uint32_t in_timestamp)
+{
+ const uint64_t mask = 0xFFFFFFFF;
+ uint32_t hi, lo, lo2, delta;
+ uint64_t time, ns;
+
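+	/* Sample GLTSYN_TIME_L before and after GLTSYN_TIME_H so a low-word
+	 * rollover between the reads can be detected; re-read both if one
+	 * happened.
+	 */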
+ lo = ICE_READ_REG(hw, GLTSYN_TIME_L(0));
+ hi = ICE_READ_REG(hw, GLTSYN_TIME_H(0));
+ lo2 = ICE_READ_REG(hw, GLTSYN_TIME_L(0));
+
+ if (lo2 < lo) {
+ lo = ICE_READ_REG(hw, GLTSYN_TIME_L(0));
+ hi = ICE_READ_REG(hw, GLTSYN_TIME_H(0));
+ }
+
+ time = ((uint64_t)hi << 32) | lo;
+
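+	/* Reconstruct the full 64-bit value by stepping forward or backward
+	 * from the sampled time, whichever gives the smaller 32-bit distance
+	 * to the in-descriptor timestamp.
+	 */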
+ delta = (in_timestamp - (uint32_t)(time & mask));
+ if (delta > (mask / 2)) {
+ delta = ((uint32_t)(time & mask) - in_timestamp);
+ ns = time - delta;
+ } else {
+ ns = time + delta;
+ }
+
+ return ns;
+}
+
#endif /* _ICE_RXTX_H_ */
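
Note (not part of the patch): a minimal sketch of how an application could read the
timestamps exposed through the dynamic mbuf field/flag registered above, assuming the
port was started with DEV_RX_OFFLOAD_TIMESTAMP enabled; the function names below are
illustrative, not part of any DPDK API.

	#include <stdio.h>
	#include <inttypes.h>
	#include <rte_mbuf.h>
	#include <rte_mbuf_dyn.h>

	static int ts_field_offset = -1;	/* byte offset of the timestamp field */
	static uint64_t ts_rx_flag;		/* ol_flags bit: "Rx timestamp valid" */

	/* Same registration the PMD performs; registering the same field again
	 * returns the same offset, so the application and driver agree.
	 */
	static int
	app_init_rx_timestamp(void)
	{
		return rte_mbuf_dyn_rx_timestamp_register(&ts_field_offset, &ts_rx_flag);
	}

	static void
	app_handle_pkt(struct rte_mbuf *mb)
	{
		if (mb->ol_flags & ts_rx_flag) {
			uint64_t ts_ns = *RTE_MBUF_DYNFIELD(mb, ts_field_offset,
							    rte_mbuf_timestamp_t *);
			printf("Rx HW timestamp: %" PRIu64 " ns\n", ts_ns);
		}
	}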