*RTE_NET_ICE_DYNF_PROTO_XTR_METADATA(mb) = metadata;
}
}
+#else
+ RTE_SET_USED(rxq);
#endif
}
rxq->proto_xtr = pf->proto_xtr != NULL ?
pf->proto_xtr[queue_idx] : PROTO_XTR_NONE;
- /* Allocate the maximun number of RX ring hardware descriptor. */
+	/* Allocate the maximum number of RX ring hardware descriptors. */
len = ICE_MAX_RING_DESC;
/**
tx_free_thresh = (uint16_t)(tx_conf->tx_free_thresh ?
tx_conf->tx_free_thresh :
ICE_DEFAULT_TX_FREE_THRESH);
- /* force tx_rs_thresh to adapt an aggresive tx_free_thresh */
+	/* force tx_rs_thresh to adapt to an aggressive tx_free_thresh */
tx_rs_thresh =
(ICE_DEFAULT_TX_RSBIT_THRESH + tx_free_thresh > nb_desc) ?
nb_desc - tx_free_thresh : ICE_DEFAULT_TX_RSBIT_THRESH;
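
(For a concrete sense of the clamp above, with illustrative values: if nb_desc = 128, tx_free_thresh = 96, and ICE_DEFAULT_TX_RSBIT_THRESH were 64, then 64 + 96 > 128, so tx_rs_thresh is reduced to 128 - 96 = 32, keeping tx_rs_thresh + tx_free_thresh within the ring size.)
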
return 0;
if (likely(!(stat_err0 & ICE_RX_FLEX_ERR0_BITS))) {
- flags |= (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD);
+ flags |= (RTE_MBUF_F_RX_IP_CKSUM_GOOD |
+ RTE_MBUF_F_RX_L4_CKSUM_GOOD |
+ RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD);
return flags;
}
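
With the error-free fast path now also reporting RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD, an application can check the outer (tunnel) L4 checksum status independently of the inner flags. A minimal consumer sketch using the standard rte_mbuf flag names (not part of this patch):

	#include <stdbool.h>
	#include <rte_mbuf.h>

	/* True when hardware verified the outer L4 checksum of 'mb'. */
	static inline bool
	outer_l4_csum_ok(const struct rte_mbuf *mb)
	{
		return (mb->ol_flags & RTE_MBUF_F_RX_OUTER_L4_CKSUM_MASK) ==
		       RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD;
	}
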
#if (ICE_LOOK_AHEAD != 8)
#error "PMD ICE: ICE_LOOK_AHEAD must be 8\n"
#endif
+
+#define ICE_PTP_TS_VALID 0x1
+
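
All three Rx paths patched below (ice_rx_scan_hw_ring, ice_recv_scattered_pkts, ice_recv_pkts) apply the same scheme for widening the 32-bit hardware timestamp: keep a per-queue 64-bit shadow (hw_time_high/hw_time_low), re-seed it via a full hardware read when the queue has been idle for more than 4 ms (is_tsinit), and otherwise detect a wrap of the 32-bit counter by comparing the new low word against the cached one. A minimal standalone sketch of that scheme; the struct and helper names here are illustrative, not part of the driver:

	#include <stdbool.h>
	#include <stdint.h>

	/* Illustrative shadow state; the driver keeps the equivalent fields
	 * (hw_time_high, hw_time_low, hw_time_update) in struct ice_rx_queue.
	 */
	struct ts_shadow {
		uint32_t hi;         /* cached upper 32 bits of the 64-bit time */
		uint32_t lo;         /* low word of the last extended timestamp */
		uint64_t updated_ms; /* software time of the last extension */
	};

	/* Widen a 32-bit hardware timestamp to 64 bits. 'reseed' plays the
	 * role of is_tsinit: when set, the shadow may have missed a wrap, so
	 * rebuild it from a full hardware read ('full_hw_time' stands in for
	 * what ice_tstamp_convert_32b_64b() returns in the driver).
	 */
	static uint64_t
	ts_extend(struct ts_shadow *s, uint32_t ts32, bool reseed,
		  uint64_t full_hw_time, uint64_t now_ms)
	{
		uint64_t ts64;

		if (reseed) {
			ts64 = full_hw_time;
			s->hi = (uint32_t)(ts64 >> 32);
		} else {
			/* A smaller low word than the cached one means the
			 * 32-bit counter wrapped since the previous packet.
			 */
			if (ts32 < s->lo)
				s->hi += 1;
			ts64 = (uint64_t)s->hi << 32 | ts32;
		}
		s->lo = (uint32_t)ts64;
		s->updated_ms = now_ms;
		return ts64;
	}
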
static inline int
ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
{
uint64_t pkt_flags = 0;
uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
+ bool is_tsinit = false;
+ uint64_t ts_ns;
struct ice_vsi *vsi = rxq->vsi;
struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
- uint64_t ts_ns;
struct ice_adapter *ad = rxq->vsi->adapter;
#endif
rxdp = &rxq->rx_ring[rxq->rx_tail];
if (!(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)))
return 0;
+#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
+ if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
+ uint64_t sw_cur_time = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
+
+ if (unlikely(sw_cur_time - rxq->hw_time_update > 4))
+			is_tsinit = true;
+ }
+#endif
+
/**
* Scan LOOK_AHEAD descriptors at a time to determine which
* descriptors reference packets that are ready to be received.
ice_rxd_to_vlan_tci(mb, &rxdp[j]);
rxd_to_pkt_fields_ops[rxq->rxdid](rxq, mb, &rxdp[j]);
#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
- if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
- ts_ns = ice_tstamp_convert_32b_64b(hw,
- rte_le_to_cpu_32(rxdp[j].wb.flex_ts.ts_high));
- if (ice_timestamp_dynflag > 0) {
- *RTE_MBUF_DYNFIELD(mb,
- ice_timestamp_dynfield_offset,
- rte_mbuf_timestamp_t *) = ts_ns;
- mb->ol_flags |= ice_timestamp_dynflag;
+ if (ice_timestamp_dynflag > 0) {
+ rxq->time_high =
+ rte_le_to_cpu_32(rxdp[j].wb.flex_ts.ts_high);
+ if (unlikely(is_tsinit)) {
+ ts_ns = ice_tstamp_convert_32b_64b(hw, ad, 1,
+ rxq->time_high);
+ rxq->hw_time_low = (uint32_t)ts_ns;
+ rxq->hw_time_high = (uint32_t)(ts_ns >> 32);
+ is_tsinit = false;
+ } else {
+ if (rxq->time_high < rxq->hw_time_low)
+ rxq->hw_time_high += 1;
+ ts_ns = (uint64_t)rxq->hw_time_high << 32 | rxq->time_high;
+ rxq->hw_time_low = rxq->time_high;
}
+ rxq->hw_time_update = rte_get_timer_cycles() /
+ (rte_get_timer_hz() / 1000);
+ *RTE_MBUF_DYNFIELD(mb,
+ ice_timestamp_dynfield_offset,
+ rte_mbuf_timestamp_t *) = ts_ns;
+ pkt_flags |= ice_timestamp_dynflag;
}
if (ad->ptp_ena && ((mb->packet_type &
rte_le_to_cpu_32(rxdp[j].wb.flex_ts.ts_high);
mb->timesync = rxq->queue_id;
pkt_flags |= RTE_MBUF_F_RX_IEEE1588_PTP;
+ if (rxdp[j].wb.time_stamp_low &
+ ICE_PTP_TS_VALID)
+ pkt_flags |=
+ RTE_MBUF_F_RX_IEEE1588_TMST;
}
#endif
mb->ol_flags |= pkt_flags;
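
The ICE_PTP_TS_VALID check above distinguishes the two PTP flags: RTE_MBUF_F_RX_IEEE1588_PTP marks the packet as a PTP event packet, while RTE_MBUF_F_RX_IEEE1588_TMST is set only when bit 0 of time_stamp_low reports the latched timestamp as valid. A consumer sketch using the standard ethdev timesync API (not part of this patch):

	#include <rte_ethdev.h>
	#include <rte_mbuf.h>

	struct timespec ts;

	/* Read the latched Rx timestamp only for a PTP packet the driver
	 * flagged as having a valid timestamp; mb->timesync carries the
	 * queue id, which ethdev passes through as the 'flags' argument.
	 */
	if ((mb->ol_flags & RTE_MBUF_F_RX_IEEE1588_PTP) &&
	    (mb->ol_flags & RTE_MBUF_F_RX_IEEE1588_TMST))
		rte_eth_timesync_read_rx_timestamp(port_id, &ts, mb->timesync);
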
rxdp[i].read.pkt_addr = dma_addr;
}
- /* Update rx tail regsiter */
+ /* Update Rx tail register */
ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_free_trigger);
rxq->rx_free_trigger =
uint64_t pkt_flags;
uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
+ bool is_tsinit = false;
+ uint64_t ts_ns;
struct ice_vsi *vsi = rxq->vsi;
struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
- uint64_t ts_ns;
struct ice_adapter *ad = rxq->vsi->adapter;
+
+ if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
+ uint64_t sw_cur_time = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
+
+ if (unlikely(sw_cur_time - rxq->hw_time_update > 4))
+ is_tsinit = true;
+ }
#endif
+
while (nb_rx < nb_pkts) {
rxdp = &rx_ring[rx_id];
rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
rxd_to_pkt_fields_ops[rxq->rxdid](rxq, first_seg, &rxd);
pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
- if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
- ts_ns = ice_tstamp_convert_32b_64b(hw,
- rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high));
- if (ice_timestamp_dynflag > 0) {
- *RTE_MBUF_DYNFIELD(first_seg,
- ice_timestamp_dynfield_offset,
- rte_mbuf_timestamp_t *) = ts_ns;
- first_seg->ol_flags |= ice_timestamp_dynflag;
+ if (ice_timestamp_dynflag > 0) {
+ rxq->time_high =
+ rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high);
+ if (unlikely(is_tsinit)) {
+ ts_ns = ice_tstamp_convert_32b_64b(hw, ad, 1, rxq->time_high);
+ rxq->hw_time_low = (uint32_t)ts_ns;
+ rxq->hw_time_high = (uint32_t)(ts_ns >> 32);
+ is_tsinit = false;
+ } else {
+ if (rxq->time_high < rxq->hw_time_low)
+ rxq->hw_time_high += 1;
+ ts_ns = (uint64_t)rxq->hw_time_high << 32 | rxq->time_high;
+ rxq->hw_time_low = rxq->time_high;
}
+ rxq->hw_time_update = rte_get_timer_cycles() /
+ (rte_get_timer_hz() / 1000);
+			*RTE_MBUF_DYNFIELD(first_seg,
+				ice_timestamp_dynfield_offset,
+				rte_mbuf_timestamp_t *) = ts_ns;
+ pkt_flags |= ice_timestamp_dynflag;
}
if (ad->ptp_ena && ((first_seg->packet_type & RTE_PTYPE_L2_MASK)
* threshold of the queue, advance the Receive Descriptor Tail (RDT)
* register. Update the RDT with the value of the last processed RX
* descriptor minus 1, to guarantee that the RDT register is never
- * equal to the RDH register, which creates a "full" ring situtation
+ * equal to the RDH register, which creates a "full" ring situation
* from the hardware point of view.
*/
nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
uint64_t pkt_flags;
uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
+ bool is_tsinit = false;
+ uint64_t ts_ns;
struct ice_vsi *vsi = rxq->vsi;
struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
- uint64_t ts_ns;
struct ice_adapter *ad = rxq->vsi->adapter;
+
+ if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
+ uint64_t sw_cur_time = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
+
+ if (unlikely(sw_cur_time - rxq->hw_time_update > 4))
+			is_tsinit = true;
+ }
#endif
+
while (nb_rx < nb_pkts) {
rxdp = &rx_ring[rx_id];
rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
rxd_to_pkt_fields_ops[rxq->rxdid](rxq, rxm, &rxd);
pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
- if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
- ts_ns = ice_tstamp_convert_32b_64b(hw,
- rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high));
- if (ice_timestamp_dynflag > 0) {
- *RTE_MBUF_DYNFIELD(rxm,
- ice_timestamp_dynfield_offset,
- rte_mbuf_timestamp_t *) = ts_ns;
- rxm->ol_flags |= ice_timestamp_dynflag;
+ if (ice_timestamp_dynflag > 0) {
+ rxq->time_high =
+ rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high);
+ if (unlikely(is_tsinit)) {
+ ts_ns = ice_tstamp_convert_32b_64b(hw, ad, 1, rxq->time_high);
+ rxq->hw_time_low = (uint32_t)ts_ns;
+ rxq->hw_time_high = (uint32_t)(ts_ns >> 32);
+ is_tsinit = false;
+ } else {
+ if (rxq->time_high < rxq->hw_time_low)
+ rxq->hw_time_high += 1;
+ ts_ns = (uint64_t)rxq->hw_time_high << 32 | rxq->time_high;
+ rxq->hw_time_low = rxq->time_high;
}
+ rxq->hw_time_update = rte_get_timer_cycles() /
+ (rte_get_timer_hz() / 1000);
+			*RTE_MBUF_DYNFIELD(rxm,
+				ice_timestamp_dynfield_offset,
+				rte_mbuf_timestamp_t *) = ts_ns;
+ pkt_flags |= ice_timestamp_dynflag;
}
if (ad->ptp_ena && ((rxm->packet_type & RTE_PTYPE_L2_MASK) ==
/* copy old mbuf to rx_pkts */
rx_pkts[nb_rx++] = rxm;
}
+
rxq->rx_tail = rx_id;
/**
* If the number of free RX descriptors is greater than the RX free
if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
*td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
*td_offset |= (tx_offload.l3_len >> 2) <<
- ICE_TX_DESC_LEN_IPLEN_S;
+ ICE_TX_DESC_LEN_IPLEN_S;
} else if (ol_flags & RTE_MBUF_F_TX_IPV4) {
*td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;
*td_offset |= (tx_offload.l3_len >> 2) <<
- ICE_TX_DESC_LEN_IPLEN_S;
+ ICE_TX_DESC_LEN_IPLEN_S;
} else if (ol_flags & RTE_MBUF_F_TX_IPV6) {
*td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
*td_offset |= (tx_offload.l3_len >> 2) <<
- ICE_TX_DESC_LEN_IPLEN_S;
+ ICE_TX_DESC_LEN_IPLEN_S;
}
if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
ice_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n));
txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n));
- /* Determin if RS bit needs to be set */
+ /* Determine if RS bit needs to be set */
if (txq->tx_tail > txq->tx_next_rs) {
txr[txq->tx_next_rs].cmd_type_offset_bsz |=
rte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) <<
rx_check_ret = ice_rx_vec_dev_check(dev);
if (ad->ptp_ena)
rx_check_ret = -1;
+ ad->rx_vec_offload_support =
+ (rx_check_ret == ICE_VECTOR_OFFLOAD_PATH);
if (rx_check_ret >= 0 && ad->rx_bulk_alloc_allowed &&
rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
ad->rx_vec_allowed = true;
if (dev->data->scattered_rx) {
if (ad->rx_use_avx512) {
#ifdef CC_AVX512_SUPPORT
- if (rx_check_ret == ICE_VECTOR_OFFLOAD_PATH) {
+ if (ad->rx_vec_offload_support) {
PMD_DRV_LOG(NOTICE,
"Using AVX512 OFFLOAD Vector Scattered Rx (port %d).",
dev->data->port_id);
}
#endif
} else if (ad->rx_use_avx2) {
- if (rx_check_ret == ICE_VECTOR_OFFLOAD_PATH) {
+ if (ad->rx_vec_offload_support) {
PMD_DRV_LOG(NOTICE,
"Using AVX2 OFFLOAD Vector Scattered Rx (port %d).",
dev->data->port_id);
} else {
if (ad->rx_use_avx512) {
#ifdef CC_AVX512_SUPPORT
- if (rx_check_ret == ICE_VECTOR_OFFLOAD_PATH) {
+ if (ad->rx_vec_offload_support) {
PMD_DRV_LOG(NOTICE,
"Using AVX512 OFFLOAD Vector Rx (port %d).",
dev->data->port_id);
}
#endif
} else if (ad->rx_use_avx2) {
- if (rx_check_ret == ICE_VECTOR_OFFLOAD_PATH) {
+ if (ad->rx_vec_offload_support) {
PMD_DRV_LOG(NOTICE,
"Using AVX2 OFFLOAD Vector Rx (port %d).",
dev->data->port_id);
{ ice_xmit_pkts_vec_avx512, "Vector AVX512" },
{ ice_xmit_pkts_vec_avx512_offload, "Offload Vector AVX512" },
#endif
- { ice_xmit_pkts_vec_avx2, "Vector AVX2" },
- { ice_xmit_pkts_vec, "Vector SSE" },
+ { ice_xmit_pkts_vec_avx2, "Vector AVX2" },
+ { ice_xmit_pkts_vec_avx2_offload, "Offload Vector AVX2" },
+ { ice_xmit_pkts_vec, "Vector SSE" },
#endif
};
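
The new "Offload Vector AVX2" entry makes the AVX2 offload Tx path visible through the ethdev burst-mode query. A small sketch of how an application can confirm which path a queue selected (standard ethdev API, not part of this patch):

	#include <stdio.h>
	#include <rte_ethdev.h>

	static void
	print_tx_burst_mode(uint16_t port_id, uint16_t queue_id)
	{
		struct rte_eth_burst_mode mode;

		/* Reports the string registered in the table above, e.g.
		 * "Offload Vector AVX2".
		 */
		if (rte_eth_tx_burst_mode_get(port_id, queue_id, &mode) == 0)
			printf("Tx burst mode: %s\n", mode.info);
	}
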