X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fice%2Fice_rxtx.c;h=bfb3a16ae222e7435699b261cba1f9ad16a4b3cd;hb=3e87e12dc8bcb1d06dafcb302b056fee51deb090;hp=a20f4c751a1b2d53b64a2bed01ad293e0475b1aa;hpb=250e2ed8d85d038ce864052ebd6f9af51db40df2;p=dpdk.git diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c index a20f4c751a..bfb3a16ae2 100644 --- a/drivers/net/ice/ice_rxtx.c +++ b/drivers/net/ice/ice_rxtx.c @@ -10,11 +10,10 @@ #include "ice_rxtx.h" #include "ice_rxtx_vec_common.h" -#define ICE_TX_CKSUM_OFFLOAD_MASK ( \ - PKT_TX_IP_CKSUM | \ - PKT_TX_L4_MASK | \ - PKT_TX_TCP_SEG | \ - PKT_TX_OUTER_IP_CKSUM) +#define ICE_TX_CKSUM_OFFLOAD_MASK (RTE_MBUF_F_TX_IP_CKSUM | \ + RTE_MBUF_F_TX_L4_MASK | \ + RTE_MBUF_F_TX_TCP_SEG | \ + RTE_MBUF_F_TX_OUTER_IP_CKSUM) /* Offset of mbuf dynamic field for protocol extraction data */ int rte_net_ice_dynfield_proto_xtr_metadata_offs = -1; @@ -88,13 +87,13 @@ ice_rxd_to_pkt_fields_by_comms_generic(__rte_unused struct ice_rx_queue *rxq, uint16_t stat_err = rte_le_to_cpu_16(desc->status_error0); if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) { - mb->ol_flags |= PKT_RX_RSS_HASH; + mb->ol_flags |= RTE_MBUF_F_RX_RSS_HASH; mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash); } #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC if (desc->flow_id != 0xFFFFFFFF) { - mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID; + mb->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID; mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id); } #endif @@ -112,14 +111,14 @@ ice_rxd_to_pkt_fields_by_comms_ovs(__rte_unused struct ice_rx_queue *rxq, #endif if (desc->flow_id != 0xFFFFFFFF) { - mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID; + mb->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID; mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id); } #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC stat_err = rte_le_to_cpu_16(desc->status_error0); if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) { - mb->ol_flags |= PKT_RX_RSS_HASH; + mb->ol_flags |= RTE_MBUF_F_RX_RSS_HASH; mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash); } #endif @@ -136,13 +135,13 @@ ice_rxd_to_pkt_fields_by_comms_aux_v1(struct ice_rx_queue *rxq, stat_err = rte_le_to_cpu_16(desc->status_error0); if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) { - mb->ol_flags |= PKT_RX_RSS_HASH; + mb->ol_flags |= RTE_MBUF_F_RX_RSS_HASH; mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash); } #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC if (desc->flow_id != 0xFFFFFFFF) { - mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID; + mb->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID; mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id); } @@ -164,6 +163,8 @@ ice_rxd_to_pkt_fields_by_comms_aux_v1(struct ice_rx_queue *rxq, *RTE_NET_ICE_DYNF_PROTO_XTR_METADATA(mb) = metadata; } } +#else + RTE_SET_USED(rxq); #endif } @@ -178,13 +179,13 @@ ice_rxd_to_pkt_fields_by_comms_aux_v2(struct ice_rx_queue *rxq, stat_err = rte_le_to_cpu_16(desc->status_error0); if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) { - mb->ol_flags |= PKT_RX_RSS_HASH; + mb->ol_flags |= RTE_MBUF_F_RX_RSS_HASH; mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash); } #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC if (desc->flow_id != 0xFFFFFFFF) { - mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID; + mb->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID; mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id); } @@ -202,54 +203,60 @@ ice_rxd_to_pkt_fields_by_comms_aux_v2(struct ice_rx_queue *rxq, 
*RTE_NET_ICE_DYNF_PROTO_XTR_METADATA(mb) = metadata; } } +#else + RTE_SET_USED(rxq); #endif } +static const ice_rxd_to_pkt_fields_t rxd_to_pkt_fields_ops[] = { + [ICE_RXDID_COMMS_AUX_VLAN] = ice_rxd_to_pkt_fields_by_comms_aux_v1, + [ICE_RXDID_COMMS_AUX_IPV4] = ice_rxd_to_pkt_fields_by_comms_aux_v1, + [ICE_RXDID_COMMS_AUX_IPV6] = ice_rxd_to_pkt_fields_by_comms_aux_v1, + [ICE_RXDID_COMMS_AUX_IPV6_FLOW] = ice_rxd_to_pkt_fields_by_comms_aux_v1, + [ICE_RXDID_COMMS_AUX_TCP] = ice_rxd_to_pkt_fields_by_comms_aux_v1, + [ICE_RXDID_COMMS_AUX_IP_OFFSET] = ice_rxd_to_pkt_fields_by_comms_aux_v2, + [ICE_RXDID_COMMS_GENERIC] = ice_rxd_to_pkt_fields_by_comms_generic, + [ICE_RXDID_COMMS_OVS] = ice_rxd_to_pkt_fields_by_comms_ovs, +}; + void ice_select_rxd_to_pkt_fields_handler(struct ice_rx_queue *rxq, uint32_t rxdid) { + rxq->rxdid = rxdid; + switch (rxdid) { case ICE_RXDID_COMMS_AUX_VLAN: rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_vlan_mask; - rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1; break; case ICE_RXDID_COMMS_AUX_IPV4: rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ipv4_mask; - rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1; break; case ICE_RXDID_COMMS_AUX_IPV6: rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ipv6_mask; - rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1; break; case ICE_RXDID_COMMS_AUX_IPV6_FLOW: rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask; - rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1; break; case ICE_RXDID_COMMS_AUX_TCP: rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_tcp_mask; - rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1; break; case ICE_RXDID_COMMS_AUX_IP_OFFSET: rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ip_offset_mask; - rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v2; break; case ICE_RXDID_COMMS_GENERIC: - rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_generic; - break; - + /* fallthrough */ case ICE_RXDID_COMMS_OVS: - rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_ovs; break; default: /* update this according to the RXDID for PROTO_XTR_NONE */ - rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_ovs; + rxq->rxdid = ICE_RXDID_COMMS_OVS; break; } @@ -267,43 +274,30 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq) struct ice_rlan_ctx rx_ctx; enum ice_status err; uint16_t buf_size; - struct rte_eth_rxmode *rxmode = &dev_data->dev_conf.rxmode; uint32_t rxdid = ICE_RXDID_COMMS_OVS; uint32_t regval; struct ice_adapter *ad = rxq->vsi->adapter; + uint32_t frame_size = dev_data->mtu + ICE_ETH_OVERHEAD; /* Set buffer size as the head split is disabled. 
*/ buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM); rxq->rx_hdr_len = 0; rxq->rx_buf_len = RTE_ALIGN(buf_size, (1 << ICE_RLAN_CTX_DBUF_S)); - rxq->max_pkt_len = RTE_MIN((uint32_t) - ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len, - dev_data->dev_conf.rxmode.max_rx_pkt_len); - - if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) { - if (rxq->max_pkt_len <= ICE_ETH_MAX_LEN || - rxq->max_pkt_len > ICE_FRAME_SIZE_MAX) { - PMD_DRV_LOG(ERR, "maximum packet length must " - "be larger than %u and smaller than %u," - "as jumbo frame is enabled", - (uint32_t)ICE_ETH_MAX_LEN, - (uint32_t)ICE_FRAME_SIZE_MAX); - return -EINVAL; - } - } else { - if (rxq->max_pkt_len < RTE_ETHER_MIN_LEN || - rxq->max_pkt_len > ICE_ETH_MAX_LEN) { - PMD_DRV_LOG(ERR, "maximum packet length must be " - "larger than %u and smaller than %u, " - "as jumbo frame is disabled", - (uint32_t)RTE_ETHER_MIN_LEN, - (uint32_t)ICE_ETH_MAX_LEN); - return -EINVAL; - } + rxq->max_pkt_len = + RTE_MIN((uint32_t)ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len, + frame_size); + + if (rxq->max_pkt_len <= RTE_ETHER_MIN_LEN || + rxq->max_pkt_len > ICE_FRAME_SIZE_MAX) { + PMD_DRV_LOG(ERR, "maximum packet length must " + "be larger than %u and smaller than %u", + (uint32_t)RTE_ETHER_MIN_LEN, + (uint32_t)ICE_FRAME_SIZE_MAX); + return -EINVAL; } - if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) { + if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) { /* Register mbuf field and flag for Rx timestamp */ err = rte_mbuf_dyn_rx_timestamp_register( &ice_timestamp_dynfield_offset, @@ -367,7 +361,7 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq) regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) & QRXFLXP_CNTXT_RXDID_PRIO_M; - if (ad->ptp_ena || rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) + if (ad->ptp_ena || rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) regval |= QRXFLXP_CNTXT_TS_M; ICE_WRITE_REG(hw, QRXFLXP_CNTXT(rxq->reg_idx), regval); @@ -385,11 +379,8 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq) return -EINVAL; } - buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) - - RTE_PKTMBUF_HEADROOM); - /* Check if scattered RX needs to be used. */ - if (rxq->max_pkt_len > buf_size) + if (frame_size > buf_size) dev_data->scattered_rx = 1; rxq->qrx_tail = hw->hw_addr + QRX_TAIL(rxq->reg_idx); @@ -1120,7 +1111,7 @@ ice_rx_queue_setup(struct rte_eth_dev *dev, rxq->reg_idx = vsi->base_queue + queue_idx; rxq->port_id = dev->data->port_id; - if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) + if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) rxq->crc_len = RTE_ETHER_CRC_LEN; else rxq->crc_len = 0; @@ -1131,7 +1122,7 @@ ice_rx_queue_setup(struct rte_eth_dev *dev, rxq->proto_xtr = pf->proto_xtr != NULL ? pf->proto_xtr[queue_idx] : PROTO_XTR_NONE; - /* Allocate the maximun number of RX ring hardware descriptor. */ + /* Allocate the maximum number of RX ring hardware descriptor. */ len = ICE_MAX_RING_DESC; /** @@ -1261,7 +1252,7 @@ ice_tx_queue_setup(struct rte_eth_dev *dev, tx_free_thresh = (uint16_t)(tx_conf->tx_free_thresh ? tx_conf->tx_free_thresh : ICE_DEFAULT_TX_FREE_THRESH); - /* force tx_rs_thresh to adapt an aggresive tx_free_thresh */ + /* force tx_rs_thresh to adapt an aggressive tx_free_thresh */ tx_rs_thresh = (ICE_DEFAULT_TX_RSBIT_THRESH + tx_free_thresh > nb_desc) ? 
nb_desc - tx_free_thresh : ICE_DEFAULT_TX_RSBIT_THRESH; @@ -1506,27 +1497,29 @@ ice_rxd_error_to_pkt_flags(uint16_t stat_err0) return 0; if (likely(!(stat_err0 & ICE_RX_FLEX_ERR0_BITS))) { - flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD); + flags |= (RTE_MBUF_F_RX_IP_CKSUM_GOOD | + RTE_MBUF_F_RX_L4_CKSUM_GOOD | + RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD); return flags; } if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S))) - flags |= PKT_RX_IP_CKSUM_BAD; + flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD; else - flags |= PKT_RX_IP_CKSUM_GOOD; + flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD; if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S))) - flags |= PKT_RX_L4_CKSUM_BAD; + flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD; else - flags |= PKT_RX_L4_CKSUM_GOOD; + flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD; if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S))) - flags |= PKT_RX_OUTER_IP_CKSUM_BAD; + flags |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD; if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S))) - flags |= PKT_RX_OUTER_L4_CKSUM_BAD; + flags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD; else - flags |= PKT_RX_OUTER_L4_CKSUM_GOOD; + flags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD; return flags; } @@ -1536,7 +1529,7 @@ ice_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union ice_rx_flex_desc *rxdp) { if (rte_le_to_cpu_16(rxdp->wb.status_error0) & (1 << ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S)) { - mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED; + mb->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED; mb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag1); PMD_RX_LOG(DEBUG, "Descriptor l2tag1: %u", @@ -1548,8 +1541,8 @@ ice_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union ice_rx_flex_desc *rxdp) #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC if (rte_le_to_cpu_16(rxdp->wb.status_error1) & (1 << ICE_RX_FLEX_DESC_STATUS1_L2TAG2P_S)) { - mb->ol_flags |= PKT_RX_QINQ_STRIPPED | PKT_RX_QINQ | - PKT_RX_VLAN_STRIPPED | PKT_RX_VLAN; + mb->ol_flags |= RTE_MBUF_F_RX_QINQ_STRIPPED | RTE_MBUF_F_RX_QINQ | + RTE_MBUF_F_RX_VLAN_STRIPPED | RTE_MBUF_F_RX_VLAN; mb->vlan_tci_outer = mb->vlan_tci; mb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd); PMD_RX_LOG(DEBUG, "Descriptor l2tag2_1: %u, l2tag2_2: %u", @@ -1567,6 +1560,9 @@ ice_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union ice_rx_flex_desc *rxdp) #if (ICE_LOOK_AHEAD != 8) #error "PMD ICE: ICE_LOOK_AHEAD must be 8\n" #endif + +#define ICE_PTP_TS_VALID 0x1 + static inline int ice_rx_scan_hw_ring(struct ice_rx_queue *rxq) { @@ -1580,9 +1576,10 @@ ice_rx_scan_hw_ring(struct ice_rx_queue *rxq) uint64_t pkt_flags = 0; uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl; #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC + bool is_tsinit = false; + uint64_t ts_ns; struct ice_vsi *vsi = rxq->vsi; struct ice_hw *hw = ICE_VSI_TO_HW(vsi); - uint64_t ts_ns; struct ice_adapter *ad = rxq->vsi->adapter; #endif rxdp = &rxq->rx_ring[rxq->rx_tail]; @@ -1594,6 +1591,15 @@ ice_rx_scan_hw_ring(struct ice_rx_queue *rxq) if (!(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S))) return 0; +#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC + if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) { + uint64_t sw_cur_time = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000); + + if (unlikely(sw_cur_time - rxq->hw_time_update > 4)) + is_tsinit = 1; + } +#endif + /** * Scan LOOK_AHEAD descriptors at a time to determine which * descriptors reference packets that are ready to be received. 
@@ -1625,17 +1631,29 @@ ice_rx_scan_hw_ring(struct ice_rx_queue *rxq) mb->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M & rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)]; ice_rxd_to_vlan_tci(mb, &rxdp[j]); - rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]); + rxd_to_pkt_fields_ops[rxq->rxdid](rxq, mb, &rxdp[j]); #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC - if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) { - ts_ns = ice_tstamp_convert_32b_64b(hw, - rte_le_to_cpu_32(rxdp[j].wb.flex_ts.ts_high)); - if (ice_timestamp_dynflag > 0) { - *RTE_MBUF_DYNFIELD(mb, - ice_timestamp_dynfield_offset, - rte_mbuf_timestamp_t *) = ts_ns; - mb->ol_flags |= ice_timestamp_dynflag; + if (ice_timestamp_dynflag > 0) { + rxq->time_high = + rte_le_to_cpu_32(rxdp[j].wb.flex_ts.ts_high); + if (unlikely(is_tsinit)) { + ts_ns = ice_tstamp_convert_32b_64b(hw, ad, 1, + rxq->time_high); + rxq->hw_time_low = (uint32_t)ts_ns; + rxq->hw_time_high = (uint32_t)(ts_ns >> 32); + is_tsinit = false; + } else { + if (rxq->time_high < rxq->hw_time_low) + rxq->hw_time_high += 1; + ts_ns = (uint64_t)rxq->hw_time_high << 32 | rxq->time_high; + rxq->hw_time_low = rxq->time_high; } + rxq->hw_time_update = rte_get_timer_cycles() / + (rte_get_timer_hz() / 1000); + *RTE_MBUF_DYNFIELD(mb, + ice_timestamp_dynfield_offset, + rte_mbuf_timestamp_t *) = ts_ns; + pkt_flags |= ice_timestamp_dynflag; } if (ad->ptp_ena && ((mb->packet_type & @@ -1643,7 +1661,11 @@ ice_rx_scan_hw_ring(struct ice_rx_queue *rxq) rxq->time_high = rte_le_to_cpu_32(rxdp[j].wb.flex_ts.ts_high); mb->timesync = rxq->queue_id; - pkt_flags |= PKT_RX_IEEE1588_PTP; + pkt_flags |= RTE_MBUF_F_RX_IEEE1588_PTP; + if (rxdp[j].wb.time_stamp_low & + ICE_PTP_TS_VALID) + pkt_flags |= + RTE_MBUF_F_RX_IEEE1588_TMST; } #endif mb->ol_flags |= pkt_flags; @@ -1724,7 +1746,7 @@ ice_rx_alloc_bufs(struct ice_rx_queue *rxq) rxdp[i].read.pkt_addr = dma_addr; } - /* Update rx tail regsiter */ + /* Update Rx tail register */ ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_free_trigger); rxq->rx_free_trigger = @@ -1830,11 +1852,20 @@ ice_recv_scattered_pkts(void *rx_queue, uint64_t pkt_flags; uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl; #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC + bool is_tsinit = false; + uint64_t ts_ns; struct ice_vsi *vsi = rxq->vsi; struct ice_hw *hw = ICE_VSI_TO_HW(vsi); - uint64_t ts_ns; struct ice_adapter *ad = rxq->vsi->adapter; + + if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) { + uint64_t sw_cur_time = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000); + + if (unlikely(sw_cur_time - rxq->hw_time_update > 4)) + is_tsinit = true; + } #endif + while (nb_rx < nb_pkts) { rxdp = &rx_ring[rx_id]; rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0); @@ -1942,18 +1973,29 @@ ice_recv_scattered_pkts(void *rx_queue, first_seg->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M & rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)]; ice_rxd_to_vlan_tci(first_seg, &rxd); - rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd); + rxd_to_pkt_fields_ops[rxq->rxdid](rxq, first_seg, &rxd); pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0); #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC - if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) { - ts_ns = ice_tstamp_convert_32b_64b(hw, - rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high)); - if (ice_timestamp_dynflag > 0) { - *RTE_MBUF_DYNFIELD(first_seg, - ice_timestamp_dynfield_offset, - rte_mbuf_timestamp_t *) = ts_ns; - first_seg->ol_flags |= ice_timestamp_dynflag; + if (ice_timestamp_dynflag > 0) { + rxq->time_high = + rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high); + if (unlikely(is_tsinit)) { + 
ts_ns = ice_tstamp_convert_32b_64b(hw, ad, 1, rxq->time_high); + rxq->hw_time_low = (uint32_t)ts_ns; + rxq->hw_time_high = (uint32_t)(ts_ns >> 32); + is_tsinit = false; + } else { + if (rxq->time_high < rxq->hw_time_low) + rxq->hw_time_high += 1; + ts_ns = (uint64_t)rxq->hw_time_high << 32 | rxq->time_high; + rxq->hw_time_low = rxq->time_high; } + rxq->hw_time_update = rte_get_timer_cycles() / + (rte_get_timer_hz() / 1000); + *RTE_MBUF_DYNFIELD(rxm, + (ice_timestamp_dynfield_offset), + rte_mbuf_timestamp_t *) = ts_ns; + pkt_flags |= ice_timestamp_dynflag; } if (ad->ptp_ena && ((first_seg->packet_type & RTE_PTYPE_L2_MASK) @@ -1961,7 +2003,7 @@ ice_recv_scattered_pkts(void *rx_queue, rxq->time_high = rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high); first_seg->timesync = rxq->queue_id; - pkt_flags |= PKT_RX_IEEE1588_PTP; + pkt_flags |= RTE_MBUF_F_RX_IEEE1588_PTP; } #endif first_seg->ol_flags |= pkt_flags; @@ -1982,7 +2024,7 @@ ice_recv_scattered_pkts(void *rx_queue, * threshold of the queue, advance the Receive Descriptor Tail (RDT) * register. Update the RDT with the value of the last processed RX * descriptor minus 1, to guarantee that the RDT register is never - * equal to the RDH register, which creates a "full" ring situtation + * equal to the RDH register, which creates a "full" ring situation * from the hardware point of view. */ nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold); @@ -2320,11 +2362,20 @@ ice_recv_pkts(void *rx_queue, uint64_t pkt_flags; uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl; #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC + bool is_tsinit = false; + uint64_t ts_ns; struct ice_vsi *vsi = rxq->vsi; struct ice_hw *hw = ICE_VSI_TO_HW(vsi); - uint64_t ts_ns; struct ice_adapter *ad = rxq->vsi->adapter; + + if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) { + uint64_t sw_cur_time = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000); + + if (unlikely(sw_cur_time - rxq->hw_time_update > 4)) + is_tsinit = 1; + } #endif + while (nb_rx < nb_pkts) { rxdp = &rx_ring[rx_id]; rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0); @@ -2373,18 +2424,29 @@ ice_recv_pkts(void *rx_queue, rxm->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M & rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)]; ice_rxd_to_vlan_tci(rxm, &rxd); - rxq->rxd_to_pkt_fields(rxq, rxm, &rxd); + rxd_to_pkt_fields_ops[rxq->rxdid](rxq, rxm, &rxd); pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0); #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC - if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) { - ts_ns = ice_tstamp_convert_32b_64b(hw, - rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high)); - if (ice_timestamp_dynflag > 0) { - *RTE_MBUF_DYNFIELD(rxm, - ice_timestamp_dynfield_offset, - rte_mbuf_timestamp_t *) = ts_ns; - rxm->ol_flags |= ice_timestamp_dynflag; + if (ice_timestamp_dynflag > 0) { + rxq->time_high = + rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high); + if (unlikely(is_tsinit)) { + ts_ns = ice_tstamp_convert_32b_64b(hw, ad, 1, rxq->time_high); + rxq->hw_time_low = (uint32_t)ts_ns; + rxq->hw_time_high = (uint32_t)(ts_ns >> 32); + is_tsinit = false; + } else { + if (rxq->time_high < rxq->hw_time_low) + rxq->hw_time_high += 1; + ts_ns = (uint64_t)rxq->hw_time_high << 32 | rxq->time_high; + rxq->hw_time_low = rxq->time_high; } + rxq->hw_time_update = rte_get_timer_cycles() / + (rte_get_timer_hz() / 1000); + *RTE_MBUF_DYNFIELD(rxm, + (ice_timestamp_dynfield_offset), + rte_mbuf_timestamp_t *) = ts_ns; + pkt_flags |= ice_timestamp_dynflag; } if (ad->ptp_ena && ((rxm->packet_type & RTE_PTYPE_L2_MASK) == @@ -2392,13 +2454,14 @@ ice_recv_pkts(void 
*rx_queue, rxq->time_high = rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high); rxm->timesync = rxq->queue_id; - pkt_flags |= PKT_RX_IEEE1588_PTP; + pkt_flags |= RTE_MBUF_F_RX_IEEE1588_PTP; } #endif rxm->ol_flags |= pkt_flags; /* copy old mbuf to rx_pkts */ rx_pkts[nb_rx++] = rxm; } + rxq->rx_tail = rx_id; /** * If the number of free RX descriptors is greater than the RX free @@ -2426,11 +2489,11 @@ ice_parse_tunneling_params(uint64_t ol_flags, uint32_t *cd_tunneling) { /* EIPT: External (outer) IP header type */ - if (ol_flags & PKT_TX_OUTER_IP_CKSUM) + if (ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) *cd_tunneling |= ICE_TX_CTX_EIPT_IPV4; - else if (ol_flags & PKT_TX_OUTER_IPV4) + else if (ol_flags & RTE_MBUF_F_TX_OUTER_IPV4) *cd_tunneling |= ICE_TX_CTX_EIPT_IPV4_NO_CSUM; - else if (ol_flags & PKT_TX_OUTER_IPV6) + else if (ol_flags & RTE_MBUF_F_TX_OUTER_IPV6) *cd_tunneling |= ICE_TX_CTX_EIPT_IPV6; /* EIPLEN: External (outer) IP header length, in DWords */ @@ -2438,16 +2501,16 @@ ice_parse_tunneling_params(uint64_t ol_flags, ICE_TXD_CTX_QW0_EIPLEN_S; /* L4TUNT: L4 Tunneling Type */ - switch (ol_flags & PKT_TX_TUNNEL_MASK) { - case PKT_TX_TUNNEL_IPIP: + switch (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) { + case RTE_MBUF_F_TX_TUNNEL_IPIP: /* for non UDP / GRE tunneling, set to 00b */ break; - case PKT_TX_TUNNEL_VXLAN: - case PKT_TX_TUNNEL_GTP: - case PKT_TX_TUNNEL_GENEVE: + case RTE_MBUF_F_TX_TUNNEL_VXLAN: + case RTE_MBUF_F_TX_TUNNEL_GTP: + case RTE_MBUF_F_TX_TUNNEL_GENEVE: *cd_tunneling |= ICE_TXD_CTX_UDP_TUNNELING; break; - case PKT_TX_TUNNEL_GRE: + case RTE_MBUF_F_TX_TUNNEL_GRE: *cd_tunneling |= ICE_TXD_CTX_GRE_TUNNELING; break; default: @@ -2484,7 +2547,7 @@ ice_txd_enable_checksum(uint64_t ol_flags, union ice_tx_offload tx_offload) { /* Set MACLEN */ - if (ol_flags & PKT_TX_TUNNEL_MASK) + if (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) *td_offset |= (tx_offload.outer_l2_len >> 1) << ICE_TX_DESC_LEN_MACLEN_S; else @@ -2492,21 +2555,21 @@ ice_txd_enable_checksum(uint64_t ol_flags, << ICE_TX_DESC_LEN_MACLEN_S; /* Enable L3 checksum offloads */ - if (ol_flags & PKT_TX_IP_CKSUM) { + if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM) { *td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM; *td_offset |= (tx_offload.l3_len >> 2) << - ICE_TX_DESC_LEN_IPLEN_S; - } else if (ol_flags & PKT_TX_IPV4) { + ICE_TX_DESC_LEN_IPLEN_S; + } else if (ol_flags & RTE_MBUF_F_TX_IPV4) { *td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4; *td_offset |= (tx_offload.l3_len >> 2) << - ICE_TX_DESC_LEN_IPLEN_S; - } else if (ol_flags & PKT_TX_IPV6) { + ICE_TX_DESC_LEN_IPLEN_S; + } else if (ol_flags & RTE_MBUF_F_TX_IPV6) { *td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV6; *td_offset |= (tx_offload.l3_len >> 2) << - ICE_TX_DESC_LEN_IPLEN_S; + ICE_TX_DESC_LEN_IPLEN_S; } - if (ol_flags & PKT_TX_TCP_SEG) { + if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) { *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP; *td_offset |= (tx_offload.l4_len >> 2) << ICE_TX_DESC_LEN_L4_LEN_S; @@ -2514,18 +2577,18 @@ ice_txd_enable_checksum(uint64_t ol_flags, } /* Enable L4 checksum offloads */ - switch (ol_flags & PKT_TX_L4_MASK) { - case PKT_TX_TCP_CKSUM: + switch (ol_flags & RTE_MBUF_F_TX_L4_MASK) { + case RTE_MBUF_F_TX_TCP_CKSUM: *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP; *td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) << ICE_TX_DESC_LEN_L4_LEN_S; break; - case PKT_TX_SCTP_CKSUM: + case RTE_MBUF_F_TX_SCTP_CKSUM: *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP; *td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) << ICE_TX_DESC_LEN_L4_LEN_S; break; - case PKT_TX_UDP_CKSUM: + case RTE_MBUF_F_TX_UDP_CKSUM: *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP; 
*td_offset |= (sizeof(struct rte_udp_hdr) >> 2) << ICE_TX_DESC_LEN_L4_LEN_S; @@ -2603,11 +2666,11 @@ ice_build_ctob(uint32_t td_cmd, static inline uint16_t ice_calc_context_desc(uint64_t flags) { - static uint64_t mask = PKT_TX_TCP_SEG | - PKT_TX_QINQ | - PKT_TX_OUTER_IP_CKSUM | - PKT_TX_TUNNEL_MASK | - PKT_TX_IEEE1588_TMST; + static uint64_t mask = RTE_MBUF_F_TX_TCP_SEG | + RTE_MBUF_F_TX_QINQ | + RTE_MBUF_F_TX_OUTER_IP_CKSUM | + RTE_MBUF_F_TX_TUNNEL_MASK | + RTE_MBUF_F_TX_IEEE1588_TMST; return (flags & mask) ? 1 : 0; } @@ -2625,7 +2688,7 @@ ice_set_tso_ctx(struct rte_mbuf *mbuf, union ice_tx_offload tx_offload) } hdr_len = tx_offload.l2_len + tx_offload.l3_len + tx_offload.l4_len; - hdr_len += (mbuf->ol_flags & PKT_TX_TUNNEL_MASK) ? + hdr_len += (mbuf->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) ? tx_offload.outer_l2_len + tx_offload.outer_l3_len : 0; cd_cmd = ICE_TX_CTX_DESC_TSO; @@ -2712,7 +2775,7 @@ ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) * the mbuf data size exceeds max data size that hw allows * per tx desc. */ - if (ol_flags & PKT_TX_TCP_SEG) + if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) nb_used = (uint16_t)(ice_calc_pkt_desc(tx_pkt) + nb_ctx); else @@ -2741,14 +2804,14 @@ ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) } /* Descriptor based VLAN insertion */ - if (ol_flags & (PKT_TX_VLAN | PKT_TX_QINQ)) { + if (ol_flags & (RTE_MBUF_F_TX_VLAN | RTE_MBUF_F_TX_QINQ)) { td_cmd |= ICE_TX_DESC_CMD_IL2TAG1; td_tag = tx_pkt->vlan_tci; } /* Fill in tunneling parameters if necessary */ cd_tunneling_params = 0; - if (ol_flags & PKT_TX_TUNNEL_MASK) + if (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) ice_parse_tunneling_params(ol_flags, tx_offload, &cd_tunneling_params); @@ -2772,10 +2835,10 @@ ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) txe->mbuf = NULL; } - if (ol_flags & PKT_TX_TCP_SEG) + if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) cd_type_cmd_tso_mss |= ice_set_tso_ctx(tx_pkt, tx_offload); - else if (ol_flags & PKT_TX_IEEE1588_TMST) + else if (ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST) cd_type_cmd_tso_mss |= ((uint64_t)ICE_TX_CTX_DESC_TSYN << ICE_TXD_CTX_QW1_CMD_S); @@ -2784,7 +2847,7 @@ ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) rte_cpu_to_le_32(cd_tunneling_params); /* TX context descriptor based double VLAN insert */ - if (ol_flags & PKT_TX_QINQ) { + if (ol_flags & RTE_MBUF_F_TX_QINQ) { cd_l2tag2 = tx_pkt->vlan_tci_outer; cd_type_cmd_tso_mss |= ((uint64_t)ICE_TX_CTX_DESC_IL2TAG2 << @@ -2812,7 +2875,7 @@ ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) slen = m_seg->data_len; buf_dma_addr = rte_mbuf_data_iova(m_seg); - while ((ol_flags & PKT_TX_TCP_SEG) && + while ((ol_flags & RTE_MBUF_F_TX_TCP_SEG) && unlikely(slen > ICE_MAX_DATA_PER_TXD)) { txd->buf_addr = rte_cpu_to_le_64(buf_dma_addr); txd->cmd_type_offset_bsz = @@ -2892,7 +2955,7 @@ ice_tx_free_bufs(struct ice_tx_queue *txq) for (i = 0; i < txq->tx_rs_thresh; i++) rte_prefetch0((txep + i)->mbuf); - if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) { + if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) { for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) { rte_mempool_put(txep->mbuf->pool, txep->mbuf); txep->mbuf = NULL; @@ -3119,7 +3182,7 @@ tx_xmit_pkts(struct ice_tx_queue *txq, ice_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n)); txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n)); - /* Determin if RS bit needs to be set */ + /* Determine if RS bit needs to be set */ if (txq->tx_tail > 
txq->tx_next_rs) { txr[txq->tx_next_rs].cmd_type_offset_bsz |= rte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) << @@ -3182,6 +3245,8 @@ ice_set_rx_function(struct rte_eth_dev *dev) rx_check_ret = ice_rx_vec_dev_check(dev); if (ad->ptp_ena) rx_check_ret = -1; + ad->rx_vec_offload_support = + (rx_check_ret == ICE_VECTOR_OFFLOAD_PATH); if (rx_check_ret >= 0 && ad->rx_bulk_alloc_allowed && rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) { ad->rx_vec_allowed = true; @@ -3217,7 +3282,7 @@ ice_set_rx_function(struct rte_eth_dev *dev) if (dev->data->scattered_rx) { if (ad->rx_use_avx512) { #ifdef CC_AVX512_SUPPORT - if (rx_check_ret == ICE_VECTOR_OFFLOAD_PATH) { + if (ad->rx_vec_offload_support) { PMD_DRV_LOG(NOTICE, "Using AVX512 OFFLOAD Vector Scattered Rx (port %d).", dev->data->port_id); @@ -3232,7 +3297,7 @@ ice_set_rx_function(struct rte_eth_dev *dev) } #endif } else if (ad->rx_use_avx2) { - if (rx_check_ret == ICE_VECTOR_OFFLOAD_PATH) { + if (ad->rx_vec_offload_support) { PMD_DRV_LOG(NOTICE, "Using AVX2 OFFLOAD Vector Scattered Rx (port %d).", dev->data->port_id); @@ -3254,7 +3319,7 @@ ice_set_rx_function(struct rte_eth_dev *dev) } else { if (ad->rx_use_avx512) { #ifdef CC_AVX512_SUPPORT - if (rx_check_ret == ICE_VECTOR_OFFLOAD_PATH) { + if (ad->rx_vec_offload_support) { PMD_DRV_LOG(NOTICE, "Using AVX512 OFFLOAD Vector Rx (port %d).", dev->data->port_id); @@ -3269,7 +3334,7 @@ ice_set_rx_function(struct rte_eth_dev *dev) } #endif } else if (ad->rx_use_avx2) { - if (rx_check_ret == ICE_VECTOR_OFFLOAD_PATH) { + if (ad->rx_vec_offload_support) { PMD_DRV_LOG(NOTICE, "Using AVX2 OFFLOAD Vector Rx (port %d).", dev->data->port_id); @@ -3368,7 +3433,7 @@ ice_set_tx_function_flag(struct rte_eth_dev *dev, struct ice_tx_queue *txq) /* Use a simple Tx queue if possible (only fast free is allowed) */ ad->tx_simple_allowed = (txq->offloads == - (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) && + (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) && txq->tx_rs_thresh >= ICE_TX_MAX_BURST); if (ad->tx_simple_allowed) @@ -3401,7 +3466,7 @@ ice_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts, m = tx_pkts[i]; ol_flags = m->ol_flags; - if (ol_flags & PKT_TX_TCP_SEG && + if (ol_flags & RTE_MBUF_F_TX_TCP_SEG && (m->tso_segsz < ICE_MIN_TSO_MSS || m->tso_segsz > ICE_MAX_TSO_MSS || m->pkt_len > ICE_MAX_TSO_FRAME_SIZE)) { @@ -3541,8 +3606,9 @@ static const struct { { ice_xmit_pkts_vec_avx512, "Vector AVX512" }, { ice_xmit_pkts_vec_avx512_offload, "Offload Vector AVX512" }, #endif - { ice_xmit_pkts_vec_avx2, "Vector AVX2" }, - { ice_xmit_pkts_vec, "Vector SSE" }, + { ice_xmit_pkts_vec_avx2, "Vector AVX2" }, + { ice_xmit_pkts_vec_avx2_offload, "Offload Vector AVX2" }, + { ice_xmit_pkts_vec, "Vector SSE" }, #endif };