diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
index deb904df37..ad58442319 100644
--- a/drivers/net/ice/ice_rxtx.c
+++ b/drivers/net/ice/ice_rxtx.c
@@ -23,27 +23,40 @@ uint64_t rte_net_ice_dynflag_proto_xtr_ipv4_mask;
 uint64_t rte_net_ice_dynflag_proto_xtr_ipv6_mask;
 uint64_t rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask;
 uint64_t rte_net_ice_dynflag_proto_xtr_tcp_mask;
+uint64_t rte_net_ice_dynflag_proto_xtr_ip_offset_mask;
 
 static inline uint64_t
-ice_rxdid_to_proto_xtr_ol_flag(uint8_t rxdid)
-{
-        static uint64_t *ol_flag_map[] = {
-                [ICE_RXDID_COMMS_AUX_VLAN] =
-                        &rte_net_ice_dynflag_proto_xtr_vlan_mask,
-                [ICE_RXDID_COMMS_AUX_IPV4] =
-                        &rte_net_ice_dynflag_proto_xtr_ipv4_mask,
-                [ICE_RXDID_COMMS_AUX_IPV6] =
-                        &rte_net_ice_dynflag_proto_xtr_ipv6_mask,
-                [ICE_RXDID_COMMS_AUX_IPV6_FLOW] =
-                        &rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask,
-                [ICE_RXDID_COMMS_AUX_TCP] =
-                        &rte_net_ice_dynflag_proto_xtr_tcp_mask,
+ice_rxdid_to_proto_xtr_ol_flag(uint8_t rxdid, bool *chk_valid)
+{
+        static struct {
+                uint64_t *ol_flag;
+                bool chk_valid;
+        } ol_flag_map[] = {
+                [ICE_RXDID_COMMS_AUX_VLAN] = {
+                        &rte_net_ice_dynflag_proto_xtr_vlan_mask, true },
+                [ICE_RXDID_COMMS_AUX_IPV4] = {
+                        &rte_net_ice_dynflag_proto_xtr_ipv4_mask, true },
+                [ICE_RXDID_COMMS_AUX_IPV6] = {
+                        &rte_net_ice_dynflag_proto_xtr_ipv6_mask, true },
+                [ICE_RXDID_COMMS_AUX_IPV6_FLOW] = {
+                        &rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask, true },
+                [ICE_RXDID_COMMS_AUX_TCP] = {
+                        &rte_net_ice_dynflag_proto_xtr_tcp_mask, true },
+                [ICE_RXDID_COMMS_AUX_IP_OFFSET] = {
+                        &rte_net_ice_dynflag_proto_xtr_ip_offset_mask, false },
         };
         uint64_t *ol_flag;
 
-        ol_flag = rxdid < RTE_DIM(ol_flag_map) ? ol_flag_map[rxdid] : NULL;
+        if (rxdid < RTE_DIM(ol_flag_map)) {
+                ol_flag = ol_flag_map[rxdid].ol_flag;
+                if (!ol_flag)
+                        return 0ULL;
 
-        return ol_flag != NULL ? *ol_flag : 0ULL;
+                *chk_valid = ol_flag_map[rxdid].chk_valid;
+                return *ol_flag;
+        }
+
+        return 0ULL;
 }
 
 static inline uint8_t
@@ -56,6 +69,7 @@ ice_proto_xtr_type_to_rxdid(uint8_t xtr_type)
                 [PROTO_XTR_IPV6] = ICE_RXDID_COMMS_AUX_IPV6,
                 [PROTO_XTR_IPV6_FLOW] = ICE_RXDID_COMMS_AUX_IPV6_FLOW,
                 [PROTO_XTR_TCP] = ICE_RXDID_COMMS_AUX_TCP,
+                [PROTO_XTR_IP_OFFSET] = ICE_RXDID_COMMS_AUX_IP_OFFSET,
         };
 
         return xtr_type < RTE_DIM(rxdid_map) ?
@@ -481,7 +495,7 @@ ice_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
         tx_ctx.tso_qnum = txq->reg_idx; /* index for tso state structure */
         tx_ctx.legacy_int = 1; /* Legacy or Advanced Host Interface */
 
-        ice_set_ctx((uint8_t *)&tx_ctx, txq_elem.txqs[0].txq_ctx,
+        ice_set_ctx(hw, (uint8_t *)&tx_ctx, txq_elem.txqs[0].txq_ctx,
                     ice_tlan_ctx_info);
 
         txq->qtx_tail = hw->hw_addr + QTX_COMM_DBELL(txq->reg_idx);
@@ -653,7 +667,7 @@ ice_fdir_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
         tx_ctx.tso_qnum = txq->reg_idx; /* index for tso state structure */
         tx_ctx.legacy_int = 1; /* Legacy or Advanced Host Interface */
 
-        ice_set_ctx((uint8_t *)&tx_ctx, txq_elem.txqs[0].txq_ctx,
+        ice_set_ctx(hw, (uint8_t *)&tx_ctx, txq_elem.txqs[0].txq_ctx,
                     ice_tlan_ctx_info);
 
         txq->qtx_tail = hw->hw_addr + QTX_COMM_DBELL(txq->reg_idx);
@@ -1312,23 +1326,32 @@ ice_rxd_to_proto_xtr(struct rte_mbuf *mb,
                      volatile struct ice_32b_rx_flex_desc_comms *desc)
 {
         uint16_t stat_err = rte_le_to_cpu_16(desc->status_error1);
-        uint32_t metadata;
+        uint32_t metadata = 0;
         uint64_t ol_flag;
+        bool chk_valid;
 
-        if (unlikely(!(stat_err & ICE_RX_PROTO_XTR_VALID)))
-                return;
-
-        ol_flag = ice_rxdid_to_proto_xtr_ol_flag(desc->rxdid);
+        ol_flag = ice_rxdid_to_proto_xtr_ol_flag(desc->rxdid, &chk_valid);
         if (unlikely(!ol_flag))
                 return;
 
-        mb->ol_flags |= ol_flag;
+        if (chk_valid) {
+                if (stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S))
+                        metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
 
-        metadata = stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S) ?
-                   rte_le_to_cpu_16(desc->flex_ts.flex.aux0) : 0;
+                if (stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S))
+                        metadata |=
+                                rte_le_to_cpu_16(desc->flex_ts.flex.aux1) << 16;
+        } else {
+                if (rte_le_to_cpu_16(desc->flex_ts.flex.aux0) != 0xFFFF)
+                        metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
+                else if (rte_le_to_cpu_16(desc->flex_ts.flex.aux1) != 0xFFFF)
+                        metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux1);
+        }
 
-        if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S)))
-                metadata |= rte_le_to_cpu_16(desc->flex_ts.flex.aux1) << 16;
+        if (!metadata)
+                return;
+
+        mb->ol_flags |= ol_flag;
 
         *RTE_NET_ICE_DYNF_PROTO_XTR_METADATA(mb) = metadata;
 }
@@ -1905,6 +1928,7 @@ ice_free_queues(struct rte_eth_dev *dev)
                         continue;
                 ice_rx_queue_release(dev->data->rx_queues[i]);
                 dev->data->rx_queues[i] = NULL;
+                rte_eth_dma_zone_free(dev, "rx_ring", i);
         }
         dev->data->nb_rx_queues = 0;
 
@@ -1913,6 +1937,7 @@ ice_free_queues(struct rte_eth_dev *dev)
                         continue;
                 ice_tx_queue_release(dev->data->tx_queues[i]);
                 dev->data->tx_queues[i] = NULL;
+                rte_eth_dma_zone_free(dev, "tx_ring", i);
         }
         dev->data->nb_tx_queues = 0;
 }
@@ -2407,6 +2432,8 @@ ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                 tx_pkt = *tx_pkts++;
 
                 td_cmd = 0;
+                td_tag = 0;
+                td_offset = 0;
                 ol_flags = tx_pkt->ol_flags;
                 tx_offload.l2_len = tx_pkt->l2_len;
                 tx_offload.l3_len = tx_pkt->l3_len;
@@ -2465,10 +2492,9 @@ ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                                 &cd_tunneling_params);
 
                 /* Enable checksum offloading */
-                if (ol_flags & ICE_TX_CKSUM_OFFLOAD_MASK) {
+                if (ol_flags & ICE_TX_CKSUM_OFFLOAD_MASK)
                         ice_txd_enable_checksum(ol_flags, &td_cmd,
                                                 &td_offset, tx_offload);
-                }
 
                 if (nb_ctx) {
                         /* Setup TX context descriptor if required */
@@ -2874,7 +2900,7 @@ ice_xmit_pkts_simple(void *tx_queue,
         return nb_tx;
 }
 
-void __attribute__((cold))
+void __rte_cold
 ice_set_rx_function(struct rte_eth_dev *dev)
 {
         PMD_INIT_FUNC_TRACE();
@@ -2984,7 +3010,7 @@ ice_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
         return ret;
 }
 
-void __attribute__((cold))
+void __rte_cold
 ice_set_tx_function_flag(struct rte_eth_dev *dev, struct ice_tx_queue *txq)
 {
         struct ice_adapter *ad =
@@ -3053,7 +3079,7 @@ ice_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
         return i;
 }
 
-void __attribute__((cold))
+void __rte_cold
 ice_set_tx_function(struct rte_eth_dev *dev)
 {
         struct ice_adapter *ad =
@@ -3682,7 +3708,7 @@ ice_get_default_pkt_type(uint16_t ptype)
         return type_table[ptype];
 }
 
-void __attribute__((cold))
+void __rte_cold
 ice_set_default_ptype_table(struct rte_eth_dev *dev)
 {
         struct ice_adapter *ad =
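
Notes on the changes above:

The new ICE_RXDID_COMMS_AUX_IP_OFFSET descriptor format carries no
validity bits in status_error1, which is why ice_rxdid_to_proto_xtr_ol_flag()
now reports chk_valid = false for it and ice_rxd_to_proto_xtr() instead
treats an aux word of 0xFFFF as "no data". Applications receive the
extracted offsets through the mbuf dynamic flag/field pair declared in
rte_pmd_ice.h. A minimal consumer sketch follows; the helper name
dump_ip_offset is hypothetical, and it assumes the port was started with
the ice devarg proto_xtr=ip_offset so the dynfield/dynflag get registered:

    #include <inttypes.h>
    #include <stdio.h>
    #include <rte_mbuf.h>
    #include <rte_mbuf_dyn.h>
    #include <rte_pmd_ice.h>

    /* Hypothetical helper: print the HW-extracted IP header offset
     * metadata for one received mbuf. */
    static void
    dump_ip_offset(struct rte_mbuf *m)
    {
            /* Dynfield never registered: extraction is not enabled. */
            if (rte_net_ice_dynfield_proto_xtr_metadata_offs < 0)
                    return;

            /* ice_rxd_to_proto_xtr() sets this flag only when the
             * descriptor carried usable metadata; for ip_offset that
             * means aux0 (or, as a fallback, aux1) was not 0xFFFF. */
            if (!(m->ol_flags & rte_net_ice_dynflag_proto_xtr_ip_offset_mask))
                    return;

            printf("ip_offset metadata: 0x%08" PRIx32 "\n",
                   *RTE_NET_ICE_DYNF_PROTO_XTR_METADATA(m));
    }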
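The rte_eth_dma_zone_free() calls in ice_free_queues() release the
descriptor-ring memzones that were reserved at queue setup time and were
previously leaked when the queues were freed. The name/queue-id pair
passed to the free must match the reservation exactly. A sketch of the
pairing, assuming the driver's "rx_ring" zone name and the
ICE_RING_BASE_ALIGN constant from ice_rxtx.h (the wrapper names
rx_ring_zone_reserve/_free are hypothetical):

    #include <rte_ethdev_driver.h> /* rte_eth_dma_zone_reserve()/_free() */
    #include "ice_rxtx.h"          /* ICE_RING_BASE_ALIGN */

    static const struct rte_memzone *
    rx_ring_zone_reserve(struct rte_eth_dev *dev, uint16_t queue_idx,
                         size_t ring_size, int socket_id)
    {
            /* The zone is identified by (port, name, queue id); the
             * same triple must be used when freeing it below. */
            return rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
                                            ring_size, ICE_RING_BASE_ALIGN,
                                            socket_id);
    }

    static void
    rx_ring_zone_free(struct rte_eth_dev *dev, uint16_t queue_idx)
    {
            rte_eth_dma_zone_free(dev, "rx_ring", queue_idx);
    }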
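Resetting td_tag and td_offset at the top of the per-packet loop in
ice_xmit_pkts() matters because the first quadword of each Tx data
descriptor is assembled by OR-ing those accumulators in: without the
reset, checksum offsets or a VLAN tag computed for the previous packet
would leak into a packet that requests no such offload. A minimal model
of that assembly (the function name build_qw1 is hypothetical; the shift
values mirror the ICE_TXD_QW1_*_S macros of the base code and are
reproduced here for illustration only):

    #include <stdint.h>
    #include <rte_byteorder.h>

    static inline uint64_t
    build_qw1(uint64_t dtype, uint64_t td_cmd, uint64_t td_offset,
              uint64_t slen, uint64_t td_tag)
    {
            /* Every field is OR-ed in, so stale td_offset/td_tag bits
             * from a previous packet would survive into this one. */
            return rte_cpu_to_le_64(dtype |
                                    (td_cmd << 4) |     /* ICE_TXD_QW1_CMD_S */
                                    (td_offset << 16) | /* ICE_TXD_QW1_OFFSET_S */
                                    (slen << 34) |      /* ICE_TXD_QW1_TX_BUF_SZ_S */
                                    (td_tag << 48));    /* ICE_TXD_QW1_L2TAG1_S */
    }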
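The __attribute__((cold)) conversions are mechanical: __rte_cold is the
EAL's portable spelling of the same hint and marks functions that run
only on the setup path. At the time of this patch, rte_common.h defines
it simply as:

    /* from rte_common.h */
    #define __rte_cold __attribute__((cold))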