X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fice%2Fice_rxtx.c;h=fef6ad45449cf1b8e1d7d83babdb6dcd4ae83dd0;hb=0ff55924712e2a35476c0929235b21d2b77d9f97;hp=b83c5c8629086c195164172b250d3ab6d55bd6a4;hpb=8a72edd9cb232b868c38373e041b03db1220e779;p=dpdk.git

diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
index b83c5c8629..fef6ad4544 100644
--- a/drivers/net/ice/ice_rxtx.c
+++ b/drivers/net/ice/ice_rxtx.c
@@ -23,43 +23,57 @@
 uint64_t rte_net_ice_dynflag_proto_xtr_ipv4_mask;
 uint64_t rte_net_ice_dynflag_proto_xtr_ipv6_mask;
 uint64_t rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask;
 uint64_t rte_net_ice_dynflag_proto_xtr_tcp_mask;
+uint64_t rte_net_ice_dynflag_proto_xtr_ip_offset_mask;
 
 static inline uint64_t
-ice_rxdid_to_proto_xtr_ol_flag(uint8_t rxdid)
-{
-	static uint64_t *ol_flag_map[] = {
-		[ICE_RXDID_COMMS_AUX_VLAN] =
-			&rte_net_ice_dynflag_proto_xtr_vlan_mask,
-		[ICE_RXDID_COMMS_AUX_IPV4] =
-			&rte_net_ice_dynflag_proto_xtr_ipv4_mask,
-		[ICE_RXDID_COMMS_AUX_IPV6] =
-			&rte_net_ice_dynflag_proto_xtr_ipv6_mask,
-		[ICE_RXDID_COMMS_AUX_IPV6_FLOW] =
-			&rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask,
-		[ICE_RXDID_COMMS_AUX_TCP] =
-			&rte_net_ice_dynflag_proto_xtr_tcp_mask,
+ice_rxdid_to_proto_xtr_ol_flag(uint8_t rxdid, bool *chk_valid)
+{
+	static struct {
+		uint64_t *ol_flag;
+		bool chk_valid;
+	} ol_flag_map[] = {
+		[ICE_RXDID_COMMS_AUX_VLAN] = {
+			&rte_net_ice_dynflag_proto_xtr_vlan_mask, true },
+		[ICE_RXDID_COMMS_AUX_IPV4] = {
+			&rte_net_ice_dynflag_proto_xtr_ipv4_mask, true },
+		[ICE_RXDID_COMMS_AUX_IPV6] = {
+			&rte_net_ice_dynflag_proto_xtr_ipv6_mask, true },
+		[ICE_RXDID_COMMS_AUX_IPV6_FLOW] = {
+			&rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask, true },
+		[ICE_RXDID_COMMS_AUX_TCP] = {
+			&rte_net_ice_dynflag_proto_xtr_tcp_mask, true },
+		[ICE_RXDID_COMMS_AUX_IP_OFFSET] = {
+			&rte_net_ice_dynflag_proto_xtr_ip_offset_mask, false },
 	};
 	uint64_t *ol_flag;
 
-	ol_flag = rxdid < RTE_DIM(ol_flag_map) ? ol_flag_map[rxdid] : NULL;
+	if (rxdid < RTE_DIM(ol_flag_map)) {
+		ol_flag = ol_flag_map[rxdid].ol_flag;
+		if (!ol_flag)
+			return 0ULL;
 
-	return ol_flag != NULL ? *ol_flag : 0ULL;
+		*chk_valid = ol_flag_map[rxdid].chk_valid;
+		return *ol_flag;
+	}
+
+	return 0ULL;
 }
 
 static inline uint8_t
 ice_proto_xtr_type_to_rxdid(uint8_t xtr_type)
 {
 	static uint8_t rxdid_map[] = {
-		[PROTO_XTR_NONE]      = ICE_RXDID_COMMS_GENERIC,
+		[PROTO_XTR_NONE]      = ICE_RXDID_COMMS_OVS,
 		[PROTO_XTR_VLAN]      = ICE_RXDID_COMMS_AUX_VLAN,
 		[PROTO_XTR_IPV4]      = ICE_RXDID_COMMS_AUX_IPV4,
 		[PROTO_XTR_IPV6]      = ICE_RXDID_COMMS_AUX_IPV6,
 		[PROTO_XTR_IPV6_FLOW] = ICE_RXDID_COMMS_AUX_IPV6_FLOW,
 		[PROTO_XTR_TCP]       = ICE_RXDID_COMMS_AUX_TCP,
+		[PROTO_XTR_IP_OFFSET] = ICE_RXDID_COMMS_AUX_IP_OFFSET,
 	};
 
 	return xtr_type < RTE_DIM(rxdid_map) ?
-		rxdid_map[xtr_type] : ICE_RXDID_COMMS_GENERIC;
+		rxdid_map[xtr_type] : ICE_RXDID_COMMS_OVS;
 }
 
 static enum ice_status
@@ -67,12 +81,13 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
 {
 	struct ice_vsi *vsi = rxq->vsi;
 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
+	struct ice_pf *pf = ICE_VSI_TO_PF(vsi);
 	struct rte_eth_dev *dev = ICE_VSI_TO_ETH_DEV(rxq->vsi);
 	struct ice_rlan_ctx rx_ctx;
 	enum ice_status err;
 	uint16_t buf_size, len;
 	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
-	uint32_t rxdid = ICE_RXDID_COMMS_GENERIC;
+	uint32_t rxdid = ICE_RXDID_COMMS_OVS;
 	uint32_t regval;
 
 	/* Set buffer size as the head split is disabled. */
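
The @@ -23,43 +23,57 @@ hunk above reworks the RXDID lookup so that, besides the dynflag mask, it reports whether the descriptor format carries per-word valid bits for the extracted metadata (the new IP_OFFSET format does not). Below is a minimal, self-contained sketch of that table-plus-validity-flag pattern; all demo_* names and numeric values are placeholders, not the driver's identifiers.

/* Hypothetical, standalone illustration of the lookup pattern above:
 * a designated-initializer table keyed by descriptor ID that yields both
 * an offload-flag mask and a "metadata has valid bits" property.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_DIM(a) (sizeof(a) / sizeof((a)[0]))

enum { DEMO_RXDID_AUX_VLAN = 17, DEMO_RXDID_AUX_IP_OFFSET = 25 };

static uint64_t demo_vlan_mask = 1ULL << 40;      /* made-up dynflag bits */
static uint64_t demo_ip_offset_mask = 1ULL << 41;

static uint64_t
demo_rxdid_to_flag(uint8_t rxdid, bool *chk_valid)
{
	static const struct {
		uint64_t *flag;
		bool chk_valid;
	} map[] = {
		[DEMO_RXDID_AUX_VLAN]      = { &demo_vlan_mask, true },
		[DEMO_RXDID_AUX_IP_OFFSET] = { &demo_ip_offset_mask, false },
	};

	if (rxdid >= DEMO_DIM(map) || map[rxdid].flag == NULL)
		return 0; /* unknown RXDID: no dynflag, *chk_valid untouched */

	*chk_valid = map[rxdid].chk_valid;
	return *map[rxdid].flag;
}

int main(void)
{
	bool chk_valid = false;
	uint64_t flag = demo_rxdid_to_flag(DEMO_RXDID_AUX_IP_OFFSET, &chk_valid);

	printf("flag=%#llx chk_valid=%d\n", (unsigned long long)flag, chk_valid);
	return 0;
}
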
@@ -137,6 +152,12 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
 	PMD_DRV_LOG(DEBUG, "Port (%u) - Rx queue (%u) is set with RXDID : %u",
 		    rxq->port_id, rxq->queue_id, rxdid);
 
+	if (!(pf->supported_rxdid & BIT(rxdid))) {
+		PMD_DRV_LOG(ERR, "currently package doesn't support RXDID (%u)",
+			    rxdid);
+		return -EINVAL;
+	}
+
 	/* Enable Flexible Descriptors in the queue context which
 	 * allows this driver to select a specific receive descriptor format
 	 */
@@ -445,8 +466,9 @@ ice_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 	int err;
 	struct ice_vsi *vsi;
 	struct ice_hw *hw;
-	struct ice_aqc_add_tx_qgrp txq_elem;
+	struct ice_aqc_add_tx_qgrp *txq_elem;
 	struct ice_tlan_ctx tx_ctx;
+	int buf_len;
 
 	PMD_INIT_FUNC_TRACE();
 
@@ -463,13 +485,17 @@ ice_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 		return -EINVAL;
 	}
 
+	buf_len = ice_struct_size(txq_elem, txqs, 1);
+	txq_elem = ice_malloc(hw, buf_len);
+	if (!txq_elem)
+		return -ENOMEM;
+
 	vsi = txq->vsi;
 	hw = ICE_VSI_TO_HW(vsi);
 
-	memset(&txq_elem, 0, sizeof(txq_elem));
 	memset(&tx_ctx, 0, sizeof(tx_ctx));
-	txq_elem.num_txqs = 1;
-	txq_elem.txqs[0].txq_id = rte_cpu_to_le_16(txq->reg_idx);
+	txq_elem->num_txqs = 1;
+	txq_elem->txqs[0].txq_id = rte_cpu_to_le_16(txq->reg_idx);
 
 	tx_ctx.base = txq->tx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
 	tx_ctx.qlen = txq->nb_tx_desc;
@@ -481,7 +507,7 @@ ice_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 	tx_ctx.tso_qnum = txq->reg_idx; /* index for tso state structure */
 	tx_ctx.legacy_int = 1; /* Legacy or Advanced Host Interface */
 
-	ice_set_ctx(hw, (uint8_t *)&tx_ctx, txq_elem.txqs[0].txq_ctx,
+	ice_set_ctx(hw, (uint8_t *)&tx_ctx, txq_elem->txqs[0].txq_ctx,
 		    ice_tlan_ctx_info);
 
 	txq->qtx_tail = hw->hw_addr + QTX_COMM_DBELL(txq->reg_idx);
@@ -491,15 +517,18 @@ ice_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 
 	/* Fix me, we assume TC always 0 here */
 	err = ice_ena_vsi_txq(hw->port_info, vsi->idx, 0, tx_queue_id, 1,
-			&txq_elem, sizeof(txq_elem), NULL);
+			txq_elem, buf_len, NULL);
 	if (err) {
 		PMD_DRV_LOG(ERR, "Failed to add lan txq");
+		rte_free(txq_elem);
 		return -EIO;
 	}
 	/* store the schedule node id */
-	txq->q_teid = txq_elem.txqs[0].q_teid;
+	txq->q_teid = txq_elem->txqs[0].q_teid;
 	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+
+	rte_free(txq_elem);
 	return 0;
 }
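
The ice_tx_queue_start() hunks above replace the on-stack struct ice_aqc_add_tx_qgrp with a heap buffer sized by ice_struct_size() and freed on every exit path. A rough, self-contained sketch of that flexible-array-member sizing idiom follows, using invented demo_* types and a simplified macro rather than the base-code definitions.

/* Hypothetical sketch of the flexible-array sizing used above: the
 * admin-queue element gets room for 'n' trailing queue entries and
 * must be freed on both the success and the error return paths.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_txq {
	uint16_t txq_id;
	uint32_t q_teid;
};

struct demo_qgrp {
	uint8_t num_txqs;
	struct demo_txq txqs[];	/* flexible array member */
};

/* Rough equivalent of ice_struct_size(ptr, member, n): header plus n
 * entries; the real macro may be defined slightly differently.
 */
#define demo_struct_size(ptr, member, n) \
	(sizeof(*(ptr)) + sizeof((ptr)->member[0]) * (n))

int main(void)
{
	struct demo_qgrp *qgrp;
	size_t buf_len = demo_struct_size(qgrp, txqs, 1);

	qgrp = calloc(1, buf_len);	/* zero-initialised buffer */
	if (qgrp == NULL)
		return -1;

	qgrp->num_txqs = 1;
	qgrp->txqs[0].txq_id = 3;
	printf("buf_len=%zu txq_id=%u\n", buf_len, qgrp->txqs[0].txq_id);

	free(qgrp);	/* mirrors the rte_free() added on every exit path */
	return 0;
}
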
@@ -623,8 +652,9 @@ ice_fdir_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 	int err;
 	struct ice_vsi *vsi;
 	struct ice_hw *hw;
-	struct ice_aqc_add_tx_qgrp txq_elem;
+	struct ice_aqc_add_tx_qgrp *txq_elem;
 	struct ice_tlan_ctx tx_ctx;
+	int buf_len;
 
 	PMD_INIT_FUNC_TRACE();
 
@@ -635,13 +665,17 @@ ice_fdir_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 		return -EINVAL;
 	}
 
+	buf_len = ice_struct_size(txq_elem, txqs, 1);
+	txq_elem = ice_malloc(hw, buf_len);
+	if (!txq_elem)
+		return -ENOMEM;
+
 	vsi = txq->vsi;
 	hw = ICE_VSI_TO_HW(vsi);
 
-	memset(&txq_elem, 0, sizeof(txq_elem));
 	memset(&tx_ctx, 0, sizeof(tx_ctx));
-	txq_elem.num_txqs = 1;
-	txq_elem.txqs[0].txq_id = rte_cpu_to_le_16(txq->reg_idx);
+	txq_elem->num_txqs = 1;
+	txq_elem->txqs[0].txq_id = rte_cpu_to_le_16(txq->reg_idx);
 
 	tx_ctx.base = txq->tx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
 	tx_ctx.qlen = txq->nb_tx_desc;
@@ -653,7 +687,7 @@ ice_fdir_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 	tx_ctx.tso_qnum = txq->reg_idx; /* index for tso state structure */
 	tx_ctx.legacy_int = 1; /* Legacy or Advanced Host Interface */
 
-	ice_set_ctx(hw, (uint8_t *)&tx_ctx, txq_elem.txqs[0].txq_ctx,
+	ice_set_ctx(hw, (uint8_t *)&tx_ctx, txq_elem->txqs[0].txq_ctx,
 		    ice_tlan_ctx_info);
 
 	txq->qtx_tail = hw->hw_addr + QTX_COMM_DBELL(txq->reg_idx);
@@ -663,14 +697,16 @@ ice_fdir_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 
 	/* Fix me, we assume TC always 0 here */
 	err = ice_ena_vsi_txq(hw->port_info, vsi->idx, 0, tx_queue_id, 1,
-			&txq_elem, sizeof(txq_elem), NULL);
+			txq_elem, buf_len, NULL);
 	if (err) {
 		PMD_DRV_LOG(ERR, "Failed to add FDIR txq");
+		rte_free(txq_elem);
 		return -EIO;
 	}
 	/* store the schedule node id */
-	txq->q_teid = txq_elem.txqs[0].q_teid;
+	txq->q_teid = txq_elem->txqs[0].q_teid;
 
+	rte_free(txq_elem);
 	return 0;
 }
 
@@ -1309,26 +1345,35 @@ ice_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union ice_rx_flex_desc *rxdp)
 
 static void
 ice_rxd_to_proto_xtr(struct rte_mbuf *mb,
-		     volatile struct ice_32b_rx_flex_desc_comms *desc)
+		     volatile struct ice_32b_rx_flex_desc_comms_ovs *desc)
 {
 	uint16_t stat_err = rte_le_to_cpu_16(desc->status_error1);
-	uint32_t metadata;
+	uint32_t metadata = 0;
 	uint64_t ol_flag;
+	bool chk_valid;
 
-	if (unlikely(!(stat_err & ICE_RX_PROTO_XTR_VALID)))
-		return;
-
-	ol_flag = ice_rxdid_to_proto_xtr_ol_flag(desc->rxdid);
+	ol_flag = ice_rxdid_to_proto_xtr_ol_flag(desc->rxdid, &chk_valid);
 	if (unlikely(!ol_flag))
 		return;
 
-	mb->ol_flags |= ol_flag;
+	if (chk_valid) {
+		if (stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S))
+			metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
 
-	metadata = stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S) ?
-				rte_le_to_cpu_16(desc->flex_ts.flex.aux0) : 0;
+		if (stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S))
+			metadata |=
+				rte_le_to_cpu_16(desc->flex_ts.flex.aux1) << 16;
+	} else {
+		if (rte_le_to_cpu_16(desc->flex_ts.flex.aux0) != 0xFFFF)
+			metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
+		else if (rte_le_to_cpu_16(desc->flex_ts.flex.aux1) != 0xFFFF)
+			metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux1);
+	}
+
+	if (!metadata)
+		return;
 
-	if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S)))
-		metadata |= rte_le_to_cpu_16(desc->flex_ts.flex.aux1) << 16;
+	mb->ol_flags |= ol_flag;
 
 	*RTE_NET_ICE_DYNF_PROTO_XTR_METADATA(mb) = metadata;
 }
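
The ice_rxd_to_proto_xtr() hunk above assembles the 32-bit dynfield metadata from the two 16-bit aux words: formats with chk_valid take each half only when its XTRMD valid bit is set, while the IP_OFFSET format treats 0xFFFF as "no value". A small stand-alone sketch of that assembly, with placeholder bit positions instead of the real ICE_RX_FLEX_DESC_STATUS1_* shifts:

/* Hypothetical sketch of the metadata assembly logic above; aux0/aux1
 * stand in for desc->flex_ts.flex.aux0/aux1 after rte_le_to_cpu_16(),
 * and the DEMO_* valid bits are placeholders, not hardware values.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_XTRMD4_VALID (1u << 15)
#define DEMO_XTRMD5_VALID (1u << 14)

static uint32_t
demo_assemble_metadata(bool chk_valid, uint16_t stat_err,
		       uint16_t aux0, uint16_t aux1)
{
	uint32_t metadata = 0;

	if (chk_valid) {
		/* Each 16-bit half is taken only when its valid bit is set. */
		if (stat_err & DEMO_XTRMD4_VALID)
			metadata = aux0;
		if (stat_err & DEMO_XTRMD5_VALID)
			metadata |= (uint32_t)aux1 << 16;
	} else {
		/* IP_OFFSET style: 0xFFFF marks "no offset recorded". */
		if (aux0 != 0xFFFF)
			metadata = aux0;
		else if (aux1 != 0xFFFF)
			metadata = aux1;
	}

	return metadata;	/* 0 means "nothing to report" */
}

int main(void)
{
	printf("%#x\n", (unsigned int)demo_assemble_metadata(true,
	       DEMO_XTRMD4_VALID | DEMO_XTRMD5_VALID, 0x0123, 0x4567));
	printf("%#x\n", (unsigned int)demo_assemble_metadata(false,
	       0, 0xFFFF, 0x002a));
	return 0;
}
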
@@ -1338,8 +1383,9 @@ static inline void
 ice_rxd_to_pkt_fields(struct rte_mbuf *mb,
 		      volatile union ice_rx_flex_desc *rxdp)
 {
-	volatile struct ice_32b_rx_flex_desc_comms *desc =
-		(volatile struct ice_32b_rx_flex_desc_comms *)rxdp;
+	volatile struct ice_32b_rx_flex_desc_comms_ovs *desc =
+		(volatile struct ice_32b_rx_flex_desc_comms_ovs *)rxdp;
+#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
 	uint16_t stat_err;
 
 	stat_err = rte_le_to_cpu_16(desc->status_error0);
@@ -1347,13 +1393,14 @@ ice_rxd_to_pkt_fields(struct rte_mbuf *mb,
 		mb->ol_flags |= PKT_RX_RSS_HASH;
 		mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
 	}
+#endif
 
-#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
 	if (desc->flow_id != 0xFFFFFFFF) {
 		mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
 		mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
 	}
 
+#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
 	if (unlikely(rte_net_ice_dynf_proto_xtr_metadata_avail()))
 		ice_rxd_to_proto_xtr(mb, desc);
 #endif
@@ -2234,7 +2281,7 @@ ice_txd_enable_checksum(uint64_t ol_flags,
 	switch (ol_flags & PKT_TX_L4_MASK) {
 	case PKT_TX_TCP_CKSUM:
 		*td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
-		*td_offset |= (tx_offload.l4_len >> 2) <<
+		*td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
 			      ICE_TX_DESC_LEN_L4_LEN_S;
 		break;
 	case PKT_TX_SCTP_CKSUM:
@@ -2371,28 +2418,6 @@ ice_calc_pkt_desc(struct rte_mbuf *tx_pkt)
 	return count;
 }
 
-/* Calculate TCP header length for PKT_TX_TCP_CKSUM if not provided */
-static inline uint16_t
-ice_calc_pkt_tcp_hdr(struct rte_mbuf *tx_pkt, union ice_tx_offload tx_offload)
-{
-	uint16_t tcpoff = tx_offload.l2_len + tx_offload.l3_len;
-	const struct rte_tcp_hdr *tcp_hdr;
-	struct rte_tcp_hdr _tcp_hdr;
-
-	if (tcpoff + sizeof(struct rte_tcp_hdr) < tx_pkt->data_len) {
-		tcp_hdr = rte_pktmbuf_mtod_offset(tx_pkt, struct rte_tcp_hdr *,
-						  tcpoff);
-
-		return (tcp_hdr->data_off & 0xf0) >> 2;
-	}
-
-	tcp_hdr = rte_pktmbuf_read(tx_pkt, tcpoff, sizeof(_tcp_hdr), &_tcp_hdr);
-	if (tcp_hdr)
-		return (tcp_hdr->data_off & 0xf0) >> 2;
-	else
-		return 0;
-}
-
 uint16_t
 ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 {
@@ -2491,15 +2516,9 @@ ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 					&cd_tunneling_params);
 
 		/* Enable checksum offloading */
-		if (ol_flags & ICE_TX_CKSUM_OFFLOAD_MASK) {
-			if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM &&
-					!tx_offload.l4_len)
-				tx_offload.l4_len =
-					ice_calc_pkt_tcp_hdr(tx_pkt, tx_offload);
-
+		if (ol_flags & ICE_TX_CKSUM_OFFLOAD_MASK)
 			ice_txd_enable_checksum(ol_flags, &td_cmd,
 						&td_offset, tx_offload);
-		}
 
 		if (nb_ctx) {
 			/* Setup TX context descriptor if required */
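
The checksum hunks above drop the ice_calc_pkt_tcp_hdr() helper and encode a fixed sizeof(struct rte_tcp_hdr) for the PKT_TX_TCP_CKSUM case. The Tx descriptor length fields count header lengths in words; the illustration below works through that arithmetic under assumed DEMO_* shift positions and a local 20-byte TCP header stand-in, not the hardware's ICE_TX_DESC_LEN_* definitions.

/* Hypothetical illustration of the td_offset arithmetic above: L3/L4
 * lengths are encoded in 4-byte words, and the TCP case now always uses
 * the fixed 20-byte header size instead of a parsed l4_len.
 * The DEMO_*_S shift values are placeholders.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_LEN_MACLEN_S 0	/* L2 length, in 2-byte words */
#define DEMO_LEN_IPLEN_S  7	/* L3 length, in 4-byte words */
#define DEMO_LEN_L4LEN_S  14	/* L4 length, in 4-byte words */

struct demo_tcp_hdr {		/* 20 bytes, laid out like struct rte_tcp_hdr */
	uint16_t src_port, dst_port;
	uint32_t sent_seq, recv_ack;
	uint8_t data_off, tcp_flags;
	uint16_t rx_win, cksum, tcp_urp;
};

int main(void)
{
	uint32_t td_offset = 0;
	uint16_t l2_len = 14, l3_len = 20;

	td_offset |= (uint32_t)(l2_len >> 1) << DEMO_LEN_MACLEN_S;
	td_offset |= (uint32_t)(l3_len >> 2) << DEMO_LEN_IPLEN_S;
	/* Fixed TCP header: 20 bytes -> 5 words in the L4LEN field. */
	td_offset |= (uint32_t)(sizeof(struct demo_tcp_hdr) >> 2)
			<< DEMO_LEN_L4LEN_S;

	printf("td_offset=%#x (l4len words=%zu)\n", (unsigned int)td_offset,
	       sizeof(struct demo_tcp_hdr) >> 2);
	return 0;
}
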