X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fice%2Fice_rxtx.c;h=ad58442319151dd4f5aba6489129dec6b7ec42e0;hb=6982f1848f985f89ecc4b16b3a09a504da3fec5b;hp=2fa737a97cde734ac6be21b4b663cd38035bc8c2;hpb=f1514bcb27214557d349ca96a94857421135a844;p=dpdk.git

diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
index 2fa737a97c..ad58442319 100644
--- a/drivers/net/ice/ice_rxtx.c
+++ b/drivers/net/ice/ice_rxtx.c
@@ -23,27 +23,40 @@ uint64_t rte_net_ice_dynflag_proto_xtr_ipv4_mask;
 uint64_t rte_net_ice_dynflag_proto_xtr_ipv6_mask;
 uint64_t rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask;
 uint64_t rte_net_ice_dynflag_proto_xtr_tcp_mask;
+uint64_t rte_net_ice_dynflag_proto_xtr_ip_offset_mask;
 
 static inline uint64_t
-ice_rxdid_to_proto_xtr_ol_flag(uint8_t rxdid)
-{
-	static uint64_t *ol_flag_map[] = {
-		[ICE_RXDID_COMMS_AUX_VLAN] =
-				&rte_net_ice_dynflag_proto_xtr_vlan_mask,
-		[ICE_RXDID_COMMS_AUX_IPV4] =
-				&rte_net_ice_dynflag_proto_xtr_ipv4_mask,
-		[ICE_RXDID_COMMS_AUX_IPV6] =
-				&rte_net_ice_dynflag_proto_xtr_ipv6_mask,
-		[ICE_RXDID_COMMS_AUX_IPV6_FLOW] =
-				&rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask,
-		[ICE_RXDID_COMMS_AUX_TCP] =
-				&rte_net_ice_dynflag_proto_xtr_tcp_mask,
+ice_rxdid_to_proto_xtr_ol_flag(uint8_t rxdid, bool *chk_valid)
+{
+	static struct {
+		uint64_t *ol_flag;
+		bool chk_valid;
+	} ol_flag_map[] = {
+		[ICE_RXDID_COMMS_AUX_VLAN] = {
+			&rte_net_ice_dynflag_proto_xtr_vlan_mask, true },
+		[ICE_RXDID_COMMS_AUX_IPV4] = {
+			&rte_net_ice_dynflag_proto_xtr_ipv4_mask, true },
+		[ICE_RXDID_COMMS_AUX_IPV6] = {
+			&rte_net_ice_dynflag_proto_xtr_ipv6_mask, true },
+		[ICE_RXDID_COMMS_AUX_IPV6_FLOW] = {
+			&rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask, true },
+		[ICE_RXDID_COMMS_AUX_TCP] = {
+			&rte_net_ice_dynflag_proto_xtr_tcp_mask, true },
+		[ICE_RXDID_COMMS_AUX_IP_OFFSET] = {
+			&rte_net_ice_dynflag_proto_xtr_ip_offset_mask, false },
 	};
 	uint64_t *ol_flag;
 
-	ol_flag = rxdid < RTE_DIM(ol_flag_map) ? ol_flag_map[rxdid] : NULL;
+	if (rxdid < RTE_DIM(ol_flag_map)) {
+		ol_flag = ol_flag_map[rxdid].ol_flag;
+		if (!ol_flag)
+			return 0ULL;
 
-	return ol_flag != NULL ? *ol_flag : 0ULL;
+		*chk_valid = ol_flag_map[rxdid].chk_valid;
+		return *ol_flag;
+	}
+
+	return 0ULL;
 }
 
 static inline uint8_t
@@ -56,6 +69,7 @@ ice_proto_xtr_type_to_rxdid(uint8_t xtr_type)
 		[PROTO_XTR_IPV6]      = ICE_RXDID_COMMS_AUX_IPV6,
 		[PROTO_XTR_IPV6_FLOW] = ICE_RXDID_COMMS_AUX_IPV6_FLOW,
 		[PROTO_XTR_TCP]       = ICE_RXDID_COMMS_AUX_TCP,
+		[PROTO_XTR_IP_OFFSET] = ICE_RXDID_COMMS_AUX_IP_OFFSET,
 	};
 
 	return xtr_type < RTE_DIM(rxdid_map) ?
@@ -236,23 +250,12 @@ _ice_rx_queue_release_mbufs(struct ice_rx_queue *rxq)
 			rxq->sw_ring[i].mbuf = NULL;
 		}
 	}
-#ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
-	if (rxq->rx_nb_avail == 0)
-		return;
-	for (i = 0; i < rxq->rx_nb_avail; i++) {
-		struct rte_mbuf *mbuf;
-
-		mbuf = rxq->rx_stage[rxq->rx_next_avail + i];
-		rte_pktmbuf_free_seg(mbuf);
-	}
-	rxq->rx_nb_avail = 0;
-#endif /* RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC */
-}
+	if (rxq->rx_nb_avail == 0)
+		return;
+	for (i = 0; i < rxq->rx_nb_avail; i++)
+		rte_pktmbuf_free_seg(rxq->rx_stage[rxq->rx_next_avail + i]);
 
-static void
-ice_rx_queue_release_mbufs(struct ice_rx_queue *rxq)
-{
-	rxq->rx_rel_mbufs(rxq);
+	rxq->rx_nb_avail = 0;
 }
 
 /* turn on or off rx queue
@@ -309,16 +312,10 @@ ice_switch_rx_queue(struct ice_hw *hw, uint16_t q_idx, bool on)
 }
 
 static inline int
-#ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
 ice_check_rx_burst_bulk_alloc_preconditions(struct ice_rx_queue *rxq)
-#else
-ice_check_rx_burst_bulk_alloc_preconditions
-	(__rte_unused struct ice_rx_queue *rxq)
-#endif
 {
 	int ret = 0;
 
-#ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
 	if (!(rxq->rx_free_thresh >= ICE_RX_MAX_BURST)) {
 		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
 			     "rxq->rx_free_thresh=%d, "
@@ -338,9 +335,6 @@ ice_check_rx_burst_bulk_alloc_preconditions
 			     rxq->nb_rx_desc, rxq->rx_free_thresh);
 		ret = -EINVAL;
 	}
-#else
-	ret = -EINVAL;
-#endif
 
 	return ret;
 }
@@ -357,17 +351,11 @@ ice_reset_rx_queue(struct ice_rx_queue *rxq)
 		return;
 	}
 
-#ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
-	if (ice_check_rx_burst_bulk_alloc_preconditions(rxq) == 0)
-		len = (uint16_t)(rxq->nb_rx_desc + ICE_RX_MAX_BURST);
-	else
-#endif /* RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC */
-		len = rxq->nb_rx_desc;
+	len = (uint16_t)(rxq->nb_rx_desc + ICE_RX_MAX_BURST);
 
 	for (i = 0; i < len * sizeof(union ice_rx_flex_desc); i++)
 		((volatile char *)rxq->rx_ring)[i] = 0;
 
-#ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
 	memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
 	for (i = 0; i < ICE_RX_MAX_BURST; ++i)
 		rxq->sw_ring[rxq->nb_rx_desc + i].mbuf = &rxq->fake_mbuf;
@@ -375,7 +363,6 @@ ice_reset_rx_queue(struct ice_rx_queue *rxq)
 	rxq->rx_nb_avail = 0;
 	rxq->rx_next_avail = 0;
 	rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
-#endif /* RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC */
 
 	rxq->rx_tail = 0;
 	rxq->nb_rx_hold = 0;
@@ -424,12 +411,12 @@ ice_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	/* Init the RX tail register. */
 	ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
 
-	err = ice_switch_rx_queue(hw, rxq->reg_idx, TRUE);
+	err = ice_switch_rx_queue(hw, rxq->reg_idx, true);
 	if (err) {
 		PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
 			    rx_queue_id);
 
-		ice_rx_queue_release_mbufs(rxq);
+		rxq->rx_rel_mbufs(rxq);
 		ice_reset_rx_queue(rxq);
 		return -EINVAL;
 	}
@@ -450,13 +437,13 @@ ice_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	if (rx_queue_id < dev->data->nb_rx_queues) {
 		rxq = dev->data->rx_queues[rx_queue_id];
 
-		err = ice_switch_rx_queue(hw, rxq->reg_idx, FALSE);
+		err = ice_switch_rx_queue(hw, rxq->reg_idx, false);
 		if (err) {
 			PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
 				    rx_queue_id);
 			return -EINVAL;
 		}
-		ice_rx_queue_release_mbufs(rxq);
+		rxq->rx_rel_mbufs(rxq);
 		ice_reset_rx_queue(rxq);
 		dev->data->rx_queue_state[rx_queue_id] =
 			RTE_ETH_QUEUE_STATE_STOPPED;
@@ -508,7 +495,7 @@ ice_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 	tx_ctx.tso_qnum = txq->reg_idx; /* index for tso state structure */
 	tx_ctx.legacy_int = 1; /* Legacy or Advanced Host Interface */
 
-	ice_set_ctx((uint8_t *)&tx_ctx, txq_elem.txqs[0].txq_ctx,
+	ice_set_ctx(hw, (uint8_t *)&tx_ctx, txq_elem.txqs[0].txq_ctx,
 		    ice_tlan_ctx_info);
 
 	txq->qtx_tail = hw->hw_addr + QTX_COMM_DBELL(txq->reg_idx);
@@ -630,7 +617,7 @@ ice_fdir_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	/* Init the RX tail register. */
 	ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
 
-	err = ice_switch_rx_queue(hw, rxq->reg_idx, TRUE);
+	err = ice_switch_rx_queue(hw, rxq->reg_idx, true);
 	if (err) {
 		PMD_DRV_LOG(ERR, "Failed to switch FDIR RX queue %u on",
 			    rx_queue_id);
@@ -680,7 +667,7 @@ ice_fdir_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 	tx_ctx.tso_qnum = txq->reg_idx; /* index for tso state structure */
 	tx_ctx.legacy_int = 1; /* Legacy or Advanced Host Interface */
 
-	ice_set_ctx((uint8_t *)&tx_ctx, txq_elem.txqs[0].txq_ctx,
+	ice_set_ctx(hw, (uint8_t *)&tx_ctx, txq_elem.txqs[0].txq_ctx,
 		    ice_tlan_ctx_info);
 
 	txq->qtx_tail = hw->hw_addr + QTX_COMM_DBELL(txq->reg_idx);
@@ -719,11 +706,6 @@ _ice_tx_queue_release_mbufs(struct ice_tx_queue *txq)
 		}
 	}
 }
-static void
-ice_tx_queue_release_mbufs(struct ice_tx_queue *txq)
-{
-	txq->tx_rel_mbufs(txq);
-}
 
 static void
 ice_reset_tx_queue(struct ice_tx_queue *txq)
@@ -799,7 +781,7 @@ ice_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 		return -EINVAL;
 	}
 
-	ice_tx_queue_release_mbufs(txq);
+	txq->tx_rel_mbufs(txq);
 	ice_reset_tx_queue(txq);
 	dev->data->tx_queue_state[tx_queue_id] =
 		RTE_ETH_QUEUE_STATE_STOPPED;
@@ -816,13 +798,13 @@ ice_fdir_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 
 	rxq = pf->fdir.rxq;
 
-	err = ice_switch_rx_queue(hw, rxq->reg_idx, FALSE);
+	err = ice_switch_rx_queue(hw, rxq->reg_idx, false);
 	if (err) {
 		PMD_DRV_LOG(ERR, "Failed to switch FDIR RX queue %u off",
 			    rx_queue_id);
 		return -EINVAL;
 	}
-	ice_rx_queue_release_mbufs(rxq);
+	rxq->rx_rel_mbufs(rxq);
 
 	return 0;
 }
@@ -858,7 +840,7 @@ ice_fdir_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 		return -EINVAL;
 	}
 
-	ice_tx_queue_release_mbufs(txq);
+	txq->tx_rel_mbufs(txq);
 
 	return 0;
 }
@@ -926,13 +908,11 @@ ice_rx_queue_setup(struct rte_eth_dev *dev,
 	/* Allocate the maximun number of RX ring hardware descriptor. */
 	len = ICE_MAX_RING_DESC;
 
-#ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
 	/**
 	 * Allocating a little more memory because vectorized/bulk_alloc Rx
 	 * functions doesn't check boundaries each time.
 	 */
 	len += ICE_RX_MAX_BURST;
-#endif
 
 	/* Allocate the maximum number of RX ring hardware descriptor. */
 	ring_size = sizeof(union ice_rx_flex_desc) * len;
@@ -952,11 +932,8 @@ ice_rx_queue_setup(struct rte_eth_dev *dev,
 	rxq->rx_ring_dma = rz->iova;
 	rxq->rx_ring = rz->addr;
 
-#ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
+	/* always reserve more for bulk alloc */
 	len = (uint16_t)(nb_desc + ICE_RX_MAX_BURST);
-#else
-	len = nb_desc;
-#endif
 
 	/* Allocate the software ring. */
 	rxq->sw_ring = rte_zmalloc_socket(NULL,
@@ -970,24 +947,21 @@ ice_rx_queue_setup(struct rte_eth_dev *dev,
 	}
 
 	ice_reset_rx_queue(rxq);
-	rxq->q_set = TRUE;
+	rxq->q_set = true;
 	dev->data->rx_queues[queue_idx] = rxq;
 	rxq->rx_rel_mbufs = _ice_rx_queue_release_mbufs;
 
 	use_def_burst_func = ice_check_rx_burst_bulk_alloc_preconditions(rxq);
 
 	if (!use_def_burst_func) {
-#ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
 		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
 			     "satisfied. Rx Burst Bulk Alloc function will be "
 			     "used on port=%d, queue=%d.",
 			     rxq->port_id, rxq->queue_id);
-#endif /* RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC */
 	} else {
 		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
-			     "not satisfied, Scattered Rx is requested, "
-			     "or RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC is "
-			     "not enabled on port=%d, queue=%d.",
+			     "not satisfied, Scattered Rx is requested. "
+			     "on port=%d, queue=%d.",
 			     rxq->port_id, rxq->queue_id);
 		ad->rx_bulk_alloc_allowed = false;
 	}
@@ -1005,7 +979,7 @@ ice_rx_queue_release(void *rxq)
 		return;
 	}
 
-	ice_rx_queue_release_mbufs(q);
+	q->rx_rel_mbufs(q);
 	rte_free(q->sw_ring);
 	rte_free(q);
 }
@@ -1183,7 +1157,7 @@ ice_tx_queue_setup(struct rte_eth_dev *dev,
 	}
 
 	ice_reset_tx_queue(txq);
-	txq->q_set = TRUE;
+	txq->q_set = true;
 	dev->data->tx_queues[queue_idx] = txq;
 	txq->tx_rel_mbufs = _ice_tx_queue_release_mbufs;
 	ice_set_tx_function_flag(dev, txq);
@@ -1201,7 +1175,7 @@ ice_tx_queue_release(void *txq)
 		return;
 	}
 
-	ice_tx_queue_release_mbufs(q);
+	q->tx_rel_mbufs(q);
 	rte_free(q->sw_ring);
 	rte_free(q);
 }
@@ -1352,23 +1326,32 @@ ice_rxd_to_proto_xtr(struct rte_mbuf *mb,
 		     volatile struct ice_32b_rx_flex_desc_comms *desc)
 {
 	uint16_t stat_err = rte_le_to_cpu_16(desc->status_error1);
-	uint32_t metadata;
+	uint32_t metadata = 0;
 	uint64_t ol_flag;
+	bool chk_valid;
 
-	if (unlikely(!(stat_err & ICE_RX_PROTO_XTR_VALID)))
-		return;
-
-	ol_flag = ice_rxdid_to_proto_xtr_ol_flag(desc->rxdid);
+	ol_flag = ice_rxdid_to_proto_xtr_ol_flag(desc->rxdid, &chk_valid);
 	if (unlikely(!ol_flag))
 		return;
 
-	mb->ol_flags |= ol_flag;
+	if (chk_valid) {
+		if (stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S))
+			metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
 
-	metadata = stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S) ?
-				rte_le_to_cpu_16(desc->flex_ts.flex.aux0) : 0;
+		if (stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S))
+			metadata |=
+				rte_le_to_cpu_16(desc->flex_ts.flex.aux1) << 16;
+	} else {
+		if (rte_le_to_cpu_16(desc->flex_ts.flex.aux0) != 0xFFFF)
+			metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
+		else if (rte_le_to_cpu_16(desc->flex_ts.flex.aux1) != 0xFFFF)
+			metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux1);
+	}
+
+	if (!metadata)
+		return;
 
-	if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S)))
-		metadata |= rte_le_to_cpu_16(desc->flex_ts.flex.aux1) << 16;
+	mb->ol_flags |= ol_flag;
 
 	*RTE_NET_ICE_DYNF_PROTO_XTR_METADATA(mb) = metadata;
 }
@@ -1399,7 +1382,6 @@ ice_rxd_to_pkt_fields(struct rte_mbuf *mb,
 #endif
 }
 
-#ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
 #define ICE_LOOK_AHEAD 8
 #if (ICE_LOOK_AHEAD != 8)
 #error "PMD ICE: ICE_LOOK_AHEAD must be 8\n"
@@ -1620,15 +1602,6 @@ ice_recv_pkts_bulk_alloc(void *rx_queue,
 
 	return nb_rx;
 }
-#else
-static uint16_t
-ice_recv_pkts_bulk_alloc(void __rte_unused *rx_queue,
-			 struct rte_mbuf __rte_unused **rx_pkts,
-			 uint16_t __rte_unused nb_pkts)
-{
-	return 0;
-}
-#endif /* RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC */
 
 static uint16_t
 ice_recv_scattered_pkts(void *rx_queue,
@@ -1872,9 +1845,7 @@ ice_dev_supported_ptypes_get(struct rte_eth_dev *dev)
 		ptypes = ptypes_os;
 
 	if (dev->rx_pkt_burst == ice_recv_pkts ||
-#ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
 	    dev->rx_pkt_burst == ice_recv_pkts_bulk_alloc ||
-#endif
 	    dev->rx_pkt_burst == ice_recv_scattered_pkts)
 		return ptypes;
 
@@ -1945,24 +1916,6 @@ ice_tx_descriptor_status(void *tx_queue, uint16_t offset)
 	return RTE_ETH_TX_DESC_FULL;
 }
 
-void
-ice_clear_queues(struct rte_eth_dev *dev)
-{
-	uint16_t i;
-
-	PMD_INIT_FUNC_TRACE();
-
-	for (i = 0; i < dev->data->nb_tx_queues; i++) {
-		ice_tx_queue_release_mbufs(dev->data->tx_queues[i]);
-		ice_reset_tx_queue(dev->data->tx_queues[i]);
-	}
-
-	for (i = 0; i < dev->data->nb_rx_queues; i++) {
-		ice_rx_queue_release_mbufs(dev->data->rx_queues[i]);
-		ice_reset_rx_queue(dev->data->rx_queues[i]);
-	}
-}
-
 void
 ice_free_queues(struct rte_eth_dev *dev)
 {
@@ -1975,6 +1928,7 @@ ice_free_queues(struct rte_eth_dev *dev)
 			continue;
 		ice_rx_queue_release(dev->data->rx_queues[i]);
 		dev->data->rx_queues[i] = NULL;
+		rte_eth_dma_zone_free(dev, "rx_ring", i);
 	}
 	dev->data->nb_rx_queues = 0;
 
@@ -1983,6 +1937,7 @@ ice_free_queues(struct rte_eth_dev *dev)
 			continue;
 		ice_tx_queue_release(dev->data->tx_queues[i]);
 		dev->data->tx_queues[i] = NULL;
+		rte_eth_dma_zone_free(dev, "tx_ring", i);
 	}
 	dev->data->nb_tx_queues = 0;
 }
@@ -2040,7 +1995,7 @@ ice_fdir_setup_tx_resources(struct ice_pf *pf)
 	 * don't need to allocate software ring and reset for the fdir
 	 * program queue just set the queue has been configured.
 	 */
-	txq->q_set = TRUE;
+	txq->q_set = true;
 	pf->fdir.txq = txq;
 
 	txq->tx_rel_mbufs = _ice_tx_queue_release_mbufs;
@@ -2101,7 +2056,7 @@ ice_fdir_setup_rx_resources(struct ice_pf *pf)
 	 * Don't need to allocate software ring and reset for the fdir
 	 * rx queue, just set the queue has been configured.
 	 */
-	rxq->q_set = TRUE;
+	rxq->q_set = true;
 	pf->fdir.rxq = rxq;
 
 	rxq->rx_rel_mbufs = _ice_rx_queue_release_mbufs;
@@ -2471,12 +2426,14 @@ ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 
 	/* Check if the descriptor ring needs to be cleaned. */
 	if (txq->nb_tx_free < txq->tx_free_thresh)
-		ice_xmit_cleanup(txq);
+		(void)ice_xmit_cleanup(txq);
 
 	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
 		tx_pkt = *tx_pkts++;
 
 		td_cmd = 0;
+		td_tag = 0;
+		td_offset = 0;
 		ol_flags = tx_pkt->ol_flags;
 		tx_offload.l2_len = tx_pkt->l2_len;
 		tx_offload.l3_len = tx_pkt->l3_len;
@@ -2535,10 +2492,9 @@ ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 					    &cd_tunneling_params);
 
 		/* Enable checksum offloading */
-		if (ol_flags & ICE_TX_CKSUM_OFFLOAD_MASK) {
+		if (ol_flags & ICE_TX_CKSUM_OFFLOAD_MASK)
 			ice_txd_enable_checksum(ol_flags, &td_cmd,
 						&td_offset, tx_offload);
-		}
 
 		if (nb_ctx) {
 			/* Setup TX context descriptor if required */
@@ -2655,7 +2611,7 @@ end_of_tx:
 	return nb_tx;
 }
 
-static inline int __attribute__((always_inline))
+static __rte_always_inline int
 ice_tx_free_bufs(struct ice_tx_queue *txq)
 {
 	struct ice_tx_entry *txep;
@@ -2944,7 +2900,7 @@ ice_xmit_pkts_simple(void *tx_queue,
 	return nb_tx;
 }
 
-void __attribute__((cold))
+void __rte_cold
 ice_set_rx_function(struct rte_eth_dev *dev)
 {
 	PMD_INIT_FUNC_TRACE();
@@ -3054,7 +3010,7 @@ ice_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
 	return ret;
 }
 
-void __attribute__((cold))
+void __rte_cold
 ice_set_tx_function_flag(struct rte_eth_dev *dev, struct ice_tx_queue *txq)
 {
 	struct ice_adapter *ad =
@@ -3123,7 +3079,7 @@ ice_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
 	return i;
 }
 
-void __attribute__((cold))
+void __rte_cold
 ice_set_tx_function(struct rte_eth_dev *dev)
 {
 	struct ice_adapter *ad =
@@ -3752,7 +3708,7 @@ ice_get_default_pkt_type(uint16_t ptype)
 	return type_table[ptype];
 }
 
-void __attribute__((cold))
+void __rte_cold
 ice_set_default_ptype_table(struct rte_eth_dev *dev)
 {
 	struct ice_adapter *ad =