X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;ds=sidebyside;f=drivers%2Fnet%2Fi40e%2Fi40e_rxtx.c;h=bfe161f2c5c388690412440c68e7002a2d6a34ea;hb=c345c7d1acf43b4d30e1ecdd5a8cd3402234a6aa;hp=82a1709297c4c6e7187bdaed56b6230eee1e2235;hpb=ef8b7c505f10897621c0801d8ef3e961385246f8;p=dpdk.git

diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 82a1709297..bfe161f2c5 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -302,17 +302,17 @@ i40e_txd_enable_checksum(uint64_t ol_flags,
 	switch (ol_flags & PKT_TX_L4_MASK) {
 	case PKT_TX_TCP_CKSUM:
 		*td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
-		*td_offset |= (sizeof(struct tcp_hdr) >> 2) <<
+		*td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
 				I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
 		break;
 	case PKT_TX_SCTP_CKSUM:
 		*td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
-		*td_offset |= (sizeof(struct sctp_hdr) >> 2) <<
+		*td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
 				I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
 		break;
 	case PKT_TX_UDP_CKSUM:
 		*td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
-		*td_offset |= (sizeof(struct udp_hdr) >> 2) <<
+		*td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
 				I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
 		break;
 	default:
@@ -564,8 +564,7 @@ i40e_rx_alloc_bufs(struct i40e_rx_queue *rxq)
 	}
 
 	/* Update rx tail regsiter */
-	rte_wmb();
-	I40E_PCI_REG_WRITE_RELAXED(rxq->qrx_tail, rxq->rx_free_trigger);
+	I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_free_trigger);
 
 	rxq->rx_free_trigger =
 		(uint16_t)(rxq->rx_free_trigger + rxq->rx_free_thresh);
@@ -889,17 +888,17 @@ i40e_recv_scattered_pkts(void *rx_queue,
 	 */
 		rxm->next = NULL;
 		if (unlikely(rxq->crc_len > 0)) {
-			first_seg->pkt_len -= ETHER_CRC_LEN;
-			if (rx_packet_len <= ETHER_CRC_LEN) {
+			first_seg->pkt_len -= RTE_ETHER_CRC_LEN;
+			if (rx_packet_len <= RTE_ETHER_CRC_LEN) {
 				rte_pktmbuf_free_seg(rxm);
 				first_seg->nb_segs--;
 				last_seg->data_len =
 					(uint16_t)(last_seg->data_len -
-					(ETHER_CRC_LEN - rx_packet_len));
+					(RTE_ETHER_CRC_LEN - rx_packet_len));
 				last_seg->next = NULL;
 			} else
 				rxm->data_len = (uint16_t)(rx_packet_len -
-							ETHER_CRC_LEN);
+							RTE_ETHER_CRC_LEN);
 		}
 
 		first_seg->port = rxq->port_id;
@@ -981,15 +980,9 @@ i40e_set_tso_ctx(struct rte_mbuf *mbuf, union i40e_tx_offload tx_offload)
 		return ctx_desc;
 	}
 
-	/**
-	 * in case of non tunneling packet, the outer_l2_len and
-	 * outer_l3_len must be 0.
-	 */
-	hdr_len = tx_offload.outer_l2_len +
-		tx_offload.outer_l3_len +
-		tx_offload.l2_len +
-		tx_offload.l3_len +
-		tx_offload.l4_len;
+	hdr_len = tx_offload.l2_len + tx_offload.l3_len + tx_offload.l4_len;
+	hdr_len += (mbuf->ol_flags & PKT_TX_TUNNEL_MASK) ?
+		tx_offload.outer_l2_len + tx_offload.outer_l3_len : 0;
 
 	cd_cmd = I40E_TX_CTX_DESC_TSO;
 	cd_tso_len = mbuf->pkt_len - hdr_len;
@@ -1214,13 +1207,11 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 	}
 
 end_of_tx:
-	rte_wmb();
-
 	PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
 		   (unsigned) txq->port_id, (unsigned) txq->queue_id,
 		   (unsigned) tx_id, (unsigned) nb_tx);
 
-	I40E_PCI_REG_WRITE_RELAXED(txq->qtx_tail, tx_id);
+	I40E_PCI_REG_WRITE(txq->qtx_tail, tx_id);
 	txq->tx_tail = tx_id;
 
 	return nb_tx;
@@ -1371,8 +1362,7 @@ tx_xmit_pkts(struct i40e_tx_queue *txq,
 		txq->tx_tail = 0;
 
 	/* Update the tx tail register */
-	rte_wmb();
-	I40E_PCI_REG_WRITE_RELAXED(txq->qtx_tail, txq->tx_tail);
+	I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
 
 	return nb_pkts;
 }
@@ -1446,7 +1436,7 @@ i40e_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
 		if (!(ol_flags & PKT_TX_TCP_SEG)) {
 			if (m->nb_segs > I40E_TX_MAX_MTU_SEG ||
 			    m->pkt_len > I40E_FRAME_SIZE_MAX) {
-				rte_errno = -EINVAL;
+				rte_errno = EINVAL;
 				return i;
 			}
 		} else if (m->nb_segs > I40E_TX_MAX_SEG ||
@@ -1456,31 +1446,31 @@ i40e_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
 			/* MSS outside the range (256B - 9674B) are considered
 			 * malicious
 			 */
-			rte_errno = -EINVAL;
+			rte_errno = EINVAL;
 			return i;
 		}
 
 		if (ol_flags & I40E_TX_OFFLOAD_NOTSUP_MASK) {
-			rte_errno = -ENOTSUP;
+			rte_errno = ENOTSUP;
 			return i;
 		}
 
 		/* check the size of packet */
 		if (m->pkt_len < I40E_TX_MIN_PKT_LEN) {
-			rte_errno = -EINVAL;
+			rte_errno = EINVAL;
 			return i;
 		}
 
 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
 		ret = rte_validate_tx_offload(m);
 		if (ret != 0) {
-			rte_errno = ret;
+			rte_errno = -ret;
 			return i;
 		}
 #endif
 		ret = rte_net_intel_cksum_prepare(m);
 		if (ret != 0) {
-			rte_errno = ret;
+			rte_errno = -ret;
 			return i;
 		}
 	}
@@ -1550,8 +1540,6 @@ i40e_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 			return err;
 		}
 
-		rte_wmb();
-
 		/* Init the RX tail regieter. */
 		I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
 
@@ -1718,7 +1706,7 @@ i40e_dev_rx_queue_setup_runtime(struct rte_eth_dev *dev,
 		(uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
 			   RTE_PKTMBUF_HEADROOM);
 	int use_scattered_rx =
-		((rxq->max_pkt_len + 2 * I40E_VLAN_TAG_SIZE) > buf_size);
+		(rxq->max_pkt_len > buf_size);
 
 	if (i40e_rx_queue_init(rxq) != I40E_SUCCESS) {
 		PMD_DRV_LOG(ERR,
@@ -1839,7 +1827,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	rxq->reg_idx = reg_idx;
 	rxq->port_id = dev->data->port_id;
 	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
-		rxq->crc_len = ETHER_CRC_LEN;
+		rxq->crc_len = RTE_ETHER_CRC_LEN;
 	else
 		rxq->crc_len = 0;
 	rxq->drop_en = rx_conf->rx_drop_en;
@@ -2169,15 +2157,30 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	 * - tx_rs_thresh must be a divisor of the ring size.
 	 * - tx_free_thresh must be greater than 0.
 	 * - tx_free_thresh must be less than the size of the ring minus 3.
+	 * - tx_free_thresh + tx_rs_thresh must not exceed nb_desc.
 	 *
 	 * One descriptor in the TX ring is used as a sentinel to avoid a H/W
 	 * race condition, hence the maximum threshold constraints. When set
 	 * to zero use default values.
 	 */
-	tx_rs_thresh = (uint16_t)((tx_conf->tx_rs_thresh) ?
-		tx_conf->tx_rs_thresh : DEFAULT_TX_RS_THRESH);
 	tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
 		tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
+	/* force tx_rs_thresh to adapt an aggresive tx_free_thresh */
+	tx_rs_thresh = (DEFAULT_TX_RS_THRESH + tx_free_thresh > nb_desc) ?
+		nb_desc - tx_free_thresh : DEFAULT_TX_RS_THRESH;
+	if (tx_conf->tx_rs_thresh > 0)
+		tx_rs_thresh = tx_conf->tx_rs_thresh;
+	if (tx_rs_thresh + tx_free_thresh > nb_desc) {
+		PMD_INIT_LOG(ERR, "tx_rs_thresh + tx_free_thresh must not "
+				"exceed nb_desc. (tx_rs_thresh=%u "
+				"tx_free_thresh=%u nb_desc=%u port=%d queue=%d)",
+				(unsigned int)tx_rs_thresh,
+				(unsigned int)tx_free_thresh,
+				(unsigned int)nb_desc,
+				(int)dev->data->port_id,
+				(int)queue_idx);
+		return I40E_ERR_PARAM;
+	}
 	if (tx_rs_thresh >= (nb_desc - 2)) {
 		PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the "
 			     "number of TX descriptors minus 2. "
@@ -2423,13 +2426,13 @@ i40e_tx_queue_release_mbufs(struct i40e_tx_queue *txq)
 	struct rte_eth_dev *dev;
 	uint16_t i;
 
-	dev = &rte_eth_devices[txq->port_id];
-
 	if (!txq || !txq->sw_ring) {
-		PMD_DRV_LOG(DEBUG, "Pointer to rxq or sw_ring is NULL");
+		PMD_DRV_LOG(DEBUG, "Pointer to txq or sw_ring is NULL");
 		return;
 	}
 
+	dev = &rte_eth_devices[txq->port_id];
+
 	/**
 	 *  vPMD tx will not set sw_ring's mbuf to NULL after free,
 	 *  so need to free remains more carefully.
@@ -2619,23 +2622,23 @@ i40e_rx_queue_config(struct i40e_rx_queue *rxq)
 	len = hw->func_caps.rx_buf_chain_len * rxq->rx_buf_len;
 	rxq->max_pkt_len = RTE_MIN(len, data->dev_conf.rxmode.max_rx_pkt_len);
 	if (data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
-		if (rxq->max_pkt_len <= ETHER_MAX_LEN ||
+		if (rxq->max_pkt_len <= RTE_ETHER_MAX_LEN ||
 			rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
 			PMD_DRV_LOG(ERR, "maximum packet length must "
 				    "be larger than %u and smaller than %u,"
 				    "as jumbo frame is enabled",
-				    (uint32_t)ETHER_MAX_LEN,
+				    (uint32_t)RTE_ETHER_MAX_LEN,
 				    (uint32_t)I40E_FRAME_SIZE_MAX);
 			return I40E_ERR_CONFIG;
 		}
 	} else {
-		if (rxq->max_pkt_len < ETHER_MIN_LEN ||
-			rxq->max_pkt_len > ETHER_MAX_LEN) {
+		if (rxq->max_pkt_len < RTE_ETHER_MIN_LEN ||
+			rxq->max_pkt_len > RTE_ETHER_MAX_LEN) {
 			PMD_DRV_LOG(ERR, "maximum packet length must be "
 				    "larger than %u and smaller than %u, "
 				    "as jumbo frame is disabled",
-				    (uint32_t)ETHER_MIN_LEN,
-				    (uint32_t)ETHER_MAX_LEN);
+				    (uint32_t)RTE_ETHER_MIN_LEN,
+				    (uint32_t)RTE_ETHER_MAX_LEN);
 			return I40E_ERR_CONFIG;
 		}
 	}
@@ -2708,9 +2711,8 @@ i40e_rx_queue_init(struct i40e_rx_queue *rxq)
 			       RTE_PKTMBUF_HEADROOM);
 
 	/* Check if scattered RX needs to be used. */
-	if ((rxq->max_pkt_len + 2 * I40E_VLAN_TAG_SIZE) > buf_size) {
+	if (rxq->max_pkt_len > buf_size)
 		dev_data->scattered_rx = 1;
-	}
 
 	/* Init the RX tail regieter. */
 	I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
@@ -2920,7 +2922,7 @@ i40e_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 static eth_rx_burst_t
 i40e_get_latest_rx_vec(bool scatter)
 {
-#ifdef RTE_ARCH_X86
+#if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT)
 	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
 		return scatter ?
 			i40e_recv_scattered_pkts_vec_avx2 : i40e_recv_pkts_vec_avx2;
@@ -2932,7 +2934,7 @@ i40e_get_latest_rx_vec(bool scatter)
 static eth_rx_burst_t
 i40e_get_recommend_rx_vec(bool scatter)
 {
-#ifdef RTE_ARCH_X86
+#if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT)
 	/*
 	 * since AVX frequency can be different to base frequency, limit
 	 * use of AVX2 version to later plaforms, not all those that could
@@ -3049,7 +3051,7 @@ i40e_set_tx_function_flag(struct rte_eth_dev *dev, struct i40e_tx_queue *txq)
 static eth_tx_burst_t
 i40e_get_latest_tx_vec(void)
 {
-#ifdef RTE_ARCH_X86
+#if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT)
 	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
 		return i40e_xmit_pkts_vec_avx2;
 #endif
@@ -3059,7 +3061,7 @@ i40e_get_latest_tx_vec(void)
 static eth_tx_burst_t
 i40e_get_recommend_tx_vec(void)
 {
-#ifdef RTE_ARCH_X86
+#if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT)
 	/*
 	 * since AVX frequency can be different to base frequency, limit
 	 * use of AVX2 version to later plaforms, not all those that could
@@ -3160,7 +3162,8 @@ i40e_set_default_pctype_table(struct rte_eth_dev *dev)
 	ad->pctypes_tbl[RTE_ETH_FLOW_L2_PAYLOAD] =
 				(1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD);
 
-	if (hw->mac.type == I40E_MAC_X722) {
+	if (hw->mac.type == I40E_MAC_X722 ||
+	    hw->mac.type == I40E_MAC_X722_VF) {
 		ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV4_UDP] |=
 			(1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP);
 		ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV4_UDP] |=
@@ -3182,14 +3185,15 @@ i40e_set_default_pctype_table(struct rte_eth_dev *dev)
 	}
 }
 
-/* Stubs needed for linkage when CONFIG_RTE_I40E_INC_VECTOR is set to 'n' */
-__rte_weak int
+#ifndef RTE_LIBRTE_I40E_INC_VECTOR
+/* Stubs needed for linkage when CONFIG_RTE_LIBRTE_I40E_INC_VECTOR is set to 'n' */
+int
 i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev __rte_unused *dev)
 {
 	return -1;
 }
 
-__rte_weak uint16_t
+uint16_t
 i40e_recv_pkts_vec(
 	void __rte_unused *rx_queue,
 	struct rte_mbuf __rte_unused **rx_pkts,
@@ -3198,7 +3202,7 @@ i40e_recv_pkts_vec(
 	return 0;
 }
 
-__rte_weak uint16_t
+uint16_t
 i40e_recv_scattered_pkts_vec(
 	void __rte_unused *rx_queue,
 	struct rte_mbuf __rte_unused **rx_pkts,
@@ -3207,52 +3211,55 @@ i40e_recv_scattered_pkts_vec(
 	return 0;
 }
 
-__rte_weak uint16_t
-i40e_recv_pkts_vec_avx2(void __rte_unused *rx_queue,
-			struct rte_mbuf __rte_unused **rx_pkts,
-			uint16_t __rte_unused nb_pkts)
-{
-	return 0;
-}
-
-__rte_weak uint16_t
-i40e_recv_scattered_pkts_vec_avx2(void __rte_unused *rx_queue,
-			struct rte_mbuf __rte_unused **rx_pkts,
-			uint16_t __rte_unused nb_pkts)
-{
-	return 0;
-}
-
-__rte_weak int
+int
 i40e_rxq_vec_setup(struct i40e_rx_queue __rte_unused *rxq)
 {
 	return -1;
 }
 
-__rte_weak int
+int
 i40e_txq_vec_setup(struct i40e_tx_queue __rte_unused *txq)
 {
 	return -1;
 }
 
-__rte_weak void
+void
 i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue __rte_unused*rxq)
 {
 	return;
 }
 
-__rte_weak uint16_t
+uint16_t
 i40e_xmit_fixed_burst_vec(void __rte_unused * tx_queue,
 			  struct rte_mbuf __rte_unused **tx_pkts,
 			  uint16_t __rte_unused nb_pkts)
 {
 	return 0;
 }
+#endif /* ifndef RTE_LIBRTE_I40E_INC_VECTOR */
+
+#ifndef CC_AVX2_SUPPORT
+uint16_t
+i40e_recv_pkts_vec_avx2(void __rte_unused *rx_queue,
+			struct rte_mbuf __rte_unused **rx_pkts,
+			uint16_t __rte_unused nb_pkts)
+{
+	return 0;
+}
+
+uint16_t
+i40e_recv_scattered_pkts_vec_avx2(void __rte_unused *rx_queue,
+			struct rte_mbuf __rte_unused **rx_pkts,
+			uint16_t __rte_unused nb_pkts)
+{
+	return 0;
+}
 
-__rte_weak uint16_t
+uint16_t
 i40e_xmit_pkts_vec_avx2(void __rte_unused * tx_queue, struct rte_mbuf __rte_unused
 			**tx_pkts, uint16_t __rte_unused nb_pkts)
 {
 	return 0;
}
+#endif /* ifndef CC_AVX2_SUPPORT */
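
A recurring change in the hunks above is dropping the explicit rte_wmb() and switching the RX/TX tail (doorbell) updates from I40E_PCI_REG_WRITE_RELAXED() to I40E_PCI_REG_WRITE(), so the ordering guarantee lives inside the register-write helper rather than at each call site. The sketch below illustrates the two idioms using DPDK's generic MMIO accessors from rte_io.h; it is a minimal sketch, not the driver's actual macro definitions, and the doorbell_* function names and tail_reg parameter are hypothetical.

#include <stdint.h>
#include <rte_io.h>      /* rte_write32(), rte_write32_relaxed() */
#include <rte_atomic.h>  /* rte_wmb() */

/* Idiom removed by the patch: the descriptor-ring stores are ordered with an
 * explicit write barrier, then the tail register is written with a relaxed
 * (unordered) MMIO store. */
static inline void
doorbell_update_relaxed(volatile void *tail_reg, uint32_t tail)
{
	rte_wmb();
	rte_write32_relaxed(tail, tail_reg);
}

/* Idiom introduced by the patch: rte_write32() issues an I/O write barrier
 * before the store, so no separate barrier is needed at the call site. */
static inline void
doorbell_update_ordered(volatile void *tail_reg, uint32_t tail)
{
	rte_write32(tail, tail_reg);
}

The rte_errno changes in i40e_prep_pkts() follow the usual DPDK convention: rte_errno holds a positive errno value, while helpers such as rte_validate_tx_offload() and rte_net_intel_cksum_prepare() return a negative errno on failure, hence the negation when their return value is stored.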