diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index ee87651194..bfe161f2c5 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -302,17 +302,17 @@ i40e_txd_enable_checksum(uint64_t ol_flags,
 	switch (ol_flags & PKT_TX_L4_MASK) {
 	case PKT_TX_TCP_CKSUM:
 		*td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
-		*td_offset |= (sizeof(struct tcp_hdr) >> 2) <<
+		*td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
 				I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
 		break;
 	case PKT_TX_SCTP_CKSUM:
 		*td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
-		*td_offset |= (sizeof(struct sctp_hdr) >> 2) <<
+		*td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
 				I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
 		break;
 	case PKT_TX_UDP_CKSUM:
 		*td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
-		*td_offset |= (sizeof(struct udp_hdr) >> 2) <<
+		*td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
 				I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
 		break;
 	default:
@@ -564,8 +564,7 @@ i40e_rx_alloc_bufs(struct i40e_rx_queue *rxq)
 	}
 
 	/* Update rx tail register */
-	rte_wmb();
-	I40E_PCI_REG_WRITE_RELAXED(rxq->qrx_tail, rxq->rx_free_trigger);
+	I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_free_trigger);
 
 	rxq->rx_free_trigger =
 		(uint16_t)(rxq->rx_free_trigger + rxq->rx_free_thresh);
@@ -981,15 +980,9 @@ i40e_set_tso_ctx(struct rte_mbuf *mbuf, union i40e_tx_offload tx_offload)
 		return ctx_desc;
 	}
 
-	/**
-	 * in case of non tunneling packet, the outer_l2_len and
-	 * outer_l3_len must be 0.
-	 */
-	hdr_len = tx_offload.outer_l2_len +
-		tx_offload.outer_l3_len +
-		tx_offload.l2_len +
-		tx_offload.l3_len +
-		tx_offload.l4_len;
+	hdr_len = tx_offload.l2_len + tx_offload.l3_len + tx_offload.l4_len;
+	hdr_len += (mbuf->ol_flags & PKT_TX_TUNNEL_MASK) ?
+		   tx_offload.outer_l2_len + tx_offload.outer_l3_len : 0;
 
 	cd_cmd = I40E_TX_CTX_DESC_TSO;
 	cd_tso_len = mbuf->pkt_len - hdr_len;
@@ -1214,13 +1207,11 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 	}
 
 end_of_tx:
-	rte_wmb();
-
 	PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
 		   (unsigned) txq->port_id, (unsigned) txq->queue_id,
 		   (unsigned) tx_id, (unsigned) nb_tx);
 
-	I40E_PCI_REG_WRITE_RELAXED(txq->qtx_tail, tx_id);
+	I40E_PCI_REG_WRITE(txq->qtx_tail, tx_id);
 	txq->tx_tail = tx_id;
 
 	return nb_tx;
@@ -1371,8 +1362,7 @@ tx_xmit_pkts(struct i40e_tx_queue *txq,
 		txq->tx_tail = 0;
 
 	/* Update the tx tail register */
-	rte_wmb();
-	I40E_PCI_REG_WRITE_RELAXED(txq->qtx_tail, txq->tx_tail);
+	I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
 
 	return nb_pkts;
 }
@@ -1446,7 +1436,7 @@ i40e_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
 		if (!(ol_flags & PKT_TX_TCP_SEG)) {
 			if (m->nb_segs > I40E_TX_MAX_MTU_SEG ||
 			    m->pkt_len > I40E_FRAME_SIZE_MAX) {
-				rte_errno = -EINVAL;
+				rte_errno = EINVAL;
 				return i;
 			}
 		} else if (m->nb_segs > I40E_TX_MAX_SEG ||
@@ -1456,31 +1446,31 @@ i40e_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
 			/* MSS outside the range (256B - 9674B) is considered
 			 * malicious
 			 */
-			rte_errno = -EINVAL;
+			rte_errno = EINVAL;
 			return i;
 		}
 
 		if (ol_flags & I40E_TX_OFFLOAD_NOTSUP_MASK) {
-			rte_errno = -ENOTSUP;
+			rte_errno = ENOTSUP;
 			return i;
 		}
 
 		/* check the size of packet */
 		if (m->pkt_len < I40E_TX_MIN_PKT_LEN) {
-			rte_errno = -EINVAL;
+			rte_errno = EINVAL;
 			return i;
 		}
 
 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
 		ret = rte_validate_tx_offload(m);
 		if (ret != 0) {
-			rte_errno = ret;
+			rte_errno = -ret;
 			return i;
 		}
 #endif
 		ret = rte_net_intel_cksum_prepare(m);
 		if (ret != 0) {
-			rte_errno = ret;
+			rte_errno = -ret;
 			return i;
 		}
 	}
@@ -1550,8 +1540,6 @@ i40e_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 			return err;
 		}
 
-		rte_wmb();
-
 		/* Init the RX tail register. */
 		I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
 
@@ -2934,7 +2922,7 @@ i40e_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 static eth_rx_burst_t
 i40e_get_latest_rx_vec(bool scatter)
 {
-#ifdef RTE_ARCH_X86
+#if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT)
 	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
 		return scatter ?
 			i40e_recv_scattered_pkts_vec_avx2 : i40e_recv_pkts_vec_avx2;
@@ -2946,7 +2934,7 @@ i40e_get_latest_rx_vec(bool scatter)
 static eth_rx_burst_t
 i40e_get_recommend_rx_vec(bool scatter)
 {
-#ifdef RTE_ARCH_X86
+#if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT)
 	/*
 	 * since AVX frequency can be different to base frequency, limit
 	 * use of AVX2 version to later platforms, not all those that could
@@ -3063,7 +3051,7 @@ i40e_set_tx_function_flag(struct rte_eth_dev *dev, struct i40e_tx_queue *txq)
 static eth_tx_burst_t
 i40e_get_latest_tx_vec(void)
 {
-#ifdef RTE_ARCH_X86
+#if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT)
 	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
 		return i40e_xmit_pkts_vec_avx2;
 #endif
@@ -3073,7 +3061,7 @@ i40e_get_latest_tx_vec(void)
 static eth_tx_burst_t
 i40e_get_recommend_tx_vec(void)
 {
-#ifdef RTE_ARCH_X86
+#if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT)
 	/*
 	 * since AVX frequency can be different to base frequency, limit
 	 * use of AVX2 version to later platforms, not all those that could
@@ -3174,7 +3162,8 @@ i40e_set_default_pctype_table(struct rte_eth_dev *dev)
 	ad->pctypes_tbl[RTE_ETH_FLOW_L2_PAYLOAD] =
 				(1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD);
 
-	if (hw->mac.type == I40E_MAC_X722) {
+	if (hw->mac.type == I40E_MAC_X722 ||
+	    hw->mac.type == I40E_MAC_X722_VF) {
 		ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV4_UDP] |=
 			(1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP);
 		ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV4_UDP] |=
@@ -3196,14 +3185,15 @@ i40e_set_default_pctype_table(struct rte_eth_dev *dev)
 	}
 }
 
+#ifndef RTE_LIBRTE_I40E_INC_VECTOR
 /* Stubs needed for linkage when CONFIG_RTE_LIBRTE_I40E_INC_VECTOR is set to 'n' */
-__rte_weak int
+int
 i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev __rte_unused *dev)
 {
 	return -1;
 }
 
-__rte_weak uint16_t
+uint16_t
 i40e_recv_pkts_vec(
 	void __rte_unused *rx_queue,
 	struct rte_mbuf __rte_unused **rx_pkts,
@@ -3212,7 +3202,7 @@ i40e_recv_pkts_vec(
 	return 0;
 }
 
-__rte_weak uint16_t
+uint16_t
 i40e_recv_scattered_pkts_vec(
 	void __rte_unused *rx_queue,
 	struct rte_mbuf __rte_unused **rx_pkts,
@@ -3221,52 +3211,55 @@ i40e_recv_scattered_pkts_vec(
 	return 0;
 }
 
-__rte_weak uint16_t
-i40e_recv_pkts_vec_avx2(void __rte_unused *rx_queue,
-			struct rte_mbuf __rte_unused **rx_pkts,
-			uint16_t __rte_unused nb_pkts)
-{
-	return 0;
-}
-
-__rte_weak uint16_t
-i40e_recv_scattered_pkts_vec_avx2(void __rte_unused *rx_queue,
-			struct rte_mbuf __rte_unused **rx_pkts,
-			uint16_t __rte_unused nb_pkts)
-{
-	return 0;
-}
-
-__rte_weak int
+int
 i40e_rxq_vec_setup(struct i40e_rx_queue __rte_unused *rxq)
 {
 	return -1;
 }
 
-__rte_weak int
+int
 i40e_txq_vec_setup(struct i40e_tx_queue __rte_unused *txq)
 {
 	return -1;
 }
 
-__rte_weak void
+void
 i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue __rte_unused*rxq)
 {
 	return;
 }
 
-__rte_weak uint16_t
+uint16_t
 i40e_xmit_fixed_burst_vec(void __rte_unused * tx_queue,
 			  struct rte_mbuf __rte_unused **tx_pkts,
 			  uint16_t __rte_unused nb_pkts)
 {
 	return 0;
 }
+#endif /* ifndef RTE_LIBRTE_I40E_INC_VECTOR */
 
-__rte_weak uint16_t
+#ifndef CC_AVX2_SUPPORT
+uint16_t
+i40e_recv_pkts_vec_avx2(void __rte_unused *rx_queue,
+			struct rte_mbuf __rte_unused **rx_pkts,
+			uint16_t __rte_unused nb_pkts)
+{
+	return 0;
+}
+
+uint16_t
+i40e_recv_scattered_pkts_vec_avx2(void __rte_unused *rx_queue,
+			struct rte_mbuf __rte_unused **rx_pkts,
+			uint16_t __rte_unused nb_pkts)
+{
+	return 0;
+}
+
+uint16_t
 i40e_xmit_pkts_vec_avx2(void __rte_unused * tx_queue,
 			struct rte_mbuf __rte_unused **tx_pkts,
 			uint16_t __rte_unused nb_pkts)
 {
 	return 0;
 }
+#endif /* ifndef CC_AVX2_SUPPORT */
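
---
Editor's note (illustrative, not part of the patch): the rte_errno changes in i40e_prep_pkts() follow the DPDK convention that rte_errno holds a positive errno value. Literal error codes are therefore assigned directly (EINVAL, ENOTSUP), while helpers such as rte_validate_tx_offload() and rte_net_intel_cksum_prepare(), which return 0 on success or a negative errno on failure, have their return value negated. A minimal sketch of the pattern, using a hypothetical check_mbuf() helper that is not part of the driver:

	#include <errno.h>
	#include <rte_errno.h>
	#include <rte_mbuf.h>
	#include <rte_net.h>

	/* Hypothetical helper mirroring the i40e_prep_pkts() error paths:
	 * returns 0 on success; on failure stores a positive errno value
	 * in rte_errno and returns -1.
	 */
	static int
	check_mbuf(struct rte_mbuf *m, uint32_t min_pkt_len)
	{
		int ret;

		if (m->pkt_len < min_pkt_len) {
			rte_errno = EINVAL;	/* positive value, not -EINVAL */
			return -1;
		}

		/* helper returns 0 or a negative errno */
		ret = rte_net_intel_cksum_prepare(m);
		if (ret != 0) {
			rte_errno = -ret;	/* flip the sign for rte_errno */
			return -1;
		}

		return 0;
	}

The rte_wmb() deletions above rest on a similar observation: I40E_PCI_REG_WRITE() expands to rte_write32(), which already issues an I/O write barrier before the MMIO store, so each rte_wmb() plus I40E_PCI_REG_WRITE_RELAXED() pair collapses into a single non-relaxed write.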