[dpdk.git] drivers/net/i40e/i40e_rxtx.c
index 4640a9c..85c44f5 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -302,17 +302,17 @@ i40e_txd_enable_checksum(uint64_t ol_flags,
        switch (ol_flags & PKT_TX_L4_MASK) {
        case PKT_TX_TCP_CKSUM:
                *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
-               *td_offset |= (sizeof(struct tcp_hdr) >> 2) <<
+               *td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
                                I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
                break;
        case PKT_TX_SCTP_CKSUM:
                *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
-               *td_offset |= (sizeof(struct sctp_hdr) >> 2) <<
+               *td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
                                I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
                break;
        case PKT_TX_UDP_CKSUM:
                *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
-               *td_offset |= (sizeof(struct udp_hdr) >> 2) <<
+               *td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
                                I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
                break;
        default:
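
For context on the field being filled in: td_offset encodes the L4 header length in 32-bit words (hence the >> 2), and sizeof(struct rte_tcp_hdr) is 20 bytes just like the old struct tcp_hdr, so the rename does not change the encoded value. A minimal sketch of how an application requests this offload on an mbuf; the helper name is illustrative, not part of the driver:

#include <rte_mbuf.h>
#include <rte_ether.h>
#include <rte_ip.h>

/* Illustrative helper: ask the PMD to compute the IPv4/TCP checksums.
 * i40e_txd_enable_checksum() then derives td_offset from l2_len/l3_len
 * and the fixed L4 header size shown above. */
static void
request_tcp_cksum_offload(struct rte_mbuf *m)
{
	m->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM;
	m->l2_len = sizeof(struct rte_ether_hdr);	/* 14 bytes */
	m->l3_len = sizeof(struct rte_ipv4_hdr);	/* 20 bytes, no options */
}
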
@@ -889,17 +889,17 @@ i40e_recv_scattered_pkts(void *rx_queue,
                 */
                rxm->next = NULL;
                if (unlikely(rxq->crc_len > 0)) {
-                       first_seg->pkt_len -= ETHER_CRC_LEN;
-                       if (rx_packet_len <= ETHER_CRC_LEN) {
+                       first_seg->pkt_len -= RTE_ETHER_CRC_LEN;
+                       if (rx_packet_len <= RTE_ETHER_CRC_LEN) {
                                rte_pktmbuf_free_seg(rxm);
                                first_seg->nb_segs--;
                                last_seg->data_len =
                                        (uint16_t)(last_seg->data_len -
-                                       (ETHER_CRC_LEN - rx_packet_len));
+                                       (RTE_ETHER_CRC_LEN - rx_packet_len));
                                last_seg->next = NULL;
                        } else
                                rxm->data_len = (uint16_t)(rx_packet_len -
-                                                               ETHER_CRC_LEN);
+                                                       RTE_ETHER_CRC_LEN);
                }
 
                first_seg->port = rxq->port_id;
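
A worked example of the CRC branch above (numbers chosen for illustration; RTE_ETHER_CRC_LEN is 4): if a frame arrives split as a 2048-byte segment followed by a 2-byte final segment, then rx_packet_len = 2 <= 4, so the final segment holds nothing but CRC bytes and is freed, and the previous segment's data_len gives back the remaining 4 - 2 = 2 CRC bytes. If the final segment instead held, say, 10 bytes, only its own data_len would be trimmed by 4. In both cases first_seg->pkt_len is reduced by the full 4 bytes.
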
@@ -1446,7 +1446,7 @@ i40e_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
                if (!(ol_flags & PKT_TX_TCP_SEG)) {
                        if (m->nb_segs > I40E_TX_MAX_MTU_SEG ||
                            m->pkt_len > I40E_FRAME_SIZE_MAX) {
-                               rte_errno = -EINVAL;
+                               rte_errno = EINVAL;
                                return i;
                        }
                } else if (m->nb_segs > I40E_TX_MAX_SEG ||
@@ -1456,31 +1456,31 @@ i40e_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
                        /* MSS outside the range (256B - 9674B) are considered
                         * malicious
                         */
-                       rte_errno = -EINVAL;
+                       rte_errno = EINVAL;
                        return i;
                }
 
                if (ol_flags & I40E_TX_OFFLOAD_NOTSUP_MASK) {
-                       rte_errno = -ENOTSUP;
+                       rte_errno = ENOTSUP;
                        return i;
                }
 
                /* check the size of packet */
                if (m->pkt_len < I40E_TX_MIN_PKT_LEN) {
-                       rte_errno = -EINVAL;
+                       rte_errno = EINVAL;
                        return i;
                }
 
 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
                ret = rte_validate_tx_offload(m);
                if (ret != 0) {
-                       rte_errno = ret;
+                       rte_errno = -ret;
                        return i;
                }
 #endif
                ret = rte_net_intel_cksum_prepare(m);
                if (ret != 0) {
-                       rte_errno = ret;
+                       rte_errno = -ret;
                        return i;
                }
        }
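
The sign fixes above matter because rte_eth_tx_prepare() reports failure through rte_errno, which is documented to hold a positive errno value, while the return value is the number of packets that passed the checks. A hedged caller sketch (port_id/queue_id and the burst array are placeholders):

#include <stdio.h>
#include <rte_ethdev.h>
#include <rte_errno.h>

/* Illustrative caller: with this fix rte_errno is EINVAL/ENOTSUP rather
 * than their negatives, so rte_strerror() prints something meaningful. */
static void
prepare_and_send(uint16_t port_id, uint16_t queue_id,
		 struct rte_mbuf **pkts, uint16_t nb_pkts)
{
	uint16_t nb_ok = rte_eth_tx_prepare(port_id, queue_id, pkts, nb_pkts);

	if (nb_ok != nb_pkts)
		printf("packet %u rejected: %s\n", nb_ok,
		       rte_strerror(rte_errno));
	rte_eth_tx_burst(port_id, queue_id, pkts, nb_ok);
}
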
@@ -1839,7 +1839,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
        rxq->reg_idx = reg_idx;
        rxq->port_id = dev->data->port_id;
        if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
-               rxq->crc_len = ETHER_CRC_LEN;
+               rxq->crc_len = RTE_ETHER_CRC_LEN;
        else
                rxq->crc_len = 0;
        rxq->drop_en = rx_conf->rx_drop_en;
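
For reference, the KEEP_CRC branch above is driven by the application's Rx offload configuration; a minimal sketch, with port_id and the queue counts as placeholders:

#include <rte_ethdev.h>

/* Illustrative: request that received frames keep their Ethernet CRC.
 * The queue setup above then records crc_len = RTE_ETHER_CRC_LEN (4 bytes)
 * so the Rx paths can account for the trailing CRC. */
static int
configure_keep_crc(uint16_t port_id)
{
	struct rte_eth_conf port_conf = { 0 };

	port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
	return rte_eth_dev_configure(port_id, 1, 1, &port_conf);
}
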
@@ -2634,23 +2634,23 @@ i40e_rx_queue_config(struct i40e_rx_queue *rxq)
        len = hw->func_caps.rx_buf_chain_len * rxq->rx_buf_len;
        rxq->max_pkt_len = RTE_MIN(len, data->dev_conf.rxmode.max_rx_pkt_len);
        if (data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
-               if (rxq->max_pkt_len <= ETHER_MAX_LEN ||
+               if (rxq->max_pkt_len <= RTE_ETHER_MAX_LEN ||
                        rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
                        PMD_DRV_LOG(ERR, "maximum packet length must "
                                    "be larger than %u and smaller than %u,"
                                    "as jumbo frame is enabled",
-                                   (uint32_t)ETHER_MAX_LEN,
+                                   (uint32_t)RTE_ETHER_MAX_LEN,
                                    (uint32_t)I40E_FRAME_SIZE_MAX);
                        return I40E_ERR_CONFIG;
                }
        } else {
-               if (rxq->max_pkt_len < ETHER_MIN_LEN ||
-                       rxq->max_pkt_len > ETHER_MAX_LEN) {
+               if (rxq->max_pkt_len < RTE_ETHER_MIN_LEN ||
+                       rxq->max_pkt_len > RTE_ETHER_MAX_LEN) {
                        PMD_DRV_LOG(ERR, "maximum packet length must be "
                                    "larger than %u and smaller than %u, "
                                    "as jumbo frame is disabled",
-                                   (uint32_t)ETHER_MIN_LEN,
-                                   (uint32_t)ETHER_MAX_LEN);
+                                   (uint32_t)RTE_ETHER_MIN_LEN,
+                                   (uint32_t)RTE_ETHER_MAX_LEN);
                        return I40E_ERR_CONFIG;
                }
        }
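
Numerically these checks are unchanged by the rename: RTE_ETHER_MIN_LEN is 64 and RTE_ETHER_MAX_LEN is 1518 (1500-byte payload plus 14-byte header and 4-byte CRC), so max_pkt_len must lie in [64, 1518] when jumbo frames are disabled and in (1518, I40E_FRAME_SIZE_MAX] when they are enabled.
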
@@ -2934,7 +2934,7 @@ i40e_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 static eth_rx_burst_t
 i40e_get_latest_rx_vec(bool scatter)
 {
-#ifdef RTE_ARCH_X86
+#if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT)
        if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
                return scatter ? i40e_recv_scattered_pkts_vec_avx2 :
                                 i40e_recv_pkts_vec_avx2;
@@ -2946,7 +2946,7 @@ i40e_get_latest_rx_vec(bool scatter)
 static eth_rx_burst_t
 i40e_get_recommend_rx_vec(bool scatter)
 {
-#ifdef RTE_ARCH_X86
+#if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT)
        /*
         * since AVX frequency can be different to base frequency, limit
         * use of AVX2 version to later platforms, not all those that could
@@ -3063,7 +3063,7 @@ i40e_set_tx_function_flag(struct rte_eth_dev *dev, struct i40e_tx_queue *txq)
 static eth_tx_burst_t
 i40e_get_latest_tx_vec(void)
 {
-#ifdef RTE_ARCH_X86
+#if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT)
        if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
                return i40e_xmit_pkts_vec_avx2;
 #endif
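
The four burst-function selection helpers in this area (latest/recommended, Rx and Tx) all gain the same guard: CC_AVX2_SUPPORT is defined by the build system only when the compiler can actually build the AVX2 objects, while rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) remains the run-time check that the executing CPU supports those instructions. Both halves are needed: without the compile-time one, a CPU with AVX2 could select a function that was never really built (previously a weak stub that returns 0 packets).
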
@@ -3073,7 +3073,7 @@ i40e_get_latest_tx_vec(void)
 static eth_tx_burst_t
 i40e_get_recommend_tx_vec(void)
 {
-#ifdef RTE_ARCH_X86
+#if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT)
        /*
         * since AVX frequency can be different to base frequency, limit
         * use of AVX2 version to later platforms, not all those that could
@@ -3196,14 +3196,15 @@ i40e_set_default_pctype_table(struct rte_eth_dev *dev)
        }
 }
 
+#ifndef RTE_LIBRTE_I40E_INC_VECTOR
 /* Stubs needed for linkage when CONFIG_RTE_LIBRTE_I40E_INC_VECTOR is set to 'n' */
-__rte_weak int
+int
 i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev __rte_unused *dev)
 {
        return -1;
 }
 
-__rte_weak uint16_t
+uint16_t
 i40e_recv_pkts_vec(
        void __rte_unused *rx_queue,
        struct rte_mbuf __rte_unused **rx_pkts,
@@ -3212,7 +3213,7 @@ i40e_recv_pkts_vec(
        return 0;
 }
 
-__rte_weak uint16_t
+uint16_t
 i40e_recv_scattered_pkts_vec(
        void __rte_unused *rx_queue,
        struct rte_mbuf __rte_unused **rx_pkts,
@@ -3221,52 +3222,55 @@ i40e_recv_scattered_pkts_vec(
        return 0;
 }
 
-__rte_weak uint16_t
-i40e_recv_pkts_vec_avx2(void __rte_unused *rx_queue,
-                       struct rte_mbuf __rte_unused **rx_pkts,
-                       uint16_t __rte_unused nb_pkts)
-{
-       return 0;
-}
-
-__rte_weak uint16_t
-i40e_recv_scattered_pkts_vec_avx2(void __rte_unused *rx_queue,
-                       struct rte_mbuf __rte_unused **rx_pkts,
-                       uint16_t __rte_unused nb_pkts)
-{
-       return 0;
-}
-
-__rte_weak int
+int
 i40e_rxq_vec_setup(struct i40e_rx_queue __rte_unused *rxq)
 {
        return -1;
 }
 
-__rte_weak int
+int
 i40e_txq_vec_setup(struct i40e_tx_queue __rte_unused *txq)
 {
        return -1;
 }
 
-__rte_weak void
+void
 i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue __rte_unused*rxq)
 {
        return;
 }
 
-__rte_weak uint16_t
+uint16_t
 i40e_xmit_fixed_burst_vec(void __rte_unused * tx_queue,
                          struct rte_mbuf __rte_unused **tx_pkts,
                          uint16_t __rte_unused nb_pkts)
 {
        return 0;
 }
+#endif /* ifndef RTE_LIBRTE_I40E_INC_VECTOR */
+
+#ifndef CC_AVX2_SUPPORT
+uint16_t
+i40e_recv_pkts_vec_avx2(void __rte_unused *rx_queue,
+                       struct rte_mbuf __rte_unused **rx_pkts,
+                       uint16_t __rte_unused nb_pkts)
+{
+       return 0;
+}
 
-__rte_weak uint16_t
+uint16_t
+i40e_recv_scattered_pkts_vec_avx2(void __rte_unused *rx_queue,
+                       struct rte_mbuf __rte_unused **rx_pkts,
+                       uint16_t __rte_unused nb_pkts)
+{
+       return 0;
+}
+
+uint16_t
 i40e_xmit_pkts_vec_avx2(void __rte_unused * tx_queue,
                          struct rte_mbuf __rte_unused **tx_pkts,
                          uint16_t __rte_unused nb_pkts)
 {
        return 0;
 }
+#endif /* ifndef CC_AVX2_SUPPORT */
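
Closing note on the stub rework in the last two hunks: link-time selection via __rte_weak is replaced by compile-time selection, where the fallback is compiled only when the real implementation is not (RTE_LIBRTE_I40E_INC_VECTOR unset, or CC_AVX2_SUPPORT unset). A standalone illustration of the weak-symbol mechanism being retired; this is generic C, not DPDK code, and the function name is made up:

#include <stdio.h>

/* Weak fallback, analogous to the old __rte_weak stubs: a strong
 * definition in another object overrides it at link time.  With static
 * archives, though, the object holding the strong definition may never
 * be pulled in, and the weak stub then silently wins; avoiding that
 * class of surprise is one commonly cited reason to prefer the #ifndef
 * approach used here. */
__attribute__((weak)) int
i40e_like_fast_path(void)
{
	return 0;
}

int
main(void)
{
	printf("fast path result: %d\n", i40e_like_fast_path());
	return 0;
}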