X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fixgbe%2Fixgbe_rxtx.c;h=c814a28cb49a0d9eed4f7630ee4544dd2eca993d;hb=3e455a97dcb23b6e3684aa48a0302cf0429f4286;hp=f41dc13d5eec5709442825a337058451a8be1d5a;hpb=26e9c3e20180954c94027ff18597d4d0e9548324;p=dpdk.git diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c index f41dc13d5e..c814a28cb4 100644 --- a/drivers/net/ixgbe/ixgbe_rxtx.c +++ b/drivers/net/ixgbe/ixgbe_rxtx.c @@ -33,7 +33,8 @@ #include #include #include -#include +#include +#include #include #include #include @@ -42,6 +43,7 @@ #include #include #include +#include #include "ixgbe_logs.h" #include "base/ixgbe_api.h" @@ -87,11 +89,6 @@ #define rte_ixgbe_prefetch(p) do {} while (0) #endif -#ifdef RTE_IXGBE_INC_VECTOR -uint16_t ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts, - uint16_t nb_pkts); -#endif - /********************************************************************* * * TX functions @@ -313,7 +310,7 @@ tx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, /* update tail pointer */ rte_wmb(); - IXGBE_PCI_REG_WRITE_RELAXED(txq->tdt_reg_addr, txq->tx_tail); + IXGBE_PCI_REG_WC_WRITE_RELAXED(txq->tdt_reg_addr, txq->tx_tail); return nb_pkts; } @@ -344,7 +341,6 @@ ixgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts, return nb_tx; } -#ifdef RTE_IXGBE_INC_VECTOR static uint16_t ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) @@ -366,7 +362,6 @@ ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts, return nb_tx; } -#endif static inline void ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq, @@ -459,7 +454,7 @@ ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq, seqnum_seed |= tx_offload.l2_len << IXGBE_ADVTXD_TUNNEL_LEN; } -#ifdef RTE_LIBRTE_SECURITY +#ifdef RTE_LIB_SECURITY if (ol_flags & PKT_TX_SEC_OFFLOAD) { union ixgbe_crypto_tx_desc_md *md = (union ixgbe_crypto_tx_desc_md *)mdata; @@ -588,11 +583,11 @@ ixgbe_xmit_cleanup(struct ixgbe_tx_queue *txq) desc_to_clean_to = sw_ring[desc_to_clean_to].last_id; status = txr[desc_to_clean_to].wb.status; if (!(status & rte_cpu_to_le_32(IXGBE_TXD_STAT_DD))) { - PMD_TX_FREE_LOG(DEBUG, - "TX descriptor %4u is not done" - "(port=%d queue=%d)", - desc_to_clean_to, - txq->port_id, txq->queue_id); + PMD_TX_LOG(DEBUG, + "TX descriptor %4u is not done" + "(port=%d queue=%d)", + desc_to_clean_to, + txq->port_id, txq->queue_id); /* Failed to clean any descriptors, better luck next time */ return -(1); } @@ -605,11 +600,11 @@ ixgbe_xmit_cleanup(struct ixgbe_tx_queue *txq) nb_tx_to_clean = (uint16_t)(desc_to_clean_to - last_desc_cleaned); - PMD_TX_FREE_LOG(DEBUG, - "Cleaning %4u TX descriptors: %4u to %4u " - "(port=%d queue=%d)", - nb_tx_to_clean, last_desc_cleaned, desc_to_clean_to, - txq->port_id, txq->queue_id); + PMD_TX_LOG(DEBUG, + "Cleaning %4u TX descriptors: %4u to %4u " + "(port=%d queue=%d)", + nb_tx_to_clean, last_desc_cleaned, desc_to_clean_to, + txq->port_id, txq->queue_id); /* * The last descriptor to clean is done, so that means all the @@ -652,7 +647,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint32_t ctx = 0; uint32_t new_ctx; union ixgbe_tx_offload tx_offload; -#ifdef RTE_LIBRTE_SECURITY +#ifdef RTE_LIB_SECURITY uint8_t use_ipsec; #endif @@ -682,7 +677,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, * are needed for offload functionality. 
*/ ol_flags = tx_pkt->ol_flags; -#ifdef RTE_LIBRTE_SECURITY +#ifdef RTE_LIB_SECURITY use_ipsec = txq->using_ipsec && (ol_flags & PKT_TX_SEC_OFFLOAD); #endif @@ -696,11 +691,11 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, tx_offload.tso_segsz = tx_pkt->tso_segsz; tx_offload.outer_l2_len = tx_pkt->outer_l2_len; tx_offload.outer_l3_len = tx_pkt->outer_l3_len; -#ifdef RTE_LIBRTE_SECURITY +#ifdef RTE_LIB_SECURITY if (use_ipsec) { union ixgbe_crypto_tx_desc_md *ipsec_mdata = (union ixgbe_crypto_tx_desc_md *) - &tx_pkt->udata64; + rte_security_dynfield(tx_pkt); tx_offload.sa_idx = ipsec_mdata->sa_idx; tx_offload.sec_pad_len = ipsec_mdata->pad_len; } @@ -755,12 +750,12 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, * nb_used better be less than or equal to txq->tx_rs_thresh */ if (nb_used > txq->nb_tx_free) { - PMD_TX_FREE_LOG(DEBUG, - "Not enough free TX descriptors " - "nb_used=%4u nb_free=%4u " - "(port=%d queue=%d)", - nb_used, txq->nb_tx_free, - txq->port_id, txq->queue_id); + PMD_TX_LOG(DEBUG, + "Not enough free TX descriptors " + "nb_used=%4u nb_free=%4u " + "(port=%d queue=%d)", + nb_used, txq->nb_tx_free, + txq->port_id, txq->queue_id); if (ixgbe_xmit_cleanup(txq) != 0) { /* Could not clean any descriptors */ @@ -771,17 +766,17 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, /* nb_used better be <= txq->tx_rs_thresh */ if (unlikely(nb_used > txq->tx_rs_thresh)) { - PMD_TX_FREE_LOG(DEBUG, - "The number of descriptors needed to " - "transmit the packet exceeds the " - "RS bit threshold. This will impact " - "performance." - "nb_used=%4u nb_free=%4u " - "tx_rs_thresh=%4u. " - "(port=%d queue=%d)", - nb_used, txq->nb_tx_free, - txq->tx_rs_thresh, - txq->port_id, txq->queue_id); + PMD_TX_LOG(DEBUG, + "The number of descriptors needed to " + "transmit the packet exceeds the " + "RS bit threshold. This will impact " + "performance." + "nb_used=%4u nb_free=%4u " + "tx_rs_thresh=%4u. 
" + "(port=%d queue=%d)", + nb_used, txq->nb_tx_free, + txq->tx_rs_thresh, + txq->port_id, txq->queue_id); /* * Loop here until there are enough TX * descriptors or until the ring cannot be @@ -865,7 +860,8 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, } ixgbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req, - tx_offload, &tx_pkt->udata64); + tx_offload, + rte_security_dynfield(tx_pkt)); txe->last_id = tx_last; tx_id = txe->next_id; @@ -883,7 +879,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, } olinfo_status |= (pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT); -#ifdef RTE_LIBRTE_SECURITY +#ifdef RTE_LIB_SECURITY if (use_ipsec) olinfo_status |= IXGBE_ADVTXD_POPTS_IPSEC; #endif @@ -924,10 +920,10 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, /* Set RS bit only on threshold packets' last descriptor */ if (txq->nb_tx_used >= txq->tx_rs_thresh) { - PMD_TX_FREE_LOG(DEBUG, - "Setting RS bit on TXD id=" - "%4u (port=%d queue=%d)", - tx_last, txq->port_id, txq->queue_id); + PMD_TX_LOG(DEBUG, + "Setting RS bit on TXD id=" + "%4u (port=%d queue=%d)", + tx_last, txq->port_id, txq->queue_id); cmd_type_len |= IXGBE_TXD_CMD_RS; @@ -953,7 +949,7 @@ end_of_tx: PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u", (unsigned) txq->port_id, (unsigned) txq->queue_id, (unsigned) tx_id, (unsigned) nb_tx); - IXGBE_PCI_REG_WRITE_RELAXED(txq->tdt_reg_addr, tx_id); + IXGBE_PCI_REG_WC_WRITE_RELAXED(txq->tdt_reg_addr, tx_id); txq->tx_tail = tx_id; return nb_tx; @@ -993,7 +989,13 @@ ixgbe_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) return i; } -#ifdef RTE_LIBRTE_ETHDEV_DEBUG + /* check the size of packet */ + if (m->pkt_len < IXGBE_TX_MIN_PKT_LEN) { + rte_errno = EINVAL; + return i; + } + +#ifdef RTE_ETHDEV_DEBUG_TX ret = rte_validate_tx_offload(m); if (ret != 0) { rte_errno = -ret; @@ -1367,6 +1369,39 @@ const uint32_t RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_UDP, }; +static int +ixgbe_monitor_callback(const uint64_t value, + const uint64_t arg[RTE_POWER_MONITOR_OPAQUE_SZ] __rte_unused) +{ + const uint64_t m = rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD); + /* + * we expect the DD bit to be set to 1 if this descriptor was already + * written to. + */ + return (value & m) == m ? -1 : 0; +} + +int +ixgbe_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc) +{ + volatile union ixgbe_adv_rx_desc *rxdp; + struct ixgbe_rx_queue *rxq = rx_queue; + uint16_t desc; + + desc = rxq->rx_tail; + rxdp = &rxq->rx_ring[desc]; + /* watch for changes in status bit */ + pmc->addr = &rxdp->wb.upper.status_error; + + /* comparison callback */ + pmc->fn = ixgbe_monitor_callback; + + /* the registers are 32-bit */ + pmc->size = sizeof(uint32_t); + + return 0; +} + /* @note: fix ixgbe_dev_supported_ptypes_get() if any change here. 
*/ static inline uint32_t ixgbe_rxd_pkt_info_to_pkt_type(uint32_t pkt_info, uint16_t ptype_mask) @@ -1439,7 +1474,8 @@ rx_desc_status_to_pkt_flags(uint32_t rx_status, uint64_t vlan_flags) } static inline uint64_t -rx_desc_error_to_pkt_flags(uint32_t rx_status) +rx_desc_error_to_pkt_flags(uint32_t rx_status, uint16_t pkt_info, + uint8_t rx_udp_csum_zero_err) { uint64_t pkt_flags; @@ -1456,12 +1492,21 @@ rx_desc_error_to_pkt_flags(uint32_t rx_status) pkt_flags = error_to_pkt_flags_map[(rx_status >> IXGBE_RXDADV_ERR_CKSUM_BIT) & IXGBE_RXDADV_ERR_CKSUM_MSK]; + /* Mask out the bad UDP checksum error if the hardware has UDP zero + * checksum error issue, so that the software application will then + * have to recompute the checksum itself if needed. + */ + if ((rx_status & IXGBE_RXDADV_ERR_TCPE) && + (pkt_info & IXGBE_RXDADV_PKTTYPE_UDP) && + rx_udp_csum_zero_err) + pkt_flags &= ~PKT_RX_L4_CKSUM_BAD; + if ((rx_status & IXGBE_RXD_STAT_OUTERIPCS) && (rx_status & IXGBE_RXDADV_ERR_OUTERIPER)) { - pkt_flags |= PKT_RX_EIP_CKSUM_BAD; + pkt_flags |= PKT_RX_OUTER_IP_CKSUM_BAD; } -#ifdef RTE_LIBRTE_SECURITY +#ifdef RTE_LIB_SECURITY if (rx_status & IXGBE_RXD_STAT_SECP) { pkt_flags |= PKT_RX_SEC_OFFLOAD; if (rx_status & IXGBE_RXDADV_LNKSEC_ERROR_BAD_SIG) @@ -1542,7 +1587,9 @@ ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue *rxq) /* convert descriptor fields to rte mbuf flags */ pkt_flags = rx_desc_status_to_pkt_flags(s[j], vlan_flags); - pkt_flags |= rx_desc_error_to_pkt_flags(s[j]); + pkt_flags |= rx_desc_error_to_pkt_flags(s[j], + (uint16_t)pkt_info[j], + rxq->rx_udp_csum_zero_err); pkt_flags |= ixgbe_rxd_pkt_info_to_pkt_flags ((uint16_t)pkt_info[j]); mb->ol_flags = pkt_flags; @@ -1693,7 +1740,7 @@ rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, /* update tail pointer */ rte_wmb(); - IXGBE_PCI_REG_WRITE_RELAXED(rxq->rdt_reg_addr, + IXGBE_PCI_REG_WC_WRITE_RELAXED(rxq->rdt_reg_addr, cur_free_trigger); } @@ -1875,7 +1922,9 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan); pkt_flags = rx_desc_status_to_pkt_flags(staterr, vlan_flags); - pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr); + pkt_flags = pkt_flags | + rx_desc_error_to_pkt_flags(staterr, (uint16_t)pkt_info, + rxq->rx_udp_csum_zero_err); pkt_flags = pkt_flags | ixgbe_rxd_pkt_info_to_pkt_flags((uint16_t)pkt_info); rxm->ol_flags = pkt_flags; @@ -1919,7 +1968,7 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, (unsigned) nb_rx); rx_id = (uint16_t) ((rx_id == 0) ? 
(rxq->nb_rx_desc - 1) : (rx_id - 1)); - IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id); + IXGBE_PCI_REG_WC_WRITE(rxq->rdt_reg_addr, rx_id); nb_hold = 0; } rxq->nb_rx_hold = nb_hold; @@ -1968,7 +2017,8 @@ ixgbe_fill_cluster_head_buf( head->vlan_tci = rte_le_to_cpu_16(desc->wb.upper.vlan); pkt_info = rte_le_to_cpu_32(desc->wb.lower.lo_dword.data); pkt_flags = rx_desc_status_to_pkt_flags(staterr, rxq->vlan_flags); - pkt_flags |= rx_desc_error_to_pkt_flags(staterr); + pkt_flags |= rx_desc_error_to_pkt_flags(staterr, (uint16_t)pkt_info, + rxq->rx_udp_csum_zero_err); pkt_flags |= ixgbe_rxd_pkt_info_to_pkt_flags((uint16_t)pkt_info); head->ol_flags = pkt_flags; head->packet_type = @@ -2028,7 +2078,7 @@ ixgbe_recv_pkts_lro(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts, bool eop; struct ixgbe_rx_entry *rxe; struct ixgbe_scattered_rx_entry *sc_entry; - struct ixgbe_scattered_rx_entry *next_sc_entry; + struct ixgbe_scattered_rx_entry *next_sc_entry = NULL; struct ixgbe_rx_entry *next_rxe = NULL; struct rte_mbuf *first_seg; struct rte_mbuf *rxm; @@ -2097,8 +2147,9 @@ next_desc: if (!ixgbe_rx_alloc_bufs(rxq, false)) { rte_wmb(); - IXGBE_PCI_REG_WRITE_RELAXED(rxq->rdt_reg_addr, - next_rdt); + IXGBE_PCI_REG_WC_WRITE_RELAXED( + rxq->rdt_reg_addr, + next_rdt); nb_hold -= rxq->rx_free_thresh; } else { PMD_RX_LOG(DEBUG, "RX bulk alloc failed " @@ -2263,7 +2314,7 @@ next_desc: rxq->port_id, rxq->queue_id, rx_id, nb_hold, nb_rx); rte_wmb(); - IXGBE_PCI_REG_WRITE_RELAXED(rxq->rdt_reg_addr, prev_id); + IXGBE_PCI_REG_WC_WRITE_RELAXED(rxq->rdt_reg_addr, prev_id); nb_hold = 0; } @@ -2291,7 +2342,7 @@ ixgbe_recv_pkts_lro_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts, * **********************************************************************/ -static void __attribute__((cold)) +static void __rte_cold ixgbe_tx_queue_release_mbufs(struct ixgbe_tx_queue *txq) { unsigned i; @@ -2400,11 +2451,12 @@ ixgbe_dev_tx_done_cleanup(void *tx_queue, uint32_t free_cnt) { struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue; if (txq->offloads == 0 && -#ifdef RTE_LIBRTE_SECURITY +#ifdef RTE_LIB_SECURITY !(txq->using_ipsec) && #endif txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST) { if (txq->tx_rs_thresh <= RTE_IXGBE_TX_MAX_FREE_BUF_SZ && + rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128 && (rte_eal_process_type() != RTE_PROC_PRIMARY || txq->sw_ring_v != NULL)) { return ixgbe_tx_done_cleanup_vec(txq, free_cnt); @@ -2416,7 +2468,7 @@ ixgbe_dev_tx_done_cleanup(void *tx_queue, uint32_t free_cnt) return ixgbe_tx_done_cleanup_full(txq, free_cnt); } -static void __attribute__((cold)) +static void __rte_cold ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq) { if (txq != NULL && @@ -2424,7 +2476,7 @@ ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq) rte_free(txq->sw_ring); } -static void __attribute__((cold)) +static void __rte_cold ixgbe_tx_queue_release(struct ixgbe_tx_queue *txq) { if (txq != NULL && txq->ops != NULL) { @@ -2434,14 +2486,14 @@ ixgbe_tx_queue_release(struct ixgbe_tx_queue *txq) } } -void __attribute__((cold)) +void __rte_cold ixgbe_dev_tx_queue_release(void *txq) { ixgbe_tx_queue_release(txq); } /* (Re)set dynamic ixgbe_tx_queue fields to defaults */ -static void __attribute__((cold)) +static void __rte_cold ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq) { static const union ixgbe_adv_tx_desc zeroed_desc = {{0}}; @@ -2491,25 +2543,24 @@ static const struct ixgbe_txq_ops def_txq_ops = { * the queue parameters. 
Used in tx_queue_setup by primary process and then * in dev_init by secondary process when attaching to an existing ethdev. */ -void __attribute__((cold)) +void __rte_cold ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq) { /* Use a simple Tx queue (no offloads, no multi segs) if possible */ if ((txq->offloads == 0) && -#ifdef RTE_LIBRTE_SECURITY +#ifdef RTE_LIB_SECURITY !(txq->using_ipsec) && #endif (txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST)) { PMD_INIT_LOG(DEBUG, "Using simple tx code path"); dev->tx_pkt_prepare = NULL; -#ifdef RTE_IXGBE_INC_VECTOR if (txq->tx_rs_thresh <= RTE_IXGBE_TX_MAX_FREE_BUF_SZ && + rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128 && (rte_eal_process_type() != RTE_PROC_PRIMARY || ixgbe_txq_vec_setup(txq) == 0)) { PMD_INIT_LOG(DEBUG, "Vector tx enabled."); dev->tx_pkt_burst = ixgbe_xmit_pkts_vec; } else -#endif dev->tx_pkt_burst = ixgbe_xmit_pkts_simple; } else { PMD_INIT_LOG(DEBUG, "Using full-featured tx code path"); @@ -2557,14 +2608,14 @@ ixgbe_get_tx_port_offloads(struct rte_eth_dev *dev) hw->mac.type == ixgbe_mac_X550EM_a) tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM; -#ifdef RTE_LIBRTE_SECURITY +#ifdef RTE_LIB_SECURITY if (dev->security_ctx) tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY; #endif return tx_offload_capa; } -int __attribute__((cold)) +int __rte_cold ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, uint16_t nb_desc, @@ -2725,7 +2776,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, txq->offloads = offloads; txq->ops = &def_txq_ops; txq->tx_deferred_start = tx_conf->tx_deferred_start; -#ifdef RTE_LIBRTE_SECURITY +#ifdef RTE_LIB_SECURITY txq->using_ipsec = !!(dev->data->dev_conf.txmode.offloads & DEV_TX_OFFLOAD_SECURITY); #endif @@ -2779,7 +2830,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, * * @m scattered cluster head */ -static void __attribute__((cold)) +static void __rte_cold ixgbe_free_sc_cluster(struct rte_mbuf *m) { uint16_t i, nb_segs = m->nb_segs; @@ -2792,18 +2843,16 @@ ixgbe_free_sc_cluster(struct rte_mbuf *m) } } -static void __attribute__((cold)) +static void __rte_cold ixgbe_rx_queue_release_mbufs(struct ixgbe_rx_queue *rxq) { unsigned i; -#ifdef RTE_IXGBE_INC_VECTOR /* SSE Vector driver has a different way of releasing mbufs. */ if (rxq->rx_using_sse) { ixgbe_rx_queue_release_mbufs_vec(rxq); return; } -#endif if (rxq->sw_ring != NULL) { for (i = 0; i < rxq->nb_rx_desc; i++) { @@ -2831,7 +2880,7 @@ ixgbe_rx_queue_release_mbufs(struct ixgbe_rx_queue *rxq) } } -static void __attribute__((cold)) +static void __rte_cold ixgbe_rx_queue_release(struct ixgbe_rx_queue *rxq) { if (rxq != NULL) { @@ -2842,7 +2891,7 @@ ixgbe_rx_queue_release(struct ixgbe_rx_queue *rxq) } } -void __attribute__((cold)) +void __rte_cold ixgbe_dev_rx_queue_release(void *rxq) { ixgbe_rx_queue_release(rxq); @@ -2856,7 +2905,7 @@ ixgbe_dev_rx_queue_release(void *rxq) * -EINVAL: the preconditions are NOT satisfied and the default Rx burst * function must be used. 
*/ -static inline int __attribute__((cold)) +static inline int __rte_cold check_rx_burst_bulk_alloc_preconditions(struct ixgbe_rx_queue *rxq) { int ret = 0; @@ -2893,7 +2942,7 @@ check_rx_burst_bulk_alloc_preconditions(struct ixgbe_rx_queue *rxq) } /* Reset dynamic ixgbe_rx_queue fields back to defaults */ -static void __attribute__((cold)) +static void __rte_cold ixgbe_reset_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_rx_queue *rxq) { static const union ixgbe_adv_rx_desc zeroed_desc = {{0}}; @@ -2935,7 +2984,7 @@ ixgbe_reset_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_rx_queue *rxq) rxq->pkt_first_seg = NULL; rxq->pkt_last_seg = NULL; -#ifdef RTE_IXGBE_INC_VECTOR +#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64) rxq->rxrearm_start = 0; rxq->rxrearm_nb = 0; #endif @@ -3010,7 +3059,7 @@ ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev) hw->mac.type == ixgbe_mac_X550EM_a) offloads |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM; -#ifdef RTE_LIBRTE_SECURITY +#ifdef RTE_LIB_SECURITY if (dev->security_ctx) offloads |= DEV_RX_OFFLOAD_SECURITY; #endif @@ -3018,7 +3067,7 @@ ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev) return offloads; } -int __attribute__((cold)) +int __rte_cold ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, uint16_t nb_desc, @@ -3090,6 +3139,13 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, else rxq->pkt_type_mask = IXGBE_PACKET_TYPE_MASK_82599; + /* + * 82599 errata, UDP frames with a 0 checksum can be marked as checksum + * errors. + */ + if (hw->mac.type == ixgbe_mac_82599EB) + rxq->rx_udp_csum_zero_err = 1; + /* * Allocate RX ring hardware descriptors. A memzone large enough to * handle the maximum ring size is allocated in order to allow for @@ -3249,7 +3305,7 @@ ixgbe_dev_rx_descriptor_status(void *rx_queue, uint16_t offset) if (unlikely(offset >= rxq->nb_rx_desc)) return -EINVAL; -#ifdef RTE_IXGBE_INC_VECTOR +#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64) if (rxq->rx_using_sse) nb_hold = rxq->rxrearm_nb; else @@ -3299,7 +3355,7 @@ ixgbe_dev_tx_descriptor_status(void *tx_queue, uint16_t offset) /* * Set up link loopback for X540/X550 mode Tx->Rx. 
*/ -static inline void __attribute__((cold)) +static inline void __rte_cold ixgbe_setup_loopback_link_x540_x550(struct ixgbe_hw *hw, bool enable) { uint32_t macc; @@ -3327,7 +3383,7 @@ ixgbe_setup_loopback_link_x540_x550(struct ixgbe_hw *hw, bool enable) IXGBE_WRITE_REG(hw, IXGBE_MACC, macc); } -void __attribute__((cold)) +void __rte_cold ixgbe_dev_clear_queues(struct rte_eth_dev *dev) { unsigned i; @@ -3373,12 +3429,14 @@ ixgbe_dev_free_queues(struct rte_eth_dev *dev) for (i = 0; i < dev->data->nb_rx_queues; i++) { ixgbe_dev_rx_queue_release(dev->data->rx_queues[i]); dev->data->rx_queues[i] = NULL; + rte_eth_dma_zone_free(dev, "rx_ring", i); } dev->data->nb_rx_queues = 0; for (i = 0; i < dev->data->nb_tx_queues; i++) { ixgbe_dev_tx_queue_release(dev->data->tx_queues[i]); dev->data->tx_queues[i] = NULL; + rte_eth_dma_zone_free(dev, "tx_ring", i); } dev->data->nb_tx_queues = 0; } @@ -4435,7 +4493,7 @@ ixgbe_vmdq_tx_hw_configure(struct ixgbe_hw *hw) IXGBE_WRITE_FLUSH(hw); } -static int __attribute__((cold)) +static int __rte_cold ixgbe_alloc_rx_queue_mbufs(struct ixgbe_rx_queue *rxq) { struct ixgbe_rx_entry *rxe = rxq->sw_ring; @@ -4735,7 +4793,7 @@ ixgbe_set_ivar(struct rte_eth_dev *dev, u8 entry, u8 vector, s8 type) } } -void __attribute__((cold)) +void __rte_cold ixgbe_set_rx_function(struct rte_eth_dev *dev) { uint16_t i, rx_using_sse; @@ -4746,10 +4804,10 @@ ixgbe_set_rx_function(struct rte_eth_dev *dev) * conditions to be met and Rx Bulk Allocation should be allowed. */ if (ixgbe_rx_vec_dev_conf_condition_check(dev) || - !adapter->rx_bulk_alloc_allowed) { + !adapter->rx_bulk_alloc_allowed || + rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_128) { PMD_INIT_LOG(DEBUG, "Port[%d] doesn't meet Vector Rx " - "preconditions or RTE_IXGBE_INC_VECTOR is " - "not enabled", + "preconditions", dev->data->port_id); adapter->rx_vec_allowed = false; @@ -4838,7 +4896,7 @@ ixgbe_set_rx_function(struct rte_eth_dev *dev) struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i]; rxq->rx_using_sse = rx_using_sse; -#ifdef RTE_LIBRTE_SECURITY +#ifdef RTE_LIB_SECURITY rxq->using_ipsec = !!(dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SECURITY); #endif @@ -4895,15 +4953,11 @@ ixgbe_set_rsc(struct rte_eth_dev *dev) /* RFCTL configuration */ rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL); if ((rsc_capable) && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) - /* - * Since NFS packets coalescing is not supported - clear - * RFCTL.NFSW_DIS and RFCTL.NFSR_DIS when RSC is - * enabled. - */ - rfctl &= ~(IXGBE_RFCTL_RSC_DIS | IXGBE_RFCTL_NFSW_DIS | - IXGBE_RFCTL_NFSR_DIS); + rfctl &= ~IXGBE_RFCTL_RSC_DIS; else rfctl |= IXGBE_RFCTL_RSC_DIS; + /* disable NFS filtering */ + rfctl |= IXGBE_RFCTL_NFSW_DIS | IXGBE_RFCTL_NFSR_DIS; IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl); /* If LRO hasn't been requested - we are done here. */ @@ -4989,7 +5043,7 @@ ixgbe_set_rsc(struct rte_eth_dev *dev) /* * Initializes Receive Unit. */ -int __attribute__((cold)) +int __rte_cold ixgbe_dev_rx_init(struct rte_eth_dev *dev) { struct ixgbe_hw *hw; @@ -5166,7 +5220,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev) /* * Initializes Transmit Unit. */ -void __attribute__((cold)) +void __rte_cold ixgbe_dev_tx_init(struct rte_eth_dev *dev) { struct ixgbe_hw *hw; @@ -5255,7 +5309,7 @@ ixgbe_check_supported_loopback_mode(struct rte_eth_dev *dev) /* * Set up link for 82599 loopback mode Tx->Rx. 
*/ -static inline void __attribute__((cold)) +static inline void __rte_cold ixgbe_setup_loopback_link_82599(struct ixgbe_hw *hw) { PMD_INIT_FUNC_TRACE(); @@ -5283,7 +5337,7 @@ ixgbe_setup_loopback_link_82599(struct ixgbe_hw *hw) /* * Start Transmit and Receive Units. */ -int __attribute__((cold)) +int __rte_cold ixgbe_dev_rxtx_start(struct rte_eth_dev *dev) { struct ixgbe_hw *hw; @@ -5350,7 +5404,7 @@ ixgbe_dev_rxtx_start(struct rte_eth_dev *dev) ixgbe_setup_loopback_link_x540_x550(hw, true); } -#ifdef RTE_LIBRTE_SECURITY +#ifdef RTE_LIB_SECURITY if ((dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SECURITY) || (dev->data->dev_conf.txmode.offloads & @@ -5371,7 +5425,7 @@ ixgbe_dev_rxtx_start(struct rte_eth_dev *dev) /* * Start Receive Units for specified queue. */ -int __attribute__((cold)) +int __rte_cold ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) { struct ixgbe_hw *hw; @@ -5413,7 +5467,7 @@ ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) /* * Stop Receive Units for specified queue. */ -int __attribute__((cold)) +int __rte_cold ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id) { struct ixgbe_hw *hw; @@ -5453,7 +5507,7 @@ ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id) /* * Start Transmit Units for specified queue. */ -int __attribute__((cold)) +int __rte_cold ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id) { struct ixgbe_hw *hw; @@ -5492,7 +5546,7 @@ ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id) /* * Stop Transmit Units for specified queue. */ -int __attribute__((cold)) +int __rte_cold ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id) { struct ixgbe_hw *hw; @@ -5589,7 +5643,7 @@ ixgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, /* * [VF] Initializes Receive Unit. */ -int __attribute__((cold)) +int __rte_cold ixgbevf_dev_rx_init(struct rte_eth_dev *dev) { struct ixgbe_hw *hw; @@ -5631,8 +5685,12 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev) * ixgbevf_rlpml_set_vf even if jumbo frames are not used. This way, * VF packets received can work in all cases. */ - ixgbevf_rlpml_set_vf(hw, - (uint16_t)dev->data->dev_conf.rxmode.max_rx_pkt_len); + if (ixgbevf_rlpml_set_vf(hw, + (uint16_t)dev->data->dev_conf.rxmode.max_rx_pkt_len)) { + PMD_INIT_LOG(ERR, "Set max packet length to %d failed.", + dev->data->dev_conf.rxmode.max_rx_pkt_len); + return -EINVAL; + } /* * Assume no header split and no VLAN strip support @@ -5713,7 +5771,7 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev) /* * [VF] Initializes Transmit Unit. */ -void __attribute__((cold)) +void __rte_cold ixgbevf_dev_tx_init(struct rte_eth_dev *dev) { struct ixgbe_hw *hw; @@ -5754,7 +5812,7 @@ ixgbevf_dev_tx_init(struct rte_eth_dev *dev) /* * [VF] Start Transmit and Receive Units. 
*/ -void __attribute__((cold)) +void __rte_cold ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev) { struct ixgbe_hw *hw; @@ -5919,14 +5977,15 @@ ixgbe_config_rss_filter(struct rte_eth_dev *dev, return 0; } -/* Stubs needed for linkage when CONFIG_RTE_IXGBE_INC_VECTOR is set to 'n' */ -__rte_weak int +/* Stubs needed for linkage when RTE_ARCH_PPC_64 is set */ +#if defined(RTE_ARCH_PPC_64) +int ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev __rte_unused *dev) { return -1; } -__rte_weak uint16_t +uint16_t ixgbe_recv_pkts_vec( void __rte_unused *rx_queue, struct rte_mbuf __rte_unused **rx_pkts, @@ -5935,7 +5994,7 @@ ixgbe_recv_pkts_vec( return 0; } -__rte_weak uint16_t +uint16_t ixgbe_recv_scattered_pkts_vec( void __rte_unused *rx_queue, struct rte_mbuf __rte_unused **rx_pkts, @@ -5944,8 +6003,29 @@ ixgbe_recv_scattered_pkts_vec( return 0; } -__rte_weak int +int ixgbe_rxq_vec_setup(struct ixgbe_rx_queue __rte_unused *rxq) { return -1; } + +uint16_t +ixgbe_xmit_fixed_burst_vec(void __rte_unused *tx_queue, + struct rte_mbuf __rte_unused **tx_pkts, + uint16_t __rte_unused nb_pkts) +{ + return 0; +} + +int +ixgbe_txq_vec_setup(struct ixgbe_tx_queue __rte_unused *txq) +{ + return -1; +} + +void +ixgbe_rx_queue_release_mbufs_vec(struct ixgbe_rx_queue __rte_unused *rxq) +{ + return; +} +#endif
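
Note: one of the hunks above adds ixgbe_monitor_callback() and ixgbe_get_monitor_addr(), which expose the Rx descriptor status word (and its DD bit) as a power-monitor wake-up condition. The sketch below is illustrative only and is not part of the diff: it shows how an application might consume that condition through the generic ethdev/EAL APIs. The helper name, the port/queue arguments and the ~1 ms timeout are arbitrary choices for the example, and rte_eth_get_monitor_addr() was still marked experimental in DPDK releases of this era.

/*
 * Illustrative sketch only -- not part of the diff above.
 * Ask ethdev for the descriptor address to watch on a given Rx queue
 * (forwarded to the PMD's get_monitor_addr callback, i.e.
 * ixgbe_get_monitor_addr() for an ixgbe port), then sleep on it until
 * the descriptor is written back or a TSC deadline passes.
 */
#include <rte_ethdev.h>
#include <rte_power_intrinsics.h>
#include <rte_cycles.h>

static void
wait_for_next_rx_desc(uint16_t port_id, uint16_t queue_id)
{
	struct rte_power_monitor_cond pmc;

	if (rte_eth_get_monitor_addr(port_id, queue_id, &pmc) != 0)
		return;

	/*
	 * The driver's ixgbe_monitor_callback() keeps the core from
	 * sleeping if the DD bit is already set; otherwise the core
	 * wakes on the descriptor write-back or at the TSC deadline.
	 * On CPUs without monitor/wait support the call simply returns
	 * an error instead of blocking.
	 */
	rte_power_monitor(&pmc, rte_get_tsc_cycles() +
			rte_get_tsc_hz() / 1000);
}

In practice an application would usually not open-code this: in the same era of DPDK, rte_power_pmd_mgmt_queue_enable() installs an Rx callback that performs essentially the sequence above when monitor-based power management is selected for a queue.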