X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fixgbe%2Fixgbe_rxtx.c;h=29d385c0620dc526b5dbe17855d571776871db78;hb=f30e69b41f949cd4a9afb6ff39de196e661708e2;hp=c9a70a823244d6ceef1cb3c85a275769ceaebab3;hpb=d54e3fe7dca0b371550611ad0c6585f8f67b45e5;p=dpdk.git diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c index c9a70a8232..29d385c062 100644 --- a/drivers/net/ixgbe/ixgbe_rxtx.c +++ b/drivers/net/ixgbe/ixgbe_rxtx.c @@ -87,11 +87,6 @@ #define rte_ixgbe_prefetch(p) do {} while (0) #endif -#ifdef RTE_IXGBE_INC_VECTOR -uint16_t ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts, - uint16_t nb_pkts); -#endif - /********************************************************************* * * TX functions @@ -313,7 +308,7 @@ tx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, /* update tail pointer */ rte_wmb(); - IXGBE_PCI_REG_WRITE_RELAXED(txq->tdt_reg_addr, txq->tx_tail); + IXGBE_PCI_REG_WC_WRITE_RELAXED(txq->tdt_reg_addr, txq->tx_tail); return nb_pkts; } @@ -344,7 +339,6 @@ ixgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts, return nb_tx; } -#ifdef RTE_IXGBE_INC_VECTOR static uint16_t ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) @@ -366,7 +360,6 @@ ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts, return nb_tx; } -#endif static inline void ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq, @@ -422,21 +415,24 @@ ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq, case PKT_TX_UDP_CKSUM: type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP | IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT; - mss_l4len_idx |= sizeof(struct udp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT; + mss_l4len_idx |= sizeof(struct rte_udp_hdr) + << IXGBE_ADVTXD_L4LEN_SHIFT; tx_offload_mask.l2_len |= ~0; tx_offload_mask.l3_len |= ~0; break; case PKT_TX_TCP_CKSUM: type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP | IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT; - mss_l4len_idx |= sizeof(struct tcp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT; + mss_l4len_idx |= sizeof(struct rte_tcp_hdr) + << IXGBE_ADVTXD_L4LEN_SHIFT; tx_offload_mask.l2_len |= ~0; tx_offload_mask.l3_len |= ~0; break; case PKT_TX_SCTP_CKSUM: type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP | IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT; - mss_l4len_idx |= sizeof(struct sctp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT; + mss_l4len_idx |= sizeof(struct rte_sctp_hdr) + << IXGBE_ADVTXD_L4LEN_SHIFT; tx_offload_mask.l2_len |= ~0; tx_offload_mask.l3_len |= ~0; break; @@ -950,7 +946,7 @@ end_of_tx: PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u", (unsigned) txq->port_id, (unsigned) txq->queue_id, (unsigned) tx_id, (unsigned) nb_tx); - IXGBE_PCI_REG_WRITE_RELAXED(txq->tdt_reg_addr, tx_id); + IXGBE_PCI_REG_WC_WRITE_RELAXED(txq->tdt_reg_addr, tx_id); txq->tx_tail = tx_id; return nb_tx; @@ -981,25 +977,31 @@ ixgbe_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) */ if (m->nb_segs > IXGBE_TX_MAX_SEG - txq->wthresh) { - rte_errno = -EINVAL; + rte_errno = EINVAL; return i; } if (ol_flags & IXGBE_TX_OFFLOAD_NOTSUP_MASK) { - rte_errno = -ENOTSUP; + rte_errno = ENOTSUP; + return i; + } + + /* check the size of packet */ + if (m->pkt_len < IXGBE_TX_MIN_PKT_LEN) { + rte_errno = EINVAL; return i; } #ifdef RTE_LIBRTE_ETHDEV_DEBUG ret = rte_validate_tx_offload(m); if (ret != 0) { - rte_errno = ret; + rte_errno = -ret; return i; } #endif ret = rte_net_intel_cksum_prepare(m); if (ret != 0) { - rte_errno = ret; + rte_errno = -ret; return i; } } @@ -1690,7 +1692,7 @@ 
rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, /* update tail pointer */ rte_wmb(); - IXGBE_PCI_REG_WRITE_RELAXED(rxq->rdt_reg_addr, + IXGBE_PCI_REG_WC_WRITE_RELAXED(rxq->rdt_reg_addr, cur_free_trigger); } @@ -1916,7 +1918,7 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, (unsigned) nb_rx); rx_id = (uint16_t) ((rx_id == 0) ? (rxq->nb_rx_desc - 1) : (rx_id - 1)); - IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id); + IXGBE_PCI_REG_WC_WRITE(rxq->rdt_reg_addr, rx_id); nb_hold = 0; } rxq->nb_rx_hold = nb_hold; @@ -2025,11 +2027,11 @@ ixgbe_recv_pkts_lro(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts, bool eop; struct ixgbe_rx_entry *rxe; struct ixgbe_scattered_rx_entry *sc_entry; - struct ixgbe_scattered_rx_entry *next_sc_entry; + struct ixgbe_scattered_rx_entry *next_sc_entry = NULL; struct ixgbe_rx_entry *next_rxe = NULL; struct rte_mbuf *first_seg; struct rte_mbuf *rxm; - struct rte_mbuf *nmb; + struct rte_mbuf *nmb = NULL; union ixgbe_adv_rx_desc rxd; uint16_t data_len; uint16_t next_id; @@ -2094,8 +2096,9 @@ next_desc: if (!ixgbe_rx_alloc_bufs(rxq, false)) { rte_wmb(); - IXGBE_PCI_REG_WRITE_RELAXED(rxq->rdt_reg_addr, - next_rdt); + IXGBE_PCI_REG_WC_WRITE_RELAXED( + rxq->rdt_reg_addr, + next_rdt); nb_hold -= rxq->rx_free_thresh; } else { PMD_RX_LOG(DEBUG, "RX bulk alloc failed " @@ -2260,7 +2263,7 @@ next_desc: rxq->port_id, rxq->queue_id, rx_id, nb_hold, nb_rx); rte_wmb(); - IXGBE_PCI_REG_WRITE_RELAXED(rxq->rdt_reg_addr, prev_id); + IXGBE_PCI_REG_WC_WRITE_RELAXED(rxq->rdt_reg_addr, prev_id); nb_hold = 0; } @@ -2288,7 +2291,7 @@ ixgbe_recv_pkts_lro_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts, * **********************************************************************/ -static void __attribute__((cold)) +static void __rte_cold ixgbe_tx_queue_release_mbufs(struct ixgbe_tx_queue *txq) { unsigned i; @@ -2303,7 +2306,117 @@ ixgbe_tx_queue_release_mbufs(struct ixgbe_tx_queue *txq) } } -static void __attribute__((cold)) +static int +ixgbe_tx_done_cleanup_full(struct ixgbe_tx_queue *txq, uint32_t free_cnt) +{ + struct ixgbe_tx_entry *swr_ring = txq->sw_ring; + uint16_t i, tx_last, tx_id; + uint16_t nb_tx_free_last; + uint16_t nb_tx_to_clean; + uint32_t pkt_cnt; + + /* Start free mbuf from the next of tx_tail */ + tx_last = txq->tx_tail; + tx_id = swr_ring[tx_last].next_id; + + if (txq->nb_tx_free == 0 && ixgbe_xmit_cleanup(txq)) + return 0; + + nb_tx_to_clean = txq->nb_tx_free; + nb_tx_free_last = txq->nb_tx_free; + if (!free_cnt) + free_cnt = txq->nb_tx_desc; + + /* Loop through swr_ring to count the amount of + * freeable mubfs and packets. 
+ */ + for (pkt_cnt = 0; pkt_cnt < free_cnt; ) { + for (i = 0; i < nb_tx_to_clean && + pkt_cnt < free_cnt && + tx_id != tx_last; i++) { + if (swr_ring[tx_id].mbuf != NULL) { + rte_pktmbuf_free_seg(swr_ring[tx_id].mbuf); + swr_ring[tx_id].mbuf = NULL; + + /* + * last segment in the packet, + * increment packet count + */ + pkt_cnt += (swr_ring[tx_id].last_id == tx_id); + } + + tx_id = swr_ring[tx_id].next_id; + } + + if (txq->tx_rs_thresh > txq->nb_tx_desc - + txq->nb_tx_free || tx_id == tx_last) + break; + + if (pkt_cnt < free_cnt) { + if (ixgbe_xmit_cleanup(txq)) + break; + + nb_tx_to_clean = txq->nb_tx_free - nb_tx_free_last; + nb_tx_free_last = txq->nb_tx_free; + } + } + + return (int)pkt_cnt; +} + +static int +ixgbe_tx_done_cleanup_simple(struct ixgbe_tx_queue *txq, + uint32_t free_cnt) +{ + int i, n, cnt; + + if (free_cnt == 0 || free_cnt > txq->nb_tx_desc) + free_cnt = txq->nb_tx_desc; + + cnt = free_cnt - free_cnt % txq->tx_rs_thresh; + + for (i = 0; i < cnt; i += n) { + if (txq->nb_tx_desc - txq->nb_tx_free < txq->tx_rs_thresh) + break; + + n = ixgbe_tx_free_bufs(txq); + + if (n == 0) + break; + } + + return i; +} + +static int +ixgbe_tx_done_cleanup_vec(struct ixgbe_tx_queue *txq __rte_unused, + uint32_t free_cnt __rte_unused) +{ + return -ENOTSUP; +} + +int +ixgbe_dev_tx_done_cleanup(void *tx_queue, uint32_t free_cnt) +{ + struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue; + if (txq->offloads == 0 && +#ifdef RTE_LIBRTE_SECURITY + !(txq->using_ipsec) && +#endif + txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST) { + if (txq->tx_rs_thresh <= RTE_IXGBE_TX_MAX_FREE_BUF_SZ && + (rte_eal_process_type() != RTE_PROC_PRIMARY || + txq->sw_ring_v != NULL)) { + return ixgbe_tx_done_cleanup_vec(txq, free_cnt); + } else { + return ixgbe_tx_done_cleanup_simple(txq, free_cnt); + } + } + + return ixgbe_tx_done_cleanup_full(txq, free_cnt); +} + +static void __rte_cold ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq) { if (txq != NULL && @@ -2311,7 +2424,7 @@ ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq) rte_free(txq->sw_ring); } -static void __attribute__((cold)) +static void __rte_cold ixgbe_tx_queue_release(struct ixgbe_tx_queue *txq) { if (txq != NULL && txq->ops != NULL) { @@ -2321,14 +2434,14 @@ ixgbe_tx_queue_release(struct ixgbe_tx_queue *txq) } } -void __attribute__((cold)) +void __rte_cold ixgbe_dev_tx_queue_release(void *txq) { ixgbe_tx_queue_release(txq); } /* (Re)set dynamic ixgbe_tx_queue fields to defaults */ -static void __attribute__((cold)) +static void __rte_cold ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq) { static const union ixgbe_adv_tx_desc zeroed_desc = {{0}}; @@ -2378,7 +2491,7 @@ static const struct ixgbe_txq_ops def_txq_ops = { * the queue parameters. Used in tx_queue_setup by primary process and then * in dev_init by secondary process when attaching to an existing ethdev. 
*/ -void __attribute__((cold)) +void __rte_cold ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq) { /* Use a simple Tx queue (no offloads, no multi segs) if possible */ @@ -2389,14 +2502,12 @@ ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq) (txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST)) { PMD_INIT_LOG(DEBUG, "Using simple tx code path"); dev->tx_pkt_prepare = NULL; -#ifdef RTE_IXGBE_INC_VECTOR if (txq->tx_rs_thresh <= RTE_IXGBE_TX_MAX_FREE_BUF_SZ && (rte_eal_process_type() != RTE_PROC_PRIMARY || ixgbe_txq_vec_setup(txq) == 0)) { PMD_INIT_LOG(DEBUG, "Vector tx enabled."); dev->tx_pkt_burst = ixgbe_xmit_pkts_vec; } else -#endif dev->tx_pkt_burst = ixgbe_xmit_pkts_simple; } else { PMD_INIT_LOG(DEBUG, "Using full-featured tx code path"); @@ -2451,7 +2562,7 @@ ixgbe_get_tx_port_offloads(struct rte_eth_dev *dev) return tx_offload_capa; } -int __attribute__((cold)) +int __rte_cold ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, uint16_t nb_desc, @@ -2496,14 +2607,29 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, * tx_rs_thresh must be a divisor of the ring size. * tx_free_thresh must be greater than 0. * tx_free_thresh must be less than the size of the ring minus 3. + * tx_free_thresh + tx_rs_thresh must not exceed nb_desc. * One descriptor in the TX ring is used as a sentinel to avoid a * H/W race condition, hence the maximum threshold constraints. * When set to zero use default values. */ - tx_rs_thresh = (uint16_t)((tx_conf->tx_rs_thresh) ? - tx_conf->tx_rs_thresh : DEFAULT_TX_RS_THRESH); tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ? tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH); + /* force tx_rs_thresh to adapt an aggresive tx_free_thresh */ + tx_rs_thresh = (DEFAULT_TX_RS_THRESH + tx_free_thresh > nb_desc) ? + nb_desc - tx_free_thresh : DEFAULT_TX_RS_THRESH; + if (tx_conf->tx_rs_thresh > 0) + tx_rs_thresh = tx_conf->tx_rs_thresh; + if (tx_rs_thresh + tx_free_thresh > nb_desc) { + PMD_INIT_LOG(ERR, "tx_rs_thresh + tx_free_thresh must not " + "exceed nb_desc. (tx_rs_thresh=%u " + "tx_free_thresh=%u nb_desc=%u port = %d queue=%d)", + (unsigned int)tx_rs_thresh, + (unsigned int)tx_free_thresh, + (unsigned int)nb_desc, + (int)dev->data->port_id, + (int)queue_idx); + return -(EINVAL); + } if (tx_rs_thresh >= (nb_desc - 2)) { PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the number " "of TX descriptors minus 2. (tx_rs_thresh=%u " @@ -2651,7 +2777,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, * * @m scattered cluster head */ -static void __attribute__((cold)) +static void __rte_cold ixgbe_free_sc_cluster(struct rte_mbuf *m) { uint16_t i, nb_segs = m->nb_segs; @@ -2664,18 +2790,16 @@ ixgbe_free_sc_cluster(struct rte_mbuf *m) } } -static void __attribute__((cold)) +static void __rte_cold ixgbe_rx_queue_release_mbufs(struct ixgbe_rx_queue *rxq) { unsigned i; -#ifdef RTE_IXGBE_INC_VECTOR /* SSE Vector driver has a different way of releasing mbufs. 
*/ if (rxq->rx_using_sse) { ixgbe_rx_queue_release_mbufs_vec(rxq); return; } -#endif if (rxq->sw_ring != NULL) { for (i = 0; i < rxq->nb_rx_desc; i++) { @@ -2703,7 +2827,7 @@ ixgbe_rx_queue_release_mbufs(struct ixgbe_rx_queue *rxq) } } -static void __attribute__((cold)) +static void __rte_cold ixgbe_rx_queue_release(struct ixgbe_rx_queue *rxq) { if (rxq != NULL) { @@ -2714,7 +2838,7 @@ ixgbe_rx_queue_release(struct ixgbe_rx_queue *rxq) } } -void __attribute__((cold)) +void __rte_cold ixgbe_dev_rx_queue_release(void *rxq) { ixgbe_rx_queue_release(rxq); @@ -2728,7 +2852,7 @@ ixgbe_dev_rx_queue_release(void *rxq) * -EINVAL: the preconditions are NOT satisfied and the default Rx burst * function must be used. */ -static inline int __attribute__((cold)) +static inline int __rte_cold check_rx_burst_bulk_alloc_preconditions(struct ixgbe_rx_queue *rxq) { int ret = 0; @@ -2765,7 +2889,7 @@ check_rx_burst_bulk_alloc_preconditions(struct ixgbe_rx_queue *rxq) } /* Reset dynamic ixgbe_rx_queue fields back to defaults */ -static void __attribute__((cold)) +static void __rte_cold ixgbe_reset_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_rx_queue *rxq) { static const union ixgbe_adv_rx_desc zeroed_desc = {{0}}; @@ -2807,7 +2931,7 @@ ixgbe_reset_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_rx_queue *rxq) rxq->pkt_first_seg = NULL; rxq->pkt_last_seg = NULL; -#ifdef RTE_IXGBE_INC_VECTOR +#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64) rxq->rxrearm_start = 0; rxq->rxrearm_nb = 0; #endif @@ -2853,14 +2977,15 @@ ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev) DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_KEEP_CRC | DEV_RX_OFFLOAD_JUMBO_FRAME | - DEV_RX_OFFLOAD_SCATTER; + DEV_RX_OFFLOAD_VLAN_FILTER | + DEV_RX_OFFLOAD_SCATTER | + DEV_RX_OFFLOAD_RSS_HASH; if (hw->mac.type == ixgbe_mac_82598EB) offloads |= DEV_RX_OFFLOAD_VLAN_STRIP; if (ixgbe_is_vf(dev) == 0) - offloads |= (DEV_RX_OFFLOAD_VLAN_FILTER | - DEV_RX_OFFLOAD_VLAN_EXTEND); + offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND; /* * RSC is only supported by 82599 and x540 PF devices in a non-SR-IOV @@ -2889,7 +3014,7 @@ ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev) return offloads; } -int __attribute__((cold)) +int __rte_cold ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, uint16_t nb_desc, @@ -2901,8 +3026,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, struct ixgbe_rx_queue *rxq; struct ixgbe_hw *hw; uint16_t len; - struct ixgbe_adapter *adapter = - (struct ixgbe_adapter *)dev->data->dev_private; + struct ixgbe_adapter *adapter = dev->data->dev_private; uint64_t offloads; PMD_INIT_FUNC_TRACE(); @@ -2940,7 +3064,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx); rxq->port_id = dev->data->port_id; if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) - rxq->crc_len = ETHER_CRC_LEN; + rxq->crc_len = RTE_ETHER_CRC_LEN; else rxq->crc_len = 0; rxq->drop_en = rx_conf->rx_drop_en; @@ -3121,7 +3245,7 @@ ixgbe_dev_rx_descriptor_status(void *rx_queue, uint16_t offset) if (unlikely(offset >= rxq->nb_rx_desc)) return -EINVAL; -#ifdef RTE_IXGBE_INC_VECTOR +#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64) if (rxq->rx_using_sse) nb_hold = rxq->rxrearm_nb; else @@ -3168,12 +3292,43 @@ ixgbe_dev_tx_descriptor_status(void *tx_queue, uint16_t offset) return RTE_ETH_TX_DESC_FULL; } -void __attribute__((cold)) +/* + * Set up link loopback for X540/X550 mode Tx->Rx. 
+ */ +static inline void __rte_cold +ixgbe_setup_loopback_link_x540_x550(struct ixgbe_hw *hw, bool enable) +{ + uint32_t macc; + PMD_INIT_FUNC_TRACE(); + + u16 autoneg_reg = IXGBE_MII_AUTONEG_REG; + + hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg); + macc = IXGBE_READ_REG(hw, IXGBE_MACC); + + if (enable) { + /* datasheet 15.2.1: disable AUTONEG (PHY Bit 7.0.C) */ + autoneg_reg |= IXGBE_MII_AUTONEG_ENABLE; + /* datasheet 15.2.1: MACC.FLU = 1 (force link up) */ + macc |= IXGBE_MACC_FLU; + } else { + autoneg_reg &= ~IXGBE_MII_AUTONEG_ENABLE; + macc &= ~IXGBE_MACC_FLU; + } + + hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg); + + IXGBE_WRITE_REG(hw, IXGBE_MACC, macc); +} + +void __rte_cold ixgbe_dev_clear_queues(struct rte_eth_dev *dev) { unsigned i; - struct ixgbe_adapter *adapter = - (struct ixgbe_adapter *)dev->data->dev_private; + struct ixgbe_adapter *adapter = dev->data->dev_private; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); PMD_INIT_FUNC_TRACE(); @@ -3194,6 +3349,14 @@ ixgbe_dev_clear_queues(struct rte_eth_dev *dev) ixgbe_reset_rx_queue(adapter, rxq); } } + /* If loopback mode was enabled, reconfigure the link accordingly */ + if (dev->data->dev_conf.lpbk_mode != 0) { + if (hw->mac.type == ixgbe_mac_X540 || + hw->mac.type == ixgbe_mac_X550 || + hw->mac.type == ixgbe_mac_X550EM_x || + hw->mac.type == ixgbe_mac_X550EM_a) + ixgbe_setup_loopback_link_x540_x550(hw, false); + } } void @@ -3206,12 +3369,14 @@ ixgbe_dev_free_queues(struct rte_eth_dev *dev) for (i = 0; i < dev->data->nb_rx_queues; i++) { ixgbe_dev_rx_queue_release(dev->data->rx_queues[i]); dev->data->rx_queues[i] = NULL; + rte_eth_dma_zone_free(dev, "rx_ring", i); } dev->data->nb_rx_queues = 0; for (i = 0; i < dev->data->nb_tx_queues; i++) { ixgbe_dev_tx_queue_release(dev->data->tx_queues[i]); dev->data->tx_queues[i] = NULL; + rte_eth_dma_zone_free(dev, "tx_ring", i); } dev->data->nb_tx_queues = 0; } @@ -3427,7 +3592,7 @@ ixgbe_rss_configure(struct rte_eth_dev *dev) uint32_t reta_reg; PMD_INIT_FUNC_TRACE(); - adapter = (struct ixgbe_adapter *)dev->data->dev_private; + adapter = dev->data->dev_private; hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); sp_reta_size = ixgbe_reta_size_get(hw->mac.type); @@ -3925,7 +4090,8 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev, uint16_t max[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0}; uint8_t map[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0}; struct ixgbe_dcb_tc_config *tc; - uint32_t max_frame = dev->data->mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; + uint32_t max_frame = dev->data->mtu + RTE_ETHER_HDR_LEN + + RTE_ETHER_CRC_LEN; struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct ixgbe_bw_conf *bw_conf = @@ -4267,7 +4433,7 @@ ixgbe_vmdq_tx_hw_configure(struct ixgbe_hw *hw) IXGBE_WRITE_FLUSH(hw); } -static int __attribute__((cold)) +static int __rte_cold ixgbe_alloc_rx_queue_mbufs(struct ixgbe_rx_queue *rxq) { struct ixgbe_rx_entry *rxe = rxq->sw_ring; @@ -4497,7 +4663,7 @@ ixgbe_get_rscctl_maxdesc(struct rte_mempool *pool) /* MAXDESC * SRRCTL.BSIZEPKT must not exceed 64 KB minus one */ uint16_t maxdesc = - IPV4_MAX_PKT_LEN / + RTE_IPV4_MAX_PKT_LEN / (mp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM); if (maxdesc >= 16) @@ -4567,12 +4733,11 @@ ixgbe_set_ivar(struct rte_eth_dev *dev, u8 entry, u8 vector, s8 type) } } -void __attribute__((cold)) +void __rte_cold ixgbe_set_rx_function(struct rte_eth_dev *dev) { uint16_t i, rx_using_sse; - struct 
ixgbe_adapter *adapter = - (struct ixgbe_adapter *)dev->data->dev_private; + struct ixgbe_adapter *adapter = dev->data->dev_private; /* * In order to allow Vector Rx there are a few configuration @@ -4581,8 +4746,7 @@ ixgbe_set_rx_function(struct rte_eth_dev *dev) if (ixgbe_rx_vec_dev_conf_condition_check(dev) || !adapter->rx_bulk_alloc_allowed) { PMD_INIT_LOG(DEBUG, "Port[%d] doesn't meet Vector Rx " - "preconditions or RTE_IXGBE_INC_VECTOR is " - "not enabled", + "preconditions", dev->data->port_id); adapter->rx_vec_allowed = false; @@ -4822,7 +4986,7 @@ ixgbe_set_rsc(struct rte_eth_dev *dev) /* * Initializes Receive Unit. */ -int __attribute__((cold)) +int __rte_cold ixgbe_dev_rx_init(struct rte_eth_dev *dev) { struct ixgbe_hw *hw; @@ -4908,7 +5072,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev) * call to configure. */ if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC) - rxq->crc_len = ETHER_CRC_LEN; + rxq->crc_len = RTE_ETHER_CRC_LEN; else rxq->crc_len = 0; @@ -4999,7 +5163,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev) /* * Initializes Transmit Unit. */ -void __attribute__((cold)) +void __rte_cold ixgbe_dev_tx_init(struct rte_eth_dev *dev) { struct ixgbe_hw *hw; @@ -5074,8 +5238,12 @@ ixgbe_check_supported_loopback_mode(struct rte_eth_dev *dev) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - if (dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX) - if (hw->mac.type == ixgbe_mac_82599EB) + if (dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_TX_RX) + if (hw->mac.type == ixgbe_mac_82599EB || + hw->mac.type == ixgbe_mac_X540 || + hw->mac.type == ixgbe_mac_X550 || + hw->mac.type == ixgbe_mac_X550EM_x || + hw->mac.type == ixgbe_mac_X550EM_a) return 0; return -ENOTSUP; @@ -5084,7 +5252,7 @@ ixgbe_check_supported_loopback_mode(struct rte_eth_dev *dev) /* * Set up link for 82599 loopback mode Tx->Rx. */ -static inline void __attribute__((cold)) +static inline void __rte_cold ixgbe_setup_loopback_link_82599(struct ixgbe_hw *hw) { PMD_INIT_FUNC_TRACE(); @@ -5112,7 +5280,7 @@ ixgbe_setup_loopback_link_82599(struct ixgbe_hw *hw) /* * Start Transmit and Receive Units. */ -int __attribute__((cold)) +int __rte_cold ixgbe_dev_rxtx_start(struct rte_eth_dev *dev) { struct ixgbe_hw *hw; @@ -5172,6 +5340,11 @@ ixgbe_dev_rxtx_start(struct rte_eth_dev *dev) if (dev->data->dev_conf.lpbk_mode != 0) { if (hw->mac.type == ixgbe_mac_82599EB) ixgbe_setup_loopback_link_82599(hw); + else if (hw->mac.type == ixgbe_mac_X540 || + hw->mac.type == ixgbe_mac_X550 || + hw->mac.type == ixgbe_mac_X550EM_x || + hw->mac.type == ixgbe_mac_X550EM_a) + ixgbe_setup_loopback_link_x540_x550(hw, true); } #ifdef RTE_LIBRTE_SECURITY @@ -5195,7 +5368,7 @@ ixgbe_dev_rxtx_start(struct rte_eth_dev *dev) /* * Start Receive Units for specified queue. */ -int __attribute__((cold)) +int __rte_cold ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) { struct ixgbe_hw *hw; @@ -5237,12 +5410,11 @@ ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) /* * Stop Receive Units for specified queue. */ -int __attribute__((cold)) +int __rte_cold ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id) { struct ixgbe_hw *hw; - struct ixgbe_adapter *adapter = - (struct ixgbe_adapter *)dev->data->dev_private; + struct ixgbe_adapter *adapter = dev->data->dev_private; struct ixgbe_rx_queue *rxq; uint32_t rxdctl; int poll_ms; @@ -5278,7 +5450,7 @@ ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id) /* * Start Transmit Units for specified queue. 
*/ -int __attribute__((cold)) +int __rte_cold ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id) { struct ixgbe_hw *hw; @@ -5317,7 +5489,7 @@ ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id) /* * Stop Transmit Units for specified queue. */ -int __attribute__((cold)) +int __rte_cold ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id) { struct ixgbe_hw *hw; @@ -5414,7 +5586,7 @@ ixgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, /* * [VF] Initializes Receive Unit. */ -int __attribute__((cold)) +int __rte_cold ixgbevf_dev_rx_init(struct rte_eth_dev *dev) { struct ixgbe_hw *hw; @@ -5538,7 +5710,7 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev) /* * [VF] Initializes Transmit Unit. */ -void __attribute__((cold)) +void __rte_cold ixgbevf_dev_tx_init(struct rte_eth_dev *dev) { struct ixgbe_hw *hw; @@ -5579,7 +5751,7 @@ ixgbevf_dev_tx_init(struct rte_eth_dev *dev) /* * [VF] Start Transmit and Receive Units. */ -void __attribute__((cold)) +void __rte_cold ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev) { struct ixgbe_hw *hw; @@ -5744,14 +5916,15 @@ ixgbe_config_rss_filter(struct rte_eth_dev *dev, return 0; } -/* Stubs needed for linkage when CONFIG_RTE_IXGBE_INC_VECTOR is set to 'n' */ -__rte_weak int +/* Stubs needed for linkage when CONFIG_RTE_ARCH_PPC_64 is set */ +#if defined(RTE_ARCH_PPC_64) +int ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev __rte_unused *dev) { return -1; } -__rte_weak uint16_t +uint16_t ixgbe_recv_pkts_vec( void __rte_unused *rx_queue, struct rte_mbuf __rte_unused **rx_pkts, @@ -5760,7 +5933,7 @@ ixgbe_recv_pkts_vec( return 0; } -__rte_weak uint16_t +uint16_t ixgbe_recv_scattered_pkts_vec( void __rte_unused *rx_queue, struct rte_mbuf __rte_unused **rx_pkts, @@ -5769,8 +5942,29 @@ ixgbe_recv_scattered_pkts_vec( return 0; } -__rte_weak int +int ixgbe_rxq_vec_setup(struct ixgbe_rx_queue __rte_unused *rxq) { return -1; } + +uint16_t +ixgbe_xmit_fixed_burst_vec(void __rte_unused *tx_queue, + struct rte_mbuf __rte_unused **tx_pkts, + uint16_t __rte_unused nb_pkts) +{ + return 0; +} + +int +ixgbe_txq_vec_setup(struct ixgbe_tx_queue __rte_unused *txq) +{ + return -1; +} + +void +ixgbe_rx_queue_release_mbufs_vec(struct ixgbe_rx_queue __rte_unused *rxq) +{ + return; +} +#endif
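Illustrative note (not part of the patch above): the ixgbe_dev_tx_done_cleanup() callback that this diff adds is reached through the generic ethdev API rte_eth_tx_done_cleanup(). The sketch below shows one way an application might invoke it after this change; the helper name reclaim_tx_mbufs() is hypothetical and only the rte_eth_tx_done_cleanup() call itself is taken from the DPDK API.

#include <rte_ethdev.h>

/* Hypothetical helper: ask the driver to free mbufs of already-transmitted
 * packets on one Tx queue. A free_cnt of 0 means "free as many completed
 * packets as possible".
 */
static int
reclaim_tx_mbufs(uint16_t port_id, uint16_t queue_id)
{
	/* Returns the number of packets whose mbufs were freed, or a negative
	 * errno; for ixgbe the vector Tx path reports -ENOTSUP, as implemented
	 * by ixgbe_tx_done_cleanup_vec() in the patch above.
	 */
	return rte_eth_tx_done_cleanup(port_id, queue_id, 0);
}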