diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index 9a79d18e49..f4f0ee68ac 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -436,7 +436,8 @@ ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq,
 	case PKT_TX_SCTP_CKSUM:
 		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP |
 			IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
-		mss_l4len_idx |= sizeof(struct sctp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT;
+		mss_l4len_idx |= sizeof(struct rte_sctp_hdr)
+			<< IXGBE_ADVTXD_L4LEN_SHIFT;
 		tx_offload_mask.l2_len |= ~0;
 		tx_offload_mask.l3_len |= ~0;
 		break;
@@ -2029,7 +2030,7 @@ ixgbe_recv_pkts_lro(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts,
 		struct ixgbe_rx_entry *next_rxe = NULL;
 		struct rte_mbuf *first_seg;
 		struct rte_mbuf *rxm;
-		struct rte_mbuf *nmb;
+		struct rte_mbuf *nmb = NULL;
 		union ixgbe_adv_rx_desc rxd;
 		uint16_t data_len;
 		uint16_t next_id;
@@ -2496,14 +2497,29 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	 * tx_rs_thresh must be a divisor of the ring size.
 	 * tx_free_thresh must be greater than 0.
 	 * tx_free_thresh must be less than the size of the ring minus 3.
+	 * tx_free_thresh + tx_rs_thresh must not exceed nb_desc.
 	 * One descriptor in the TX ring is used as a sentinel to avoid a
 	 * H/W race condition, hence the maximum threshold constraints.
 	 * When set to zero use default values.
 	 */
-	tx_rs_thresh = (uint16_t)((tx_conf->tx_rs_thresh) ?
-			tx_conf->tx_rs_thresh : DEFAULT_TX_RS_THRESH);
 	tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
 			tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
+	/* force tx_rs_thresh to adapt to an aggressive tx_free_thresh */
+	tx_rs_thresh = (DEFAULT_TX_RS_THRESH + tx_free_thresh > nb_desc) ?
+			nb_desc - tx_free_thresh : DEFAULT_TX_RS_THRESH;
+	if (tx_conf->tx_rs_thresh > 0)
+		tx_rs_thresh = tx_conf->tx_rs_thresh;
+	if (tx_rs_thresh + tx_free_thresh > nb_desc) {
+		PMD_INIT_LOG(ERR, "tx_rs_thresh + tx_free_thresh must not "
+			     "exceed nb_desc. (tx_rs_thresh=%u "
+			     "tx_free_thresh=%u nb_desc=%u port=%d queue=%d)",
+			     (unsigned int)tx_rs_thresh,
+			     (unsigned int)tx_free_thresh,
+			     (unsigned int)nb_desc,
+			     (int)dev->data->port_id,
+			     (int)queue_idx);
+		return -(EINVAL);
+	}
 	if (tx_rs_thresh >= (nb_desc - 2)) {
 		PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the number "
			     "of TX descriptors minus 2. (tx_rs_thresh=%u "
@@ -2853,14 +2869,14 @@ ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev)
 		   DEV_RX_OFFLOAD_TCP_CKSUM   |
 		   DEV_RX_OFFLOAD_KEEP_CRC    |
 		   DEV_RX_OFFLOAD_JUMBO_FRAME |
+		   DEV_RX_OFFLOAD_VLAN_FILTER |
 		   DEV_RX_OFFLOAD_SCATTER;
 
 	if (hw->mac.type == ixgbe_mac_82598EB)
 		offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
 
 	if (ixgbe_is_vf(dev) == 0)
-		offloads |= (DEV_RX_OFFLOAD_VLAN_FILTER |
-			     DEV_RX_OFFLOAD_VLAN_EXTEND);
+		offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
 
 	/*
	 * RSC is only supported by 82599 and x540 PF devices in a non-SR-IOV
@@ -2940,7 +2956,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 			queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
 	rxq->port_id = dev->data->port_id;
 	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
-		rxq->crc_len = ETHER_CRC_LEN;
+		rxq->crc_len = RTE_ETHER_CRC_LEN;
 	else
 		rxq->crc_len = 0;
 	rxq->drop_en = rx_conf->rx_drop_en;
@@ -3168,12 +3184,44 @@ ixgbe_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
 	return RTE_ETH_TX_DESC_FULL;
 }
 
+/*
+ * Set up link for X540/X550 loopback mode Tx->Rx.
+ */
+static inline void __attribute__((cold))
+ixgbe_setup_loopback_link_x540_x550(struct ixgbe_hw *hw, bool enable)
+{
+	uint32_t macc;
+	PMD_INIT_FUNC_TRACE();
+
+	u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
+
+	hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL,
+			     IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg);
+	macc = IXGBE_READ_REG(hw, IXGBE_MACC);
+
+	if (enable) {
+		/* datasheet 15.2.1: disable AUTONEG (PHY Bit 7.0.C) */
+		autoneg_reg |= IXGBE_MII_AUTONEG_ENABLE;
+		/* datasheet 15.2.1: MACC.FLU = 1 (force link up) */
+		macc |= IXGBE_MACC_FLU;
+	} else {
+		autoneg_reg &= ~IXGBE_MII_AUTONEG_ENABLE;
+		macc &= ~IXGBE_MACC_FLU;
+	}
+
+	hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL,
+			      IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg);
+
+	IXGBE_WRITE_REG(hw, IXGBE_MACC, macc);
+}
+
 void __attribute__((cold))
 ixgbe_dev_clear_queues(struct rte_eth_dev *dev)
 {
 	unsigned i;
 	struct ixgbe_adapter *adapter =
 		(struct ixgbe_adapter *)dev->data->dev_private;
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
 	PMD_INIT_FUNC_TRACE();
 
@@ -3194,6 +3242,14 @@ ixgbe_dev_clear_queues(struct rte_eth_dev *dev)
 			ixgbe_reset_rx_queue(adapter, rxq);
 		}
 	}
+	/* If loopback mode was enabled, reconfigure the link accordingly */
+	if (dev->data->dev_conf.lpbk_mode != 0) {
+		if (hw->mac.type == ixgbe_mac_X540 ||
+		    hw->mac.type == ixgbe_mac_X550 ||
+		    hw->mac.type == ixgbe_mac_X550EM_x ||
+		    hw->mac.type == ixgbe_mac_X550EM_a)
+			ixgbe_setup_loopback_link_x540_x550(hw, false);
+	}
 }
 
 void
@@ -3925,7 +3981,8 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 	uint16_t max[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
 	uint8_t map[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
 	struct ixgbe_dcb_tc_config *tc;
-	uint32_t max_frame = dev->data->mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+	uint32_t max_frame = dev->data->mtu + RTE_ETHER_HDR_LEN +
+		RTE_ETHER_CRC_LEN;
 	struct ixgbe_hw *hw =
 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct ixgbe_bw_conf *bw_conf =
@@ -4497,7 +4554,7 @@ ixgbe_get_rscctl_maxdesc(struct rte_mempool *pool)
 
 	/* MAXDESC * SRRCTL.BSIZEPKT must not exceed 64 KB minus one */
 	uint16_t maxdesc =
-		IPV4_MAX_PKT_LEN /
+		RTE_IPV4_MAX_PKT_LEN /
 		(mp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM);
 
 	if (maxdesc >= 16)
@@ -4879,13 +4936,18 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 		hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
 
 	/*
-	 * If loopback mode is configured for 82599, set LPBK bit.
+	 * If loopback mode is configured, set LPBK bit.
 	 */
-	if (hw->mac.type == ixgbe_mac_82599EB &&
-	    dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX)
+	if (dev->data->dev_conf.lpbk_mode != 0) {
+		rc = ixgbe_check_supported_loopback_mode(dev);
+		if (rc < 0) {
+			PMD_INIT_LOG(ERR, "Unsupported loopback mode");
+			return rc;
+		}
 		hlreg0 |= IXGBE_HLREG0_LPBK;
-	else
+	} else {
 		hlreg0 &= ~IXGBE_HLREG0_LPBK;
+	}
 
 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
 
@@ -4903,7 +4965,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 		 * call to configure.
 		 */
 		if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
-			rxq->crc_len = ETHER_CRC_LEN;
+			rxq->crc_len = RTE_ETHER_CRC_LEN;
 		else
 			rxq->crc_len = 0;
 
@@ -5061,6 +5123,25 @@ ixgbe_dev_tx_init(struct rte_eth_dev *dev)
 	ixgbe_dev_mq_tx_configure(dev);
 }
 
+/*
+ * Check if requested loopback mode is supported
+ */
+int
+ixgbe_check_supported_loopback_mode(struct rte_eth_dev *dev)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	if (dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_TX_RX)
+		if (hw->mac.type == ixgbe_mac_82599EB ||
+		    hw->mac.type == ixgbe_mac_X540 ||
+		    hw->mac.type == ixgbe_mac_X550 ||
+		    hw->mac.type == ixgbe_mac_X550EM_x ||
+		    hw->mac.type == ixgbe_mac_X550EM_a)
+			return 0;
+
+	return -ENOTSUP;
+}
+
 /*
  * Set up link for 82599 loopback mode Tx->Rx.
  */
@@ -5148,10 +5229,16 @@ ixgbe_dev_rxtx_start(struct rte_eth_dev *dev)
 	rxctrl |= IXGBE_RXCTRL_RXEN;
 	hw->mac.ops.enable_rx_dma(hw, rxctrl);
 
-	/* If loopback mode is enabled for 82599, set up the link accordingly */
-	if (hw->mac.type == ixgbe_mac_82599EB &&
-	    dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX)
-		ixgbe_setup_loopback_link_82599(hw);
+	/* If loopback mode is enabled, set up the link accordingly */
+	if (dev->data->dev_conf.lpbk_mode != 0) {
+		if (hw->mac.type == ixgbe_mac_82599EB)
+			ixgbe_setup_loopback_link_82599(hw);
+		else if (hw->mac.type == ixgbe_mac_X540 ||
+			 hw->mac.type == ixgbe_mac_X550 ||
+			 hw->mac.type == ixgbe_mac_X550EM_x ||
+			 hw->mac.type == ixgbe_mac_X550EM_a)
+			ixgbe_setup_loopback_link_x540_x550(hw, true);
+	}
 
 #ifdef RTE_LIBRTE_SECURITY
 	if ((dev->data->dev_conf.rxmode.offloads &
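
A worked example of the Tx threshold clamping above: with nb_desc = 512 and
tx_free_thresh = 496, the driver default DEFAULT_TX_RS_THRESH (32 in ixgbe)
would violate tx_rs_thresh + tx_free_thresh <= nb_desc (32 + 496 = 528), so
tx_rs_thresh is clamped to 512 - 496 = 16, which also still divides the ring
size. An explicit tx_conf->tx_rs_thresh always takes precedence, and a
combination that exceeds nb_desc is rejected with -EINVAL instead of being
silently accepted.

The following is a minimal sketch, not part of the patch, of how an
application would request the Tx->Rx loopback that this change extends from
82599 to X540/X550. The helper name enable_tx_rx_loopback, the queue sizes,
and the mempool parameter are illustrative assumptions; the rte_ethdev calls
themselves are the standard DPDK API.

    #include <rte_ethdev.h>
    #include <rte_mbuf.h>

    /* Hypothetical helper: configure one Rx/Tx queue pair on an ixgbe
     * port with Tx->Rx loopback enabled and start the port.
     */
    static int
    enable_tx_rx_loopback(uint16_t port_id, struct rte_mempool *mp)
    {
            struct rte_eth_conf conf = { 0 };
            int ret;

            conf.lpbk_mode = 1; /* Tx->Rx loopback (IXGBE_LPBK_TX_RX) */

            ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
            if (ret < 0)
                    return ret;

            ret = rte_eth_rx_queue_setup(port_id, 0, 128,
                            rte_eth_dev_socket_id(port_id), NULL, mp);
            if (ret < 0)
                    return ret;

            ret = rte_eth_tx_queue_setup(port_id, 0, 512,
                            rte_eth_dev_socket_id(port_id), NULL);
            if (ret < 0)
                    return ret;

            /*
             * ixgbe_check_supported_loopback_mode() is called from
             * ixgbe_dev_rx_init(), which runs during port start, so an
             * unsupported MAC makes rte_eth_dev_start() fail rather
             * than rte_eth_dev_configure().
             */
            return rte_eth_dev_start(port_id);
    }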