diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index 9e2fbba3e2..18938fc7a6 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -58,6 +58,10 @@
 #endif
 /* Bit Mask to indicate what bits required for building TX context */
 #define IXGBE_TX_OFFLOAD_MASK (                  \
+                PKT_TX_OUTER_IPV6 |              \
+                PKT_TX_OUTER_IPV4 |              \
+                PKT_TX_IPV6 |                    \
+                PKT_TX_IPV4 |                    \
                 PKT_TX_VLAN_PKT |                \
                 PKT_TX_IP_CKSUM |                \
                 PKT_TX_L4_MASK |                 \
@@ -2025,7 +2029,7 @@ ixgbe_recv_pkts_lro(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts,
         struct ixgbe_rx_entry *next_rxe = NULL;
         struct rte_mbuf *first_seg;
         struct rte_mbuf *rxm;
-        struct rte_mbuf *nmb;
+        struct rte_mbuf *nmb = NULL;
         union ixgbe_adv_rx_desc rxd;
         uint16_t data_len;
         uint16_t next_id;
@@ -2492,14 +2496,29 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
          * tx_rs_thresh must be a divisor of the ring size.
          * tx_free_thresh must be greater than 0.
          * tx_free_thresh must be less than the size of the ring minus 3.
+         * tx_free_thresh + tx_rs_thresh must not exceed nb_desc.
          * One descriptor in the TX ring is used as a sentinel to avoid a
          * H/W race condition, hence the maximum threshold constraints.
          * When set to zero use default values.
          */
-        tx_rs_thresh = (uint16_t)((tx_conf->tx_rs_thresh) ?
-                        tx_conf->tx_rs_thresh : DEFAULT_TX_RS_THRESH);
         tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
                         tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
+        /* force tx_rs_thresh to adapt to an aggressive tx_free_thresh */
+        tx_rs_thresh = (DEFAULT_TX_RS_THRESH + tx_free_thresh > nb_desc) ?
+                        nb_desc - tx_free_thresh : DEFAULT_TX_RS_THRESH;
+        if (tx_conf->tx_rs_thresh > 0)
+                tx_rs_thresh = tx_conf->tx_rs_thresh;
+        if (tx_rs_thresh + tx_free_thresh > nb_desc) {
+                PMD_INIT_LOG(ERR, "tx_rs_thresh + tx_free_thresh must not "
+                                "exceed nb_desc. (tx_rs_thresh=%u "
+                                "tx_free_thresh=%u nb_desc=%u port=%d queue=%d)",
+                                (unsigned int)tx_rs_thresh,
+                                (unsigned int)tx_free_thresh,
+                                (unsigned int)nb_desc,
+                                (int)dev->data->port_id,
+                                (int)queue_idx);
+                return -(EINVAL);
+        }
         if (tx_rs_thresh >= (nb_desc - 2)) {
                 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the number "
                         "of TX descriptors minus 2. (tx_rs_thresh=%u "
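
For reference, the new threshold resolution can be read as a small standalone routine. The sketch below mirrors only the logic added by this hunk (the divisor and sentinel checks that follow in the driver are omitted); the default values of 32 match DEFAULT_TX_RS_THRESH/DEFAULT_TX_FREE_THRESH in ixgbe_rxtx.h and are assumptions of this sketch.

#include <stdint.h>
#include <stdio.h>

#define DEFAULT_TX_RS_THRESH   32 /* assumed, per ixgbe_rxtx.h */
#define DEFAULT_TX_FREE_THRESH 32 /* assumed, per ixgbe_rxtx.h */

/* Resolve the two TX thresholds the way the hunk above does. */
static int
resolve_tx_thresh(uint16_t nb_desc, uint16_t conf_rs, uint16_t conf_free,
                  uint16_t *rs_out, uint16_t *free_out)
{
        uint16_t tx_free_thresh = conf_free ? conf_free : DEFAULT_TX_FREE_THRESH;
        /* shrink the default rs_thresh when free_thresh is aggressive */
        uint16_t tx_rs_thresh =
                (DEFAULT_TX_RS_THRESH + tx_free_thresh > nb_desc) ?
                        nb_desc - tx_free_thresh : DEFAULT_TX_RS_THRESH;

        if (conf_rs > 0)
                tx_rs_thresh = conf_rs; /* an explicit value always wins */
        if (tx_rs_thresh + tx_free_thresh > nb_desc)
                return -1; /* together they would exceed the ring */
        *rs_out = tx_rs_thresh;
        *free_out = tx_free_thresh;
        return 0;
}

int main(void)
{
        uint16_t rs, fr;
        /* 512-entry ring, user asks for free_thresh=500: rs shrinks to 12 */
        if (resolve_tx_thresh(512, 0, 500, &rs, &fr) == 0)
                printf("rs=%u free=%u\n", (unsigned int)rs, (unsigned int)fr);
        return 0;
}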
@@ -2849,21 +2868,22 @@ ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev)
                    DEV_RX_OFFLOAD_TCP_CKSUM   |
                    DEV_RX_OFFLOAD_KEEP_CRC    |
                    DEV_RX_OFFLOAD_JUMBO_FRAME |
+                   DEV_RX_OFFLOAD_VLAN_FILTER |
                    DEV_RX_OFFLOAD_SCATTER;

         if (hw->mac.type == ixgbe_mac_82598EB)
                 offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;

         if (ixgbe_is_vf(dev) == 0)
-                offloads |= (DEV_RX_OFFLOAD_VLAN_FILTER |
-                             DEV_RX_OFFLOAD_VLAN_EXTEND);
+                offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;

         /*
          * RSC is only supported by 82599 and x540 PF devices in a non-SR-IOV
          * mode.
          */
         if ((hw->mac.type == ixgbe_mac_82599EB ||
-             hw->mac.type == ixgbe_mac_X540) &&
+             hw->mac.type == ixgbe_mac_X540 ||
+             hw->mac.type == ixgbe_mac_X550) &&
             !RTE_ETH_DEV_SRIOV(dev).active)
                 offloads |= DEV_RX_OFFLOAD_TCP_LRO;
@@ -2935,7 +2955,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
                 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
         rxq->port_id = dev->data->port_id;
         if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
-                rxq->crc_len = ETHER_CRC_LEN;
+                rxq->crc_len = RTE_ETHER_CRC_LEN;
         else
                 rxq->crc_len = 0;
         rxq->drop_en = rx_conf->rx_drop_en;
@@ -3163,12 +3183,44 @@ ixgbe_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
         return RTE_ETH_TX_DESC_FULL;
 }

+/*
+ * Set up link loopback for X540/X550 mode Tx->Rx.
+ */
+static inline void __attribute__((cold))
+ixgbe_setup_loopback_link_x540_x550(struct ixgbe_hw *hw, bool enable)
+{
+        uint32_t macc;
+        PMD_INIT_FUNC_TRACE();
+
+        u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
+
+        hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL,
+                             IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg);
+        macc = IXGBE_READ_REG(hw, IXGBE_MACC);
+
+        if (enable) {
+                /* datasheet 15.2.1: disable AUTONEG (PHY Bit 7.0.C) */
+                autoneg_reg |= IXGBE_MII_AUTONEG_ENABLE;
+                /* datasheet 15.2.1: MACC.FLU = 1 (force link up) */
+                macc |= IXGBE_MACC_FLU;
+        } else {
+                autoneg_reg &= ~IXGBE_MII_AUTONEG_ENABLE;
+                macc &= ~IXGBE_MACC_FLU;
+        }
+
+        hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL,
+                              IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg);
+
+        IXGBE_WRITE_REG(hw, IXGBE_MACC, macc);
+}
+
 void __attribute__((cold))
 ixgbe_dev_clear_queues(struct rte_eth_dev *dev)
 {
         unsigned i;
         struct ixgbe_adapter *adapter =
                 (struct ixgbe_adapter *)dev->data->dev_private;
+        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

         PMD_INIT_FUNC_TRACE();
@@ -3189,6 +3241,14 @@ ixgbe_dev_clear_queues(struct rte_eth_dev *dev)
                         ixgbe_reset_rx_queue(adapter, rxq);
                 }
         }
+        /* If loopback mode was enabled, reconfigure the link accordingly */
+        if (dev->data->dev_conf.lpbk_mode != 0) {
+                if (hw->mac.type == ixgbe_mac_X540 ||
+                    hw->mac.type == ixgbe_mac_X550 ||
+                    hw->mac.type == ixgbe_mac_X550EM_x ||
+                    hw->mac.type == ixgbe_mac_X550EM_a)
+                        ixgbe_setup_loopback_link_x540_x550(hw, false);
+        }
 }

 void
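
The X540/X550 loopback helper above is a plain read-modify-write of two registers: the PHY autoneg control word and MACC, toggling only the autoneg-enable and force-link-up (FLU) bits so that everything else is preserved. A minimal standalone sketch of that pattern, with plain variables standing in for the registers; the bit positions are assumptions for illustration only.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MACC_FLU       0x00000001u /* force-link-up, assumed bit 0 */
#define MII_AUTONEG_EN 0x1000u     /* assumed autoneg-enable bit */

static uint32_t macc_reg;    /* stands in for IXGBE_READ_REG(hw, IXGBE_MACC) */
static uint16_t autoneg_reg; /* stands in for the PHY autoneg control word */

/* Mirror the enable/disable symmetry of the driver helper. */
static void set_loopback_link(bool enable)
{
        if (enable) {
                autoneg_reg |= MII_AUTONEG_EN;
                macc_reg |= MACC_FLU;
        } else {
                autoneg_reg &= (uint16_t)~MII_AUTONEG_EN;
                macc_reg &= ~MACC_FLU;
        }
        /* a real driver would now write both values back to the device */
}

int main(void)
{
        set_loopback_link(true);
        printf("enabled:  macc=%#x autoneg=%#x\n", macc_reg, autoneg_reg);
        set_loopback_link(false);
        printf("disabled: macc=%#x autoneg=%#x\n", macc_reg, autoneg_reg);
        return 0;
}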
@@ -3413,6 +3473,7 @@ static void
 ixgbe_rss_configure(struct rte_eth_dev *dev)
 {
         struct rte_eth_rss_conf rss_conf;
+        struct ixgbe_adapter *adapter;
         struct ixgbe_hw *hw;
         uint32_t reta;
         uint16_t i;
@@ -3421,6 +3482,7 @@ ixgbe_rss_configure(struct rte_eth_dev *dev)
         uint32_t reta_reg;

         PMD_INIT_FUNC_TRACE();
+        adapter = (struct ixgbe_adapter *)dev->data->dev_private;
         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

         sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
@@ -3430,16 +3492,18 @@ ixgbe_rss_configure(struct rte_eth_dev *dev)
          * The byte-swap is needed because NIC registers are in
          * little-endian order.
          */
-        reta = 0;
-        for (i = 0, j = 0; i < sp_reta_size; i++, j++) {
-                reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
-
-                if (j == dev->data->nb_rx_queues)
-                        j = 0;
-                reta = (reta << 8) | j;
-                if ((i & 3) == 3)
-                        IXGBE_WRITE_REG(hw, reta_reg,
-                                        rte_bswap32(reta));
+        if (adapter->rss_reta_updated == 0) {
+                reta = 0;
+                for (i = 0, j = 0; i < sp_reta_size; i++, j++) {
+                        reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
+
+                        if (j == dev->data->nb_rx_queues)
+                                j = 0;
+                        reta = (reta << 8) | j;
+                        if ((i & 3) == 3)
+                                IXGBE_WRITE_REG(hw, reta_reg,
+                                                rte_bswap32(reta));
+                }
         }

         /*
@@ -3916,7 +3980,8 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
         uint16_t max[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
         uint8_t map[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
         struct ixgbe_dcb_tc_config *tc;
-        uint32_t max_frame = dev->data->mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+        uint32_t max_frame = dev->data->mtu + RTE_ETHER_HDR_LEN +
+                        RTE_ETHER_CRC_LEN;
         struct ixgbe_hw *hw =
                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
         struct ixgbe_bw_conf *bw_conf =
@@ -4488,7 +4553,7 @@ ixgbe_get_rscctl_maxdesc(struct rte_mempool *pool)

         /* MAXDESC * SRRCTL.BSIZEPKT must not exceed 64 KB minus one */
         uint16_t maxdesc =
-                IPV4_MAX_PKT_LEN /
+                RTE_IPV4_MAX_PKT_LEN /
                         (mp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM);

         if (maxdesc >= 16)
@@ -4870,13 +4935,18 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
                 hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;

         /*
-         * If loopback mode is configured for 82599, set LPBK bit.
+         * If loopback mode is configured, set LPBK bit.
          */
-        if (hw->mac.type == ixgbe_mac_82599EB &&
-                        dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX)
+        if (dev->data->dev_conf.lpbk_mode != 0) {
+                rc = ixgbe_check_supported_loopback_mode(dev);
+                if (rc < 0) {
+                        PMD_INIT_LOG(ERR, "Unsupported loopback mode");
+                        return rc;
+                }
                 hlreg0 |= IXGBE_HLREG0_LPBK;
-        else
+        } else {
                 hlreg0 &= ~IXGBE_HLREG0_LPBK;
+        }

         IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
@@ -4894,7 +4964,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
                  * call to configure.
                  */
                 if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
-                        rxq->crc_len = ETHER_CRC_LEN;
+                        rxq->crc_len = RTE_ETHER_CRC_LEN;
                 else
                         rxq->crc_len = 0;
@@ -5052,6 +5122,25 @@ ixgbe_dev_tx_init(struct rte_eth_dev *dev)
         ixgbe_dev_mq_tx_configure(dev);
 }

+/*
+ * Check if requested loopback mode is supported
+ */
+int
+ixgbe_check_supported_loopback_mode(struct rte_eth_dev *dev)
+{
+        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+        if (dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_TX_RX)
+                if (hw->mac.type == ixgbe_mac_82599EB ||
+                    hw->mac.type == ixgbe_mac_X540 ||
+                    hw->mac.type == ixgbe_mac_X550 ||
+                    hw->mac.type == ixgbe_mac_X550EM_x ||
+                    hw->mac.type == ixgbe_mac_X550EM_a)
+                        return 0;
+
+        return -ENOTSUP;
+}
+
 /*
  * Set up link for 82599 loopback mode Tx->Rx.
  */
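
For reference, the now-guarded RETA fill in ixgbe_rss_configure() above packs four 8-bit queue indices into each 32-bit RETA register and byte-swaps the result because the NIC registers are little-endian. A standalone sketch of that loop; 128 entries is assumed as the table size (the common value, though ixgbe_reta_size_get() can return others), and __builtin_bswap32 stands in for rte_bswap32.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        const uint16_t reta_size = 128;   /* assumed sp_reta_size */
        const uint16_t nb_rx_queues = 4;
        uint32_t reta = 0;
        uint16_t i, j;

        for (i = 0, j = 0; i < reta_size; i++, j++) {
                if (j == nb_rx_queues)
                        j = 0; /* wrap: round-robin the queues over the table */
                reta = (reta << 8) | j;
                if ((i & 3) == 3) /* every 4th entry completes one register */
                        printf("RETA[%u] = 0x%08x\n", (unsigned int)(i >> 2),
                               (unsigned int)__builtin_bswap32(reta));
        }
        return 0;
}

With four queues this prints 0x03020100 for every register: entry 0 lands in the lowest byte after the swap, exactly what the little-endian hardware expects.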
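
Similarly, ixgbe_get_rscctl_maxdesc() (touched above only by the RTE_IPV4_MAX_PKT_LEN rename) derives how many receive buffers one RSC aggregation may chain so that MAXDESC times the buffer size stays under 64 KB. A sketch of that derivation; the 2048-byte data room and 128-byte headroom are assumed typical values, not taken from the patch.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        const uint32_t max_pkt_len = 65535; /* RTE_IPV4_MAX_PKT_LEN */
        const uint16_t data_room = 2048;    /* assumed mbuf_data_room_size */
        const uint16_t headroom = 128;      /* assumed RTE_PKTMBUF_HEADROOM */
        uint16_t maxdesc = max_pkt_len / (data_room - headroom);

        /* clamp down to the nearest value the RSCCTL field can encode */
        if (maxdesc >= 16)
                maxdesc = 16;
        else if (maxdesc >= 8)
                maxdesc = 8;
        else if (maxdesc >= 4)
                maxdesc = 4;
        else
                maxdesc = 1;
        printf("MAXDESC encoding selects %u descriptors per RSC\n",
               (unsigned int)maxdesc);
        return 0;
}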
@@ -5139,10 +5228,16 @@ ixgbe_dev_rxtx_start(struct rte_eth_dev *dev)
         rxctrl |= IXGBE_RXCTRL_RXEN;
         hw->mac.ops.enable_rx_dma(hw, rxctrl);

-        /* If loopback mode is enabled for 82599, set up the link accordingly */
-        if (hw->mac.type == ixgbe_mac_82599EB &&
-                        dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX)
-                ixgbe_setup_loopback_link_82599(hw);
+        /* If loopback mode is enabled, set up the link accordingly */
+        if (dev->data->dev_conf.lpbk_mode != 0) {
+                if (hw->mac.type == ixgbe_mac_82599EB)
+                        ixgbe_setup_loopback_link_82599(hw);
+                else if (hw->mac.type == ixgbe_mac_X540 ||
+                         hw->mac.type == ixgbe_mac_X550 ||
+                         hw->mac.type == ixgbe_mac_X550EM_x ||
+                         hw->mac.type == ixgbe_mac_X550EM_a)
+                        ixgbe_setup_loopback_link_x540_x550(hw, true);
+        }

 #ifdef RTE_LIBRTE_SECURITY
         if ((dev->data->dev_conf.rxmode.offloads &
@@ -5260,6 +5355,7 @@ ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)

         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
         txq = dev->data->tx_queues[tx_queue_id];
+        IXGBE_WRITE_REG(hw, IXGBE_TDH(txq->reg_idx), 0);
         txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
         txdctl |= IXGBE_TXDCTL_ENABLE;
         IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
@@ -5277,7 +5373,6 @@
                         tx_queue_id);
         }
         rte_wmb();
-        IXGBE_WRITE_REG(hw, IXGBE_TDH(txq->reg_idx), 0);
         IXGBE_WRITE_REG(hw, IXGBE_TDT(txq->reg_idx), 0);

         dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
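
From the application side, the loopback paths added above are driven entirely by rte_eth_conf.lpbk_mode at configure time; unsupported MAC types now fail in ixgbe_dev_rx_init() with -ENOTSUP via ixgbe_check_supported_loopback_mode(). A minimal sketch of requesting Tx->Rx loopback; the mode value 1 (IXGBE_LPBK_TX_RX in this series) is an assumption of the sketch.

#include <string.h>
#include <rte_ethdev.h>

/* Configure a port with one RX and one TX queue in Tx->Rx loopback. */
static int enable_tx_rx_loopback(uint16_t port_id)
{
        struct rte_eth_conf conf;

        memset(&conf, 0, sizeof(conf));
        conf.lpbk_mode = 1; /* assumed IXGBE_LPBK_TX_RX; 0 = normal operation */

        /* one queue each way is enough to exercise the loop */
        return rte_eth_dev_configure(port_id, 1, 1, &conf);
}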