From: Wei Dai
Date: Thu, 22 Mar 2018 03:41:02 +0000 (+0800)
Subject: net/ixgbe: convert to new Rx offloads API
X-Git-Url: http://git.droids-corp.org/?a=commitdiff_plain;h=ec3b1124d14d;p=dpdk.git

net/ixgbe: convert to new Rx offloads API

Ethdev Rx offloads API has changed since:
commit ce17eddefc20 ("ethdev: introduce Rx queue offloads API")
This commit supports the new Rx offloads API.

Signed-off-by: Wei Dai
Acked-by: Qi Zhang
---
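A note for context, not part of the commit: below is a minimal sketch of
how an application requests Rx offloads with the new bit-mask API that
this patch converts the PMD to. The setup_port() helper, its port id,
ring size and mempool are hypothetical example values, and the offload
selection is only one possible choice.

#include <rte_ethdev.h>

static int
setup_port(uint16_t port_id, struct rte_mempool *mb_pool)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf conf = { 0 };
	struct rte_eth_rxconf rxconf;
	int ret;

	rte_eth_dev_info_get(port_id, &dev_info);

	/* Port-wide Rx offloads are now a single bit mask; request only
	 * what the PMD advertises, otherwise dev_configure() returns
	 * -ENOTSUP with this patch applied. */
	conf.rxmode.offloads = (DEV_RX_OFFLOAD_CHECKSUM |
				DEV_RX_OFFLOAD_VLAN_STRIP) &
			       dev_info.rx_offload_capa;

	ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
	if (ret != 0)
		return ret;

	/* Per-queue offloads: start from the PMD defaults and mirror the
	 * port-wide configuration, which always passes the queue-setup
	 * check added by this patch. */
	rxconf = dev_info.default_rxconf;
	rxconf.offloads = conf.rxmode.offloads;

	return rte_eth_rx_queue_setup(port_id, 0, 512,
				      rte_eth_dev_socket_id(port_id),
				      &rxconf, mb_pool);
}
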
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index fa1315e276..3a531e199e 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -2066,19 +2066,22 @@ ixgbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
 static int
 ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 {
+	struct rte_eth_rxmode *rxmode;
+	rxmode = &dev->data->dev_conf.rxmode;
+
 	if (mask & ETH_VLAN_STRIP_MASK) {
 		ixgbe_vlan_hw_strip_config(dev);
 	}
 
 	if (mask & ETH_VLAN_FILTER_MASK) {
-		if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
 			ixgbe_vlan_hw_filter_enable(dev);
 		else
 			ixgbe_vlan_hw_filter_disable(dev);
 	}
 
 	if (mask & ETH_VLAN_EXTEND_MASK) {
-		if (dev->data->dev_conf.rxmode.hw_vlan_extend)
+		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
 			ixgbe_vlan_hw_extend_enable(dev);
 		else
 			ixgbe_vlan_hw_extend_disable(dev);
@@ -2293,6 +2296,8 @@ ixgbe_dev_configure(struct rte_eth_dev *dev)
 		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
 	struct ixgbe_adapter *adapter =
 		(struct ixgbe_adapter *)dev->data->dev_private;
+	struct rte_eth_dev_info dev_info;
+	uint64_t rx_offloads;
 	int ret;
 
 	PMD_INIT_FUNC_TRACE();
@@ -2304,6 +2309,15 @@ ixgbe_dev_configure(struct rte_eth_dev *dev)
 		return ret;
 	}
 
+	ixgbe_dev_info_get(dev, &dev_info);
+	rx_offloads = dev->data->dev_conf.rxmode.offloads;
+	if ((rx_offloads & dev_info.rx_offload_capa) != rx_offloads) {
+		PMD_DRV_LOG(ERR, "Some Rx offloads are not supported "
+			    "requested 0x%" PRIx64 " supported 0x%" PRIx64,
+			    rx_offloads, dev_info.rx_offload_capa);
+		return -ENOTSUP;
+	}
+
 	/* set flag to update link status after init */
 	intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
 
@@ -3593,30 +3607,9 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	else
 		dev_info->max_vmdq_pools = ETH_64_POOLS;
 	dev_info->vmdq_queue_num = dev_info->max_rx_queues;
-	dev_info->rx_offload_capa =
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_CKSUM |
-		DEV_RX_OFFLOAD_CRC_STRIP;
-
-	/*
-	 * RSC is only supported by 82599 and x540 PF devices in a non-SR-IOV
-	 * mode.
-	 */
-	if ((hw->mac.type == ixgbe_mac_82599EB ||
-	     hw->mac.type == ixgbe_mac_X540) &&
-	    !RTE_ETH_DEV_SRIOV(dev).active)
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_LRO;
-
-	if (hw->mac.type == ixgbe_mac_82599EB ||
-	    hw->mac.type == ixgbe_mac_X540)
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_MACSEC_STRIP;
-
-	if (hw->mac.type == ixgbe_mac_X550 ||
-	    hw->mac.type == ixgbe_mac_X550EM_x ||
-	    hw->mac.type == ixgbe_mac_X550EM_a)
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
+	dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev);
+	dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) |
+				     dev_info->rx_queue_offload_capa);
 
 	dev_info->tx_offload_capa =
 		DEV_TX_OFFLOAD_VLAN_INSERT |
@@ -3636,10 +3629,8 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
 
 #ifdef RTE_LIBRTE_SECURITY
-	if (dev->security_ctx) {
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_SECURITY;
+	if (dev->security_ctx)
 		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
-	}
 #endif
 
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
@@ -3650,6 +3641,7 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		},
 		.rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH,
 		.rx_drop_en = 0,
+		.offloads = 0,
 	};
 
 	dev_info->default_txconf = (struct rte_eth_txconf) {
@@ -3742,11 +3734,9 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev,
 		dev_info->max_vmdq_pools = ETH_16_POOLS;
 	else
 		dev_info->max_vmdq_pools = ETH_64_POOLS;
-	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
-				DEV_RX_OFFLOAD_IPV4_CKSUM |
-				DEV_RX_OFFLOAD_UDP_CKSUM |
-				DEV_RX_OFFLOAD_TCP_CKSUM |
-				DEV_RX_OFFLOAD_CRC_STRIP;
+	dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev);
+	dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) |
+				     dev_info->rx_queue_offload_capa);
 	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
 				DEV_TX_OFFLOAD_IPV4_CKSUM |
 				DEV_TX_OFFLOAD_UDP_CKSUM |
@@ -3762,6 +3752,7 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev,
 		},
 		.rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH,
 		.rx_drop_en = 0,
+		.offloads = 0,
 	};
 
 	dev_info->default_txconf = (struct rte_eth_txconf) {
@@ -4845,10 +4836,12 @@ ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 
 	/* switch to jumbo mode if needed */
 	if (frame_size > ETHER_MAX_LEN) {
-		dev->data->dev_conf.rxmode.jumbo_frame = 1;
+		dev->data->dev_conf.rxmode.offloads |=
+			DEV_RX_OFFLOAD_JUMBO_FRAME;
 		hlreg0 |= IXGBE_HLREG0_JUMBOEN;
 	} else {
-		dev->data->dev_conf.rxmode.jumbo_frame = 0;
+		dev->data->dev_conf.rxmode.offloads &=
+			~DEV_RX_OFFLOAD_JUMBO_FRAME;
 		hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
 	}
 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
@@ -4897,23 +4890,34 @@ ixgbevf_dev_configure(struct rte_eth_dev *dev)
 	struct rte_eth_conf *conf = &dev->data->dev_conf;
 	struct ixgbe_adapter *adapter =
 			(struct ixgbe_adapter *)dev->data->dev_private;
+	struct rte_eth_dev_info dev_info;
+	uint64_t rx_offloads;
 
 	PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
 		     dev->data->port_id);
 
+	ixgbevf_dev_info_get(dev, &dev_info);
+	rx_offloads = dev->data->dev_conf.rxmode.offloads;
+	if ((rx_offloads & dev_info.rx_offload_capa) != rx_offloads) {
+		PMD_DRV_LOG(ERR, "Some Rx offloads are not supported "
+			    "requested 0x%" PRIx64 " supported 0x%" PRIx64,
+			    rx_offloads, dev_info.rx_offload_capa);
+		return -ENOTSUP;
+	}
+
 	/*
 	 * VF has no ability to enable/disable HW CRC
 	 * Keep the persistent behavior the same as Host PF
 	 */
#ifndef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC
-	if (!conf->rxmode.hw_strip_crc) {
+	if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
 		PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
-		conf->rxmode.hw_strip_crc = 1;
+		conf->rxmode.offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
 	}
 #else
-	if (conf->rxmode.hw_strip_crc) {
+	if (conf->rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP) {
 		PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
-		conf->rxmode.hw_strip_crc = 0;
+		conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_CRC_STRIP;
 	}
 #endif
 
@@ -5801,6 +5805,7 @@ ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
 			   uint16_t queue_idx, uint16_t tx_rate)
 {
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct rte_eth_rxmode *rxmode;
 	uint32_t rf_dec, rf_int;
 	uint32_t bcnrc_val;
 	uint16_t link_speed = dev->data->dev_link.link_speed;
@@ -5822,14 +5827,14 @@ ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
 		bcnrc_val = 0;
 	}
 
+	rxmode = &dev->data->dev_conf.rxmode;
 	/*
 	 * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
 	 * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported, otherwise
 	 * set as 0x4.
 	 */
-	if ((dev->data->dev_conf.rxmode.jumbo_frame == 1) &&
-	    (dev->data->dev_conf.rxmode.max_rx_pkt_len >=
-	     IXGBE_MAX_JUMBO_FRAME_SIZE))
+	if ((rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) &&
+	    (rxmode->max_rx_pkt_len >= IXGBE_MAX_JUMBO_FRAME_SIZE))
 		IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
 			IXGBE_MMW_SIZE_JUMBO_FRAME);
 	else
@@ -6176,7 +6181,7 @@ ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
 	/* refuse mtu that requires the support of scattered packets when this
 	 * feature has not been enabled before.
 	 */
-	if (!rx_conf->enable_scatter &&
+	if (!(rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER) &&
 	    (max_frame + 2 * IXGBE_VLAN_TAG_SIZE >
 	     dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
 		return -EINVAL;
diff --git a/drivers/net/ixgbe/ixgbe_ipsec.c b/drivers/net/ixgbe/ixgbe_ipsec.c
index 176ec0fdda..29e4728c7c 100644
--- a/drivers/net/ixgbe/ixgbe_ipsec.c
+++ b/drivers/net/ixgbe/ixgbe_ipsec.c
@@ -598,13 +598,15 @@ ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
 {
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	uint32_t reg;
+	uint64_t rx_offloads;
 
+	rx_offloads = dev->data->dev_conf.rxmode.offloads;
 	/* sanity checks */
-	if (dev->data->dev_conf.rxmode.enable_lro) {
+	if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) {
 		PMD_DRV_LOG(ERR, "RSC and IPsec not supported");
 		return -1;
 	}
-	if (!dev->data->dev_conf.rxmode.hw_strip_crc) {
+	if (!(rx_offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
 		PMD_DRV_LOG(ERR, "HW CRC strip needs to be enabled for IPsec");
 		return -1;
 	}
@@ -624,7 +626,7 @@ ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
 		reg |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_RXCRCSTRP;
 		IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg);
 
-		if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SECURITY) {
+		if (rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
 			IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, 0);
 			reg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
 			if (reg != 0) {
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index 5c45eb4a95..f6198f0696 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -2769,6 +2769,97 @@ ixgbe_reset_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_rx_queue *rxq)
 #endif
 }
 
+static int
+ixgbe_is_vf(struct rte_eth_dev *dev)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	switch (hw->mac.type) {
+	case ixgbe_mac_82599_vf:
+	case ixgbe_mac_X540_vf:
+	case ixgbe_mac_X550_vf:
+	case ixgbe_mac_X550EM_x_vf:
+	case ixgbe_mac_X550EM_a_vf:
+		return 1;
+	default:
+		return 0;
+	}
+}
+
+uint64_t
+ixgbe_get_rx_queue_offloads(struct rte_eth_dev *dev)
+{
+	uint64_t offloads = 0;
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	if (hw->mac.type != ixgbe_mac_82598EB)
+		offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+
+	return offloads;
+}
+
+uint64_t
+ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev)
+{
+	uint64_t offloads;
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	offloads = DEV_RX_OFFLOAD_IPV4_CKSUM |
+		   DEV_RX_OFFLOAD_UDP_CKSUM |
+		   DEV_RX_OFFLOAD_TCP_CKSUM |
+		   DEV_RX_OFFLOAD_CRC_STRIP |
+		   DEV_RX_OFFLOAD_JUMBO_FRAME |
+		   DEV_RX_OFFLOAD_SCATTER;
+
+	if (hw->mac.type == ixgbe_mac_82598EB)
+		offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+
+	if (ixgbe_is_vf(dev) == 0)
+		offloads |= (DEV_RX_OFFLOAD_VLAN_FILTER |
+			     DEV_RX_OFFLOAD_VLAN_EXTEND);
+
+	/*
+	 * RSC is only supported by 82599 and x540 PF devices in a non-SR-IOV
+	 * mode.
+	 */
+	if ((hw->mac.type == ixgbe_mac_82599EB ||
+	     hw->mac.type == ixgbe_mac_X540) &&
+	    !RTE_ETH_DEV_SRIOV(dev).active)
+		offloads |= DEV_RX_OFFLOAD_TCP_LRO;
+
+	if (hw->mac.type == ixgbe_mac_82599EB ||
+	    hw->mac.type == ixgbe_mac_X540)
+		offloads |= DEV_RX_OFFLOAD_MACSEC_STRIP;
+
+	if (hw->mac.type == ixgbe_mac_X550 ||
+	    hw->mac.type == ixgbe_mac_X550EM_x ||
+	    hw->mac.type == ixgbe_mac_X550EM_a)
+		offloads |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
+
+#ifdef RTE_LIBRTE_SECURITY
+	if (dev->security_ctx)
+		offloads |= DEV_RX_OFFLOAD_SECURITY;
+#endif
+
+	return offloads;
+}
+
+static int
+ixgbe_check_rx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
+{
+	uint64_t port_offloads = dev->data->dev_conf.rxmode.offloads;
+	uint64_t queue_supported = ixgbe_get_rx_queue_offloads(dev);
+	uint64_t port_supported = ixgbe_get_rx_port_offloads(dev);
+
+	if ((requested & (queue_supported | port_supported)) != requested)
+		return 0;
+
+	if ((port_offloads ^ requested) & port_supported)
+		return 0;
+
+	return 1;
+}
+
 int __attribute__((cold))
 ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 			 uint16_t queue_idx,
@@ -2787,6 +2878,18 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	PMD_INIT_FUNC_TRACE();
 	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
+	if (!ixgbe_check_rx_queue_offloads(dev, rx_conf->offloads)) {
+		PMD_INIT_LOG(ERR, "%p: Rx queue offloads 0x%" PRIx64
+			" don't match port offloads 0x%" PRIx64
+			" or supported port offloads 0x%" PRIx64
+			" or supported queue offloads 0x%" PRIx64,
+			(void *)dev, rx_conf->offloads,
+			dev->data->dev_conf.rxmode.offloads,
+			ixgbe_get_rx_port_offloads(dev),
+			ixgbe_get_rx_queue_offloads(dev));
+		return -ENOTSUP;
+	}
+
 	/*
 	 * Validate number of receive descriptors.
 	 * It must not exceed hardware maximum, and must be multiple
@@ -2816,8 +2919,8 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
 		queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
 	rxq->port_id = dev->data->port_id;
-	rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ?
-							0 : ETHER_CRC_LEN);
+	rxq->crc_len = (uint8_t)((dev->data->dev_conf.rxmode.offloads &
+		DEV_RX_OFFLOAD_CRC_STRIP) ? 0 : ETHER_CRC_LEN);
 	rxq->drop_en = rx_conf->rx_drop_en;
 	rxq->rx_deferred_start = rx_conf->rx_deferred_start;
 	rxq->offloads = rx_conf->offloads;
@@ -4575,7 +4678,7 @@ ixgbe_set_rsc(struct rte_eth_dev *dev)
 	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO)
 		rsc_capable = true;
 
-	if (!rsc_capable && rx_conf->enable_lro) {
+	if (!rsc_capable && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
 		PMD_INIT_LOG(CRIT, "LRO is requested on HW that doesn't "
 				   "support it");
 		return -EINVAL;
@@ -4583,7 +4686,8 @@ ixgbe_set_rsc(struct rte_eth_dev *dev)
 
 	/* RSC global configuration (chapter 4.6.7.2.1 of 82599 Spec) */
 
-	if (!rx_conf->hw_strip_crc && rx_conf->enable_lro) {
+	if (!(rx_conf->offloads & DEV_RX_OFFLOAD_CRC_STRIP) &&
+	    (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
 		/*
 		 * According to chapter of 4.6.7.2.1 of the Spec Rev.
 		 * 3.0 RSC configuration requires HW CRC stripping being
@@ -4597,7 +4701,7 @@ ixgbe_set_rsc(struct rte_eth_dev *dev)
 
 	/* RFCTL configuration */
 	rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL);
-	if ((rsc_capable) && (rx_conf->enable_lro))
+	if ((rsc_capable) && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO))
 		/*
 		 * Since NFS packets coalescing is not supported - clear
 		 * RFCTL.NFSW_DIS and RFCTL.NFSR_DIS when RSC is
@@ -4610,7 +4714,7 @@ ixgbe_set_rsc(struct rte_eth_dev *dev)
 	IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl);
 
 	/* If LRO hasn't been requested - we are done here. */
-	if (!rx_conf->enable_lro)
+	if (!(rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO))
 		return 0;
 
 	/* Set RDRXCTL.RSCACKC bit */
@@ -4730,7 +4834,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 	 * Configure CRC stripping, if any.
 	 */
 	hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
-	if (rx_conf->hw_strip_crc)
+	if (rx_conf->offloads & DEV_RX_OFFLOAD_CRC_STRIP)
 		hlreg0 |= IXGBE_HLREG0_RXCRCSTRP;
 	else
 		hlreg0 &= ~IXGBE_HLREG0_RXCRCSTRP;
@@ -4738,7 +4842,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 	/*
 	 * Configure jumbo frame support, if any.
 	 */
-	if (rx_conf->jumbo_frame == 1) {
+	if (rx_conf->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
 		hlreg0 |= IXGBE_HLREG0_JUMBOEN;
 		maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
 		maxfrs &= 0x0000FFFF;
@@ -4758,6 +4862,11 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 
 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
 
+	/*
+	 * Assume no header split and no VLAN strip support
+	 * on any Rx queue first.
+	 */
+	rx_conf->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
 	/* Setup RX queues */
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
 		rxq = dev->data->rx_queues[i];
@@ -4766,7 +4875,8 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 		 * Reset crc_len in case it was changed after queue setup by a
 		 * call to configure.
 		 */
-		rxq->crc_len = rx_conf->hw_strip_crc ? 0 : ETHER_CRC_LEN;
+		rxq->crc_len = (rx_conf->offloads & DEV_RX_OFFLOAD_CRC_STRIP) ?
+				0 : ETHER_CRC_LEN;
 
 		/* Setup the Base and Length of the Rx Descriptor Rings */
 		bus_addr = rxq->rx_ring_phys_addr;
@@ -4780,28 +4890,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 		IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), 0);
 
 		/* Configure the SRRCTL register */
-#ifdef RTE_HEADER_SPLIT_ENABLE
-		/*
-		 * Configure Header Split
-		 */
-		if (rx_conf->header_split) {
-			if (hw->mac.type == ixgbe_mac_82599EB) {
-				/* Must setup the PSRTYPE register */
-				uint32_t psrtype;
-
-				psrtype = IXGBE_PSRTYPE_TCPHDR |
-					IXGBE_PSRTYPE_UDPHDR |
-					IXGBE_PSRTYPE_IPV4HDR |
-					IXGBE_PSRTYPE_IPV6HDR;
-				IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(rxq->reg_idx), psrtype);
-			}
-			srrctl = ((rx_conf->split_hdr_size <<
-				IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
-				IXGBE_SRRCTL_BSIZEHDR_MASK);
-			srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
-		} else
-#endif
-			srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
+		srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
 
 		/* Set if packets are dropped when no descriptors available */
 		if (rxq->drop_en)
@@ -4827,9 +4916,11 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 		if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
 					    2 * IXGBE_VLAN_TAG_SIZE > buf_size)
 			dev->data->scattered_rx = 1;
+		if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+			rx_conf->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
 	}
 
-	if (rx_conf->enable_scatter)
+	if (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)
 		dev->data->scattered_rx = 1;
 
 	/*
@@ -4844,7 +4935,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 	 */
 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
 	rxcsum |= IXGBE_RXCSUM_PCSD;
-	if (rx_conf->hw_ip_checksum)
+	if (rx_conf->offloads & DEV_RX_OFFLOAD_CHECKSUM)
 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
 	else
 		rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
@@ -4854,7 +4945,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 	if (hw->mac.type == ixgbe_mac_82599EB ||
 	    hw->mac.type == ixgbe_mac_X540) {
 		rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
-		if (rx_conf->hw_strip_crc)
+		if (rx_conf->offloads & DEV_RX_OFFLOAD_CRC_STRIP)
 			rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
 		else
 			rdrxctl &= ~IXGBE_RDRXCTL_CRCSTRIP;
@@ -5260,6 +5351,7 @@ ixgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 	qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
 	qinfo->conf.rx_drop_en = rxq->drop_en;
 	qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
+	qinfo->conf.offloads = rxq->offloads;
 }
 
 void
@@ -5290,6 +5382,7 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
 {
 	struct ixgbe_hw *hw;
 	struct ixgbe_rx_queue *rxq;
+	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
 	uint64_t bus_addr;
 	uint32_t srrctl, psrtype = 0;
 	uint16_t buf_size;
@@ -5329,6 +5422,11 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
 	ixgbevf_rlpml_set_vf(hw,
 		(uint16_t)dev->data->dev_conf.rxmode.max_rx_pkt_len);
 
+	/*
+	 * Assume no header split and no VLAN strip support
+	 * on any Rx queue first.
+	 */
+	rxmode->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
 	/* Setup RX queues */
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
 		rxq = dev->data->rx_queues[i];
@@ -5352,18 +5450,7 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
 
 		/* Configure the SRRCTL register */
-#ifdef RTE_HEADER_SPLIT_ENABLE
-		/*
-		 * Configure Header Split
-		 */
-		if (dev->data->dev_conf.rxmode.header_split) {
-			srrctl = ((dev->data->dev_conf.rxmode.split_hdr_size <<
-				IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
-				IXGBE_SRRCTL_BSIZEHDR_MASK);
-			srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
-		} else
-#endif
-			srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
+		srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
 
 		/* Set if packets are dropped when no descriptors available */
 		if (rxq->drop_en)
@@ -5388,24 +5475,18 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
 		buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) <<
 				       IXGBE_SRRCTL_BSIZEPKT_SHIFT);
 
-		if (dev->data->dev_conf.rxmode.enable_scatter ||
+		if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER ||
 		    /* It adds dual VLAN length for supporting dual VLAN */
-		    (dev->data->dev_conf.rxmode.max_rx_pkt_len +
+		    (rxmode->max_rx_pkt_len +
 				2 * IXGBE_VLAN_TAG_SIZE) > buf_size) {
 			if (!dev->data->scattered_rx)
 				PMD_INIT_LOG(DEBUG, "forcing scatter mode");
 			dev->data->scattered_rx = 1;
 		}
-	}
 
-#ifdef RTE_HEADER_SPLIT_ENABLE
-	if (dev->data->dev_conf.rxmode.header_split)
-		/* Must setup the PSRTYPE register */
-		psrtype = IXGBE_PSRTYPE_TCPHDR |
-			IXGBE_PSRTYPE_UDPHDR |
-			IXGBE_PSRTYPE_IPV4HDR |
-			IXGBE_PSRTYPE_IPV6HDR;
-#endif
+		if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+			rxmode->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+	}
 
 	/* Set RQPL for VF RSS according to max Rx queue */
 	psrtype |= (dev->data->nb_rx_queues >> 1) <<
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.h b/drivers/net/ixgbe/ixgbe_rxtx.h
index ab5f01ee7b..30095fa4b0 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx.h
@@ -307,5 +307,8 @@ uint16_t ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 				    uint16_t nb_pkts);
 int ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq);
 
+uint64_t ixgbe_get_rx_queue_offloads(struct rte_eth_dev *dev);
+uint64_t ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev);
+
 #endif /* RTE_IXGBE_INC_VECTOR */
 #endif /* _IXGBE_RXTX_H_ */
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h b/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
index 414840a2b9..a97c27189b 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
@@ -278,17 +278,12 @@ static inline int
 ixgbe_rx_vec_dev_conf_condition_check_default(struct rte_eth_dev *dev)
 {
 #ifndef RTE_LIBRTE_IEEE1588
-	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
 	struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
 
 	/* no fdir support */
 	if (fconf->mode != RTE_FDIR_MODE_NONE)
 		return -1;
 
-	/* no header split support */
-	if (rxmode->header_split == 1)
-		return -1;
-
 	return 0;
 #else
 	RTE_SET_USED(dev);
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c b/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
index e0f9998f0d..edb1383543 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
@@ -515,7 +515,7 @@ ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev)
 	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
 
 	/* no csum error report support */
-	if (rxmode->hw_ip_checksum == 1)
+	if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM)
 		return -1;
 
 	return ixgbe_rx_vec_dev_conf_condition_check_default(dev);
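
A closing note, also not part of the commit: the acceptance rule that
ixgbe_check_rx_queue_offloads() enforces above can be restated as the
self-contained sketch below. The mask values in main() are arbitrary
example bits, not real DEV_RX_OFFLOAD_* values.

#include <assert.h>
#include <stdint.h>

/* A queue-setup request is accepted when every requested bit is known
 * (supported per queue or per port) and no purely port-level bit
 * differs from what was fixed at configure time. */
static int
rx_queue_offloads_ok(uint64_t requested, uint64_t port_configured,
		     uint64_t queue_capa, uint64_t port_capa)
{
	if ((requested & (queue_capa | port_capa)) != requested)
		return 0;	/* unknown offload requested */
	if ((port_configured ^ requested) & port_capa)
		return 0;	/* port-level bit toggled on one queue */
	return 1;
}

int
main(void)
{
	uint64_t queue_capa = 0x1;	/* say, per-queue VLAN strip */
	uint64_t port_capa = 0x6;	/* say, port-wide checksum bits */

	/* Repeating the port configuration is accepted. */
	assert(rx_queue_offloads_ok(0x6, 0x6, queue_capa, port_capa));
	/* Adding a per-queue offload on top of it is accepted. */
	assert(rx_queue_offloads_ok(0x7, 0x6, queue_capa, port_capa));
	/* Dropping a port-wide offload on one queue is rejected. */
	assert(!rx_queue_offloads_ok(0x2, 0x6, queue_capa, port_capa));
	return 0;
}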