struct rte_eth_conf *dev_conf;
struct rte_eth_rxmode *rxmode;
struct rte_eth_txmode *txmode;
- uint32_t new_ctrl = 0;
- uint32_t update = 0;
struct nfp_net_hw *hw;
hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
}
/* Checking RX mode */
- if (rxmode->mq_mode & ETH_MQ_RX_RSS) {
- if (hw->cap & NFP_NET_CFG_CTRL_RSS) {
- update = NFP_NET_CFG_UPDATE_RSS;
- new_ctrl = NFP_NET_CFG_CTRL_RSS;
- } else {
- PMD_INIT_LOG(INFO, "RSS not supported");
- return -EINVAL;
- }
+ if (rxmode->mq_mode & ETH_MQ_RX_RSS &&
+ !(hw->cap & NFP_NET_CFG_CTRL_RSS)) {
+ PMD_INIT_LOG(INFO, "RSS not supported");
+ return -EINVAL;
}
- if (rxmode->split_hdr_size) {
+ /* Checking RX offloads */
+ if (rxmode->offloads & DEV_RX_OFFLOAD_HEADER_SPLIT) {
PMD_INIT_LOG(INFO, "rxmode does not support split header");
return -EINVAL;
}
- if (rxmode->hw_ip_checksum) {
- if (hw->cap & NFP_NET_CFG_CTRL_RXCSUM) {
- new_ctrl |= NFP_NET_CFG_CTRL_RXCSUM;
- } else {
- PMD_INIT_LOG(INFO, "RXCSUM not supported");
- return -EINVAL;
- }
- }
+ if ((rxmode->offloads & DEV_RX_OFFLOAD_IPV4_CKSUM) &&
+ !(hw->cap & NFP_NET_CFG_CTRL_RXCSUM)) {
+ PMD_INIT_LOG(INFO, "RXCSUM not supported");
+ return -EINVAL;
+ }
- if (rxmode->hw_vlan_filter) {
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
PMD_INIT_LOG(INFO, "VLAN filter not supported");
return -EINVAL;
}
- if (rxmode->hw_vlan_strip) {
- if (hw->cap & NFP_NET_CFG_CTRL_RXVLAN) {
- new_ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
- } else {
- PMD_INIT_LOG(INFO, "hw vlan strip not supported");
- return -EINVAL;
- }
+ if ((rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) &&
+ !(hw->cap & NFP_NET_CFG_CTRL_RXVLAN)) {
+ PMD_INIT_LOG(INFO, "hw vlan strip not supported");
+ return -EINVAL;
}
- if (rxmode->hw_vlan_extend) {
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) {
PMD_INIT_LOG(INFO, "VLAN extended not supported");
return -EINVAL;
}
- if (rxmode->jumbo_frame)
- hw->mtu = rxmode->max_rx_pkt_len;
+ if (rxmode->offloads & DEV_RX_OFFLOAD_TCP_LRO) {
+ PMD_INIT_LOG(INFO, "LRO not supported");
+ return -EINVAL;
+ }
+
+ if (rxmode->offloads & DEV_RX_OFFLOAD_QINQ_STRIP) {
+ PMD_INIT_LOG(INFO, "QINQ STRIP not supported");
+ return -EINVAL;
+ }
+
+ if (rxmode->offloads & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) {
+ PMD_INIT_LOG(INFO, "Outer IP checksum not supported");
+ return -EINVAL;
+ }
- if (!rxmode->hw_strip_crc)
- PMD_INIT_LOG(INFO, "HW does strip CRC and it is not configurable");
+ if (rxmode->offloads & DEV_RX_OFFLOAD_MACSEC_STRIP) {
+ PMD_INIT_LOG(INFO, "MACSEC strip not supported");
+ return -EINVAL;
+ }
+
- if (rxmode->enable_scatter) {
+ if (!(rxmode->offloads & DEV_RX_OFFLOAD_CRC_STRIP))
+ PMD_INIT_LOG(INFO, "HW does strip CRC. No configurable!");
+
+ if ((rxmode->offloads & DEV_RX_OFFLOAD_SCATTER) &&
+ !(hw->cap & NFP_NET_CFG_CTRL_SCATTER)) {
PMD_INIT_LOG(INFO, "Scatter not supported");
return -EINVAL;
}
- /* If next capabilities are supported, configure them by default */
+ if (rxmode->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
+ PMD_INIT_LOG(INFO, "timestamp offfload not supported");
+ return -EINVAL;
+ }
- /* VLAN insertion */
- if (hw->cap & NFP_NET_CFG_CTRL_TXVLAN)
- new_ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
+ if (rxmode->offloads & DEV_RX_OFFLOAD_SECURITY) {
+ PMD_INIT_LOG(INFO, "security offload not supported");
+ return -EINVAL;
+ }
- /* L2 broadcast */
- if (hw->cap & NFP_NET_CFG_CTRL_L2BC)
- new_ctrl |= NFP_NET_CFG_CTRL_L2BC;
+ /* Checking TX offloads */
+ if ((txmode->offloads & DEV_TX_OFFLOAD_VLAN_INSERT) &&
+ !(hw->cap & NFP_NET_CFG_CTRL_TXVLAN)) {
+ PMD_INIT_LOG(INFO, "vlan insert offload not supported");
+ return -EINVAL;
+ }
- /* L2 multicast */
- if (hw->cap & NFP_NET_CFG_CTRL_L2MC)
- new_ctrl |= NFP_NET_CFG_CTRL_L2MC;
+ if ((txmode->offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) &&
+ !(hw->cap & NFP_NET_CFG_CTRL_TXCSUM)) {
+ PMD_INIT_LOG(INFO, "TX checksum offload not supported");
+ return -EINVAL;
+ }
- /* TX checksum offload */
- if (hw->cap & NFP_NET_CFG_CTRL_TXCSUM)
- new_ctrl |= NFP_NET_CFG_CTRL_TXCSUM;
+ if (txmode->offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) {
+ PMD_INIT_LOG(INFO, "TX SCTP checksum offload not supported");
+ return -EINVAL;
+ }
- /* LSO offload */
- if (hw->cap & NFP_NET_CFG_CTRL_LSO)
- new_ctrl |= NFP_NET_CFG_CTRL_LSO;
+ if ((txmode->offloads & DEV_TX_OFFLOAD_TCP_TSO) &&
+ !(hw->cap & NFP_NET_CFG_CTRL_LSO)) {
+ PMD_INIT_LOG(INFO, "TSO TCP offload not supported");
+ return -EINVAL;
+ }
- /* RX gather */
- if (hw->cap & NFP_NET_CFG_CTRL_GATHER)
- new_ctrl |= NFP_NET_CFG_CTRL_GATHER;
+ if (txmode->offloads & DEV_TX_OFFLOAD_UDP_TSO) {
+ PMD_INIT_LOG(INFO, "TSO UDP offload not supported");
+ return -EINVAL;
+ }
- if (!new_ctrl)
- return 0;
+ if (txmode->offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) {
+ PMD_INIT_LOG(INFO, "TX outer checksum offload not supported");
+ return -EINVAL;
+ }
- update |= NFP_NET_CFG_UPDATE_GEN;
+ if (txmode->offloads & DEV_TX_OFFLOAD_QINQ_INSERT) {
+ PMD_INIT_LOG(INFO, "QINQ insert offload not supported");
+ return -EINVAL;
+ }
- nn_cfg_writel(hw, NFP_NET_CFG_CTRL, new_ctrl);
- if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
- return -EIO;
+ if (txmode->offloads & DEV_TX_OFFLOAD_VXLAN_TNL_TSO ||
+ txmode->offloads & DEV_TX_OFFLOAD_GRE_TNL_TSO ||
+ txmode->offloads & DEV_TX_OFFLOAD_IPIP_TNL_TSO ||
+ txmode->offloads & DEV_TX_OFFLOAD_GENEVE_TNL_TSO) {
+ PMD_INIT_LOG(INFO, "tunneling offload not supported");
+ return -EINVAL;
+ }
- hw->ctrl = new_ctrl;
+ if (txmode->offloads & DEV_TX_OFFLOAD_MACSEC_INSERT) {
+ PMD_INIT_LOG(INFO, "TX MACSEC offload not supported");
+ return -EINVAL;
+ }
+
+ if (txmode->offloads & DEV_TX_OFFLOAD_MT_LOCKFREE) {
+ PMD_INIT_LOG(INFO, "multiqueue lockfree not supported");
+ return -EINVAL;
+ }
+
+ if ((txmode->offloads & DEV_TX_OFFLOAD_MULTI_SEGS) &&
+ !(hw->cap & NFP_NET_CFG_CTRL_GATHER)) {
+ PMD_INIT_LOG(INFO, "TX multisegs not supported");
+ return -EINVAL;
+ }
+
+ if (txmode->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) {
+ PMD_INIT_LOG(INFO, "mbuf fast-free not supported");
+ return -EINVAL;
+ }
+
+ if (txmode->offloads & DEV_TX_OFFLOAD_SECURITY) {
+ PMD_INIT_LOG(INFO, "TX security offload not supported");
+ return -EINVAL;
+ }
return 0;
}
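+/*
+ * nfp_check_offloads: map the offloads requested in dev->data->dev_conf
+ * to NFP_NET_CFG_CTRL bits. nfp_net_configure has already rejected
+ * unsupported requests, so only capability bits are rechecked here.
+ */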
+static uint32_t
+nfp_check_offloads(struct rte_eth_dev *dev)
+{
+ struct nfp_net_hw *hw;
+ struct rte_eth_conf *dev_conf;
+ struct rte_eth_rxmode *rxmode;
+ struct rte_eth_txmode *txmode;
+ uint32_t ctrl = 0;
+
+ hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ dev_conf = &dev->data->dev_conf;
+ rxmode = &dev_conf->rxmode;
+ txmode = &dev_conf->txmode;
+
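+ /* RX checksum offload: enabled only if the device capability is set */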
+ if (rxmode->offloads & DEV_RX_OFFLOAD_IPV4_CKSUM) {
+ if (hw->cap & NFP_NET_CFG_CTRL_RXCSUM)
+ ctrl |= NFP_NET_CFG_CTRL_RXCSUM;
+ }
+
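+ /* RX VLAN strip offload */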
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
+ if (hw->cap & NFP_NET_CFG_CTRL_RXVLAN)
+ ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
+ }
+
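+ /* Jumbo frames: use the requested max frame length as device MTU */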
+ if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
+ hw->mtu = rxmode->max_rx_pkt_len;
+
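+ /* TX VLAN insert offload */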
+ if (txmode->offloads & DEV_TX_OFFLOAD_VLAN_INSERT)
+ ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
+
+ /* L2 broadcast */
+ if (hw->cap & NFP_NET_CFG_CTRL_L2BC)
+ ctrl |= NFP_NET_CFG_CTRL_L2BC;
+
+ /* L2 multicast */
+ if (hw->cap & NFP_NET_CFG_CTRL_L2MC)
+ ctrl |= NFP_NET_CFG_CTRL_L2MC;
+
+ /* TX checksum offload */
+ if (txmode->offloads & DEV_TX_OFFLOAD_IPV4_CKSUM ||
+ txmode->offloads & DEV_TX_OFFLOAD_UDP_CKSUM ||
+ txmode->offloads & DEV_TX_OFFLOAD_TCP_CKSUM)
+ ctrl |= NFP_NET_CFG_CTRL_TXCSUM;
+
+ /* LSO offload */
+ if (txmode->offloads & DEV_TX_OFFLOAD_TCP_TSO)
+ ctrl |= NFP_NET_CFG_CTRL_LSO;
+
+ /* Gather DMA (multi-segment transmit) */
+ if (txmode->offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+ ctrl |= NFP_NET_CFG_CTRL_GATHER;
+
+ return ctrl;
+}
+
static int
nfp_net_start(struct rte_eth_dev *dev)
{
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
- struct rte_eth_conf *dev_conf;
- struct rte_eth_rxmode *rxmode;
uint32_t new_ctrl, update = 0;
struct nfp_net_hw *hw;
+ struct rte_eth_conf *dev_conf;
+ struct rte_eth_rxmode *rxmode;
uint32_t intr_vector;
int ret;
/* Disabling queues just in case... */
nfp_net_disable_queues(dev);
- /* Writing configuration parameters in the device */
- nfp_net_params_setup(hw);
-
/* Enabling the required queues in the device */
nfp_net_enable_queues(dev);
rte_intr_enable(intr_handle);
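+ /* Translate the requested offloads into device control bits */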
+ new_ctrl = nfp_check_offloads(dev);
+
+ /* Writing configuration parameters in the device */
+ nfp_net_params_setup(hw);
+
dev_conf = &dev->data->dev_conf;
rxmode = &dev_conf->rxmode;
- /* Checking RX mode */
if (rxmode->mq_mode & ETH_MQ_RX_RSS) {
- if (hw->cap & NFP_NET_CFG_CTRL_RSS) {
- if (!nfp_net_rss_config_default(dev))
- update |= NFP_NET_CFG_UPDATE_RSS;
- } else {
- PMD_INIT_LOG(INFO, "RSS not supported");
- return -EINVAL;
- }
+ ret = nfp_net_rss_config_default(dev);
+ if (ret != 0)
+ return ret;
+ update |= NFP_NET_CFG_UPDATE_RSS;
+ new_ctrl |= NFP_NET_CFG_CTRL_RSS;
}
+
/* Enable device */
- new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_ENABLE;
+ new_ctrl |= NFP_NET_CFG_CTRL_ENABLE;
update |= NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING;
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM;
+ dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+
if (hw->cap & NFP_NET_CFG_CTRL_TXVLAN)
dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;
DEV_TX_OFFLOAD_UDP_CKSUM |
DEV_TX_OFFLOAD_TCP_CKSUM;
+ if (hw->cap & NFP_NET_CFG_CTRL_LSO)
+ dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
+
+ if (hw->cap & NFP_NET_CFG_CTRL_GATHER)
+ dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_MULTI_SEGS;
+
dev_info->default_rxconf = (struct rte_eth_rxconf) {
.rx_thresh = {
.pthresh = DEFAULT_RX_PTHRESH,
},
.tx_free_thresh = DEFAULT_TX_FREE_THRESH,
.tx_rs_thresh = DEFAULT_TX_RSBIT_THRESH,
- .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
- ETH_TXQ_FLAGS_NOOFFLOADS,
};
dev_info->flow_type_rss_offloads = ETH_RSS_NONFRAG_IPV4_TCP |
dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
ETH_LINK_SPEED_25G | ETH_LINK_SPEED_40G |
ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G;
-
- if (hw->cap & NFP_NET_CFG_CTRL_LSO)
- dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
}
static const uint32_t *
const struct rte_memzone *tz;
struct nfp_net_rxq *rxq;
struct nfp_net_hw *hw;
+ struct rte_eth_conf *dev_conf;
+ struct rte_eth_rxmode *rxmode;
hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
return -EINVAL;
}
+ dev_conf = &dev->data->dev_conf;
+ rxmode = &dev_conf->rxmode;
+
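+ /* This PMD requires per-queue RX offloads to match the port offloads */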
+ if (rx_conf->offloads != rxmode->offloads) {
+ RTE_LOG(ERR, PMD, "queue %u rx offloads not as port offloads\n",
+ queue_idx);
+ RTE_LOG(ERR, PMD, "\tport: %" PRIx64 "\n", rxmode->offloads);
+ RTE_LOG(ERR, PMD, "\tqueue: %" PRIx64 "\n", rx_conf->offloads);
+ return -EINVAL;
+ }
+
/*
* Free memory prior to re-allocation if needed. This is the case after
* calling nfp_net_stop
struct nfp_net_txq *txq;
uint16_t tx_free_thresh;
struct nfp_net_hw *hw;
+ struct rte_eth_conf *dev_conf;
+ struct rte_eth_txmode *txmode;
hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
return -EINVAL;
}
+ dev_conf = &dev->data->dev_conf;
+ txmode = &dev_conf->txmode;
+
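+ /* This PMD requires per-queue TX offloads to match the port offloads */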
+ if (tx_conf->offloads != txmode->offloads) {
+ RTE_LOG(ERR, PMD, "queue %u tx offloads not as port offloads",
+ queue_idx);
+ return -EINVAL;
+ }
+
tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
tx_conf->tx_free_thresh :
DEFAULT_TX_FREE_THRESH);
txq->qcp_q = hw->tx_bar + NFP_QCP_QUEUE_OFF(txq->tx_qcidx);
txq->port_id = dev->data->port_id;
- txq->txq_flags = tx_conf->txq_flags;
/* Saving physical and virtual addresses for the TX ring */
txq->dma = (uint64_t)tz->iova;