#include <rte_common.h>
#include <rte_log.h>
#include <rte_debug.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_dev.h>
#include <rte_ether.h>
uint16_t nb_desc, unsigned int socket_id,
const struct rte_eth_txconf *tx_conf);
static int nfp_net_start(struct rte_eth_dev *dev);
-static void nfp_net_stats_get(struct rte_eth_dev *dev,
+static int nfp_net_stats_get(struct rte_eth_dev *dev,
struct rte_eth_stats *stats);
static void nfp_net_stats_reset(struct rte_eth_dev *dev);
static void nfp_net_stop(struct rte_eth_dev *dev);
static uint16_t nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
+static int nfp_net_rss_config_default(struct rte_eth_dev *dev);
+static int nfp_net_rss_hash_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf);
+static int nfp_net_rss_reta_write(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size);
+static int nfp_net_rss_hash_write(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf);
+
/*
* The offset of the queue controller queues in the PCIe Target. These
* happen to be at the same offset on the NFP6000 and the NFP3200 so
#define NFP_QCP_MAX_ADD 0x7f
#define RTE_MBUF_DMA_ADDR_DEFAULT(mb) \
- (uint64_t)((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
+ (uint64_t)((mb)->buf_iova + RTE_PKTMBUF_HEADROOM)
/* nfp_qcp_ptr - Read or Write Pointer of a queue */
enum nfp_qcp_ptr {
nn_writeq(rte_cpu_to_le_64(val), hw->ctrl_bar + off);
}
-/*
- * Atomically reads link status information from global structure rte_eth_dev.
- *
- * @param dev
- * - Pointer to the structure rte_eth_dev to read from.
- * - Pointer to the buffer to be saved with the link status.
- *
- * @return
- * - On success, zero.
- * - On failure, negative value.
- */
-static inline int
-nfp_net_dev_atomic_read_link_status(struct rte_eth_dev *dev,
- struct rte_eth_link *link)
-{
- struct rte_eth_link *dst = link;
- struct rte_eth_link *src = &dev->data->dev_link;
-
- if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
- *(uint64_t *)src) == 0)
- return -1;
-
- return 0;
-}
-
-/*
- * Atomically writes the link status information into global
- * structure rte_eth_dev.
- *
- * @param dev
- * - Pointer to the structure rte_eth_dev to read from.
- * - Pointer to the buffer to be saved with the link status.
- *
- * @return
- * - On success, zero.
- * - On failure, negative value.
- */
-static inline int
-nfp_net_dev_atomic_write_link_status(struct rte_eth_dev *dev,
- struct rte_eth_link *link)
-{
- struct rte_eth_link *dst = &dev->data->dev_link;
- struct rte_eth_link *src = link;
-
- if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
- *(uint64_t *)src) == 0)
- return -1;
-
- return 0;
-}
-
static void
nfp_net_rx_queue_release_mbufs(struct nfp_net_rxq *rxq)
{
return -EINVAL;
}
- /* Supporting VLAN insertion by default */
- if (hw->cap & NFP_NET_CFG_CTRL_TXVLAN)
- new_ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
-
if (rxmode->jumbo_frame)
- /* this is handled in rte_eth_dev_configure */
+ hw->mtu = rxmode->max_rx_pkt_len;
- if (rxmode->hw_strip_crc) {
- PMD_INIT_LOG(INFO, "strip CRC not supported");
- return -EINVAL;
- }
+ if (!rxmode->hw_strip_crc)
+ PMD_INIT_LOG(INFO, "HW does strip CRC and it is not configurable");
if (rxmode->enable_scatter) {
PMD_INIT_LOG(INFO, "Scatter not supported");
return -EINVAL;
}
+	/* Enable the following capabilities by default when supported */
+
+ /* VLAN insertion */
+ if (hw->cap & NFP_NET_CFG_CTRL_TXVLAN)
+ new_ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
+
+ /* L2 broadcast */
+ if (hw->cap & NFP_NET_CFG_CTRL_L2BC)
+ new_ctrl |= NFP_NET_CFG_CTRL_L2BC;
+
+ /* L2 multicast */
+ if (hw->cap & NFP_NET_CFG_CTRL_L2MC)
+ new_ctrl |= NFP_NET_CFG_CTRL_L2MC;
+
+ /* TX checksum offload */
+ if (hw->cap & NFP_NET_CFG_CTRL_TXCSUM)
+ new_ctrl |= NFP_NET_CFG_CTRL_TXCSUM;
+
+ /* LSO offload */
+ if (hw->cap & NFP_NET_CFG_CTRL_LSO)
+ new_ctrl |= NFP_NET_CFG_CTRL_LSO;
+
+ /* RX gather */
+ if (hw->cap & NFP_NET_CFG_CTRL_GATHER)
+ new_ctrl |= NFP_NET_CFG_CTRL_GATHER;
+
if (!new_ctrl)
return 0;
hw->qcp_cfg = hw->tx_bar + NFP_QCP_QUEUE_ADDR_SZ;
}
-static void nfp_net_read_mac(struct nfp_net_hw *hw)
+#define ETH_ADDR_LEN 6
+
+static void
+nfp_eth_copy_mac_reverse(uint8_t *dst, const uint8_t *src)
+{
+ int i;
+
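+	/* Copy the MAC reversing byte order: dst[5]=src[0] ... dst[0]=src[5] */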
+ for (i = 0; i < ETH_ADDR_LEN; i++)
+ dst[ETH_ADDR_LEN - i - 1] = src[i];
+}
+
+static int
+nfp_net_pf_read_mac(struct nfp_net_hw *hw, int port)
+{
+ union eth_table_entry *entry;
+ int idx, i;
+
+ idx = port;
+ entry = hw->eth_table;
+
+	/*
+	 * Walk the NFP ethernet table (read earlier from the NSP), skipping
+	 * unused ports, until reaching the requested port index.
+	 */
+ for (i = 0; i < NSP_ETH_MAX_COUNT; i++) {
+ if (!(entry->port & NSP_ETH_PORT_LANES_MASK)) {
+ /* port not in use */
+ entry++;
+ continue;
+ }
+ if (idx == 0)
+ break;
+ idx--;
+ entry++;
+ }
+
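+	/* The whole table was scanned without finding the requested port */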
+ if (i == NSP_ETH_MAX_COUNT)
+ return -EINVAL;
+
+	/*
+	 * hw points to port0 private data. Advance it so it points to the
+	 * private data of the requested port.
+	 */
+ hw += port;
+ nfp_eth_copy_mac_reverse((uint8_t *)&hw->mac_addr,
+ (uint8_t *)&entry->mac_addr);
+
+ return 0;
+}
+
+static void
+nfp_net_vf_read_mac(struct nfp_net_hw *hw)
{
uint32_t tmp;
{
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct rte_eth_conf *dev_conf;
+ struct rte_eth_rxmode *rxmode;
uint32_t new_ctrl, update = 0;
struct nfp_net_hw *hw;
uint32_t intr_vector;
rte_intr_enable(intr_handle);
+ dev_conf = &dev->data->dev_conf;
+ rxmode = &dev_conf->rxmode;
+
+ /* Checking RX mode */
+ if (rxmode->mq_mode & ETH_MQ_RX_RSS) {
+ if (hw->cap & NFP_NET_CFG_CTRL_RSS) {
+ if (!nfp_net_rss_config_default(dev))
+ update |= NFP_NET_CFG_UPDATE_RSS;
+ } else {
+ PMD_INIT_LOG(INFO, "RSS not supported");
+ return -EINVAL;
+ }
+ }
/* Enable device */
new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_ENABLE;
nfp_net_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
{
struct nfp_net_hw *hw;
- struct rte_eth_link link, old;
+ struct rte_eth_link link;
uint32_t nn_link_status;
+ int ret;
static const uint32_t ls_to_ethtool[] = {
[NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED] = ETH_SPEED_NUM_NONE,
hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- memset(&old, 0, sizeof(old));
- nfp_net_dev_atomic_read_link_status(dev, &old);
-
nn_link_status = nn_cfg_readl(hw, NFP_NET_CFG_STS);
memset(&link, 0, sizeof(struct rte_eth_link));
nn_link_status = (nn_link_status >> NFP_NET_CFG_STS_LINK_RATE_SHIFT) &
NFP_NET_CFG_STS_LINK_RATE_MASK;
- if ((NFD_CFG_MAJOR_VERSION_of(hw->ver) < 4) ||
- ((NFD_CFG_MINOR_VERSION_of(hw->ver) == 4) &&
- (NFD_CFG_MINOR_VERSION_of(hw->ver) == 0)))
- /* We really do not know the speed wil old firmware */
+ if (nn_link_status >= RTE_DIM(ls_to_ethtool))
link.link_speed = ETH_SPEED_NUM_NONE;
- else {
- if (nn_link_status >= RTE_DIM(ls_to_ethtool))
- link.link_speed = ETH_SPEED_NUM_NONE;
- else
- link.link_speed = ls_to_ethtool[nn_link_status];
- }
+ else
+ link.link_speed = ls_to_ethtool[nn_link_status];
- if (old.link_status != link.link_status) {
- nfp_net_dev_atomic_write_link_status(dev, &link);
+ ret = rte_eth_linkstatus_set(dev, &link);
+ if (ret == 0) {
if (link.link_status)
PMD_DRV_LOG(INFO, "NIC Link is Up\n");
else
PMD_DRV_LOG(INFO, "NIC Link is Down\n");
- return 0;
}
-
- return -1;
+ return ret;
}
-static void
+static int
nfp_net_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
int i;
/* RTE_ETHDEV_QUEUE_STAT_CNTRS default value is 16 */
+ memset(&nfp_dev_stats, 0, sizeof(nfp_dev_stats));
+
/* reading per RX ring stats */
for (i = 0; i < dev->data->nb_rx_queues; i++) {
if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
nfp_dev_stats.imissed -= hw->eth_stats_base.imissed;
- if (stats)
+ if (stats) {
memcpy(stats, &nfp_dev_stats, sizeof(*stats));
+ return 0;
+ }
+ return -EINVAL;
}
static void
dev_info->max_rx_queues = (uint16_t)hw->max_rx_queues;
dev_info->max_tx_queues = (uint16_t)hw->max_tx_queues;
dev_info->min_rx_bufsize = ETHER_MIN_MTU;
- dev_info->max_rx_pktlen = hw->mtu;
+ dev_info->max_rx_pktlen = hw->max_mtu;
/* Next should change when PF support is implemented */
dev_info->max_mac_addrs = 1;
ETH_TXQ_FLAGS_NOOFFLOADS,
};
+ dev_info->flow_type_rss_offloads = ETH_RSS_NONFRAG_IPV4_TCP |
+ ETH_RSS_NONFRAG_IPV4_UDP |
+ ETH_RSS_NONFRAG_IPV6_TCP |
+ ETH_RSS_NONFRAG_IPV6_UDP;
+
dev_info->reta_size = NFP_NET_CFG_RSS_ITBL_SZ;
dev_info->hash_key_size = NFP_NET_CFG_RSS_KEY_SZ;
- dev_info->speed_capa = ETH_SPEED_NUM_1G | ETH_LINK_SPEED_10G |
- ETH_SPEED_NUM_25G | ETH_SPEED_NUM_40G |
- ETH_SPEED_NUM_50G | ETH_LINK_SPEED_100G;
+ dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
+ ETH_LINK_SPEED_25G | ETH_LINK_SPEED_40G |
+ ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G;
if (hw->cap & NFP_NET_CFG_CTRL_LSO)
dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_eth_link link;
- memset(&link, 0, sizeof(link));
- nfp_net_dev_atomic_read_link_status(dev, &link);
+ rte_eth_linkstatus_get(dev, &link);
if (link.link_status)
RTE_LOG(INFO, PMD, "Port %d: Link Up - speed %u Mbps - %s\n",
- (int)(dev->data->port_id), (unsigned)link.link_speed,
+ dev->data->port_id, link.link_speed,
link.link_duplex == ETH_LINK_FULL_DUPLEX
? "full-duplex" : "half-duplex");
else
RTE_LOG(INFO, PMD, " Port %d: Link Down\n",
- (int)(dev->data->port_id));
+ dev->data->port_id);
RTE_LOG(INFO, PMD, "PCI Address: %04d:%02d:%02d:%d\n",
pci_dev->addr.domain, pci_dev->addr.bus,
PMD_DRV_LOG(DEBUG, "We got a LSC interrupt!!!\n");
- /* get the link status */
- memset(&link, 0, sizeof(link));
- nfp_net_dev_atomic_read_link_status(dev, &link);
+ rte_eth_linkstatus_get(dev, &link);
nfp_net_link_update(dev, 0);
struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
nfp_net_link_update(dev, 0);
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL, NULL);
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
nfp_net_dev_link_status_print(dev);
if ((mtu < ETHER_MIN_MTU) || ((uint32_t)mtu > hw->max_mtu))
return -EINVAL;
+	/* MTU setting is not allowed while the port is started */
+ if (dev->data->dev_started) {
+ PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
+ dev->data->port_id);
+ return -EBUSY;
+ }
+
/* switch to jumbo mode if needed */
if ((uint32_t)mtu > ETHER_MAX_LEN)
dev->data->dev_conf.rxmode.jumbo_frame = 1;
}
/* Saving physical and virtual addresses for the RX ring */
- rxq->dma = (uint64_t)tz->phys_addr;
+ rxq->dma = (uint64_t)tz->iova;
rxq->rxds = (struct nfp_net_rx_desc *)tz->addr;
/* mbuf pointers array for referencing mbufs linked to RX descriptors */
"tx_free_thresh must be less than the number of TX "
"descriptors. (tx_free_thresh=%u port=%d "
"queue=%d)\n", (unsigned int)tx_free_thresh,
- (int)dev->data->port_id, (int)queue_idx);
+ dev->data->port_id, (int)queue_idx);
return -(EINVAL);
}
txq->txq_flags = tx_conf->txq_flags;
/* Saving physical and virtual addresses for the TX ring */
- txq->dma = (uint64_t)tz->phys_addr;
+ txq->dma = (uint64_t)tz->iova;
txq->txds = (struct nfp_net_tx_desc *)tz->addr;
/* mbuf pointers array for referencing mbufs linked to TX descriptors */
break;
}
+ rxds = &rxq->rxds[rxq->rd_p];
+ if ((rxds->rxd.meta_len_dd & PCIE_DESC_RX_DD) == 0)
+ break;
+
/*
* Memory barrier to ensure that we won't do other
* reads before the DD bit.
*/
rte_rmb();
- rxds = &rxq->rxds[rxq->rd_p];
- if ((rxds->rxd.meta_len_dd & PCIE_DESC_RX_DD) == 0)
- break;
-
/*
* We got a packet. Let's alloc a new mbuff for refilling the
* free descriptor ring as soon as possible
*/
new_mb = rte_pktmbuf_alloc(rxq->mem_pool);
if (unlikely(new_mb == NULL)) {
- RTE_LOG_DP(DEBUG, PMD, "RX mbuf alloc failed port_id=%u "
- "queue_id=%u\n", (unsigned)rxq->port_id,
- (unsigned)rxq->qidx);
+ RTE_LOG_DP(DEBUG, PMD,
+ "RX mbuf alloc failed port_id=%u queue_id=%u\n",
+ rxq->port_id, (unsigned int)rxq->qidx);
nfp_net_mbuf_alloc_failed(rxq);
break;
}
mb->nb_segs = 1;
mb->next = NULL;
+ mb->port = rxq->port_id;
+
/* Checking the RSS flag */
nfp_net_set_hash(rxq, rxds, mb);
if ((rxds->rxd.flags & PCIE_DESC_RX_VLAN) &&
(hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN)) {
mb->vlan_tci = rte_cpu_to_le_32(rxds->rxd.vlan);
- mb->ol_flags |= PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED;
+ mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
}
/* Adding the mbuff to the mbuff array passed by the app */
return nb_hold;
PMD_RX_LOG(DEBUG, "RX port_id=%u queue_id=%u, %d packets received\n",
- (unsigned)rxq->port_id, (unsigned)rxq->qidx, nb_hold);
+ rxq->port_id, (unsigned int)rxq->qidx, nb_hold);
nb_hold += rxq->nb_rx_hold;
rte_wmb();
if (nb_hold > rxq->rx_free_thresh) {
PMD_RX_LOG(DEBUG, "port=%u queue=%u nb_hold=%u avail=%u\n",
- (unsigned)rxq->port_id, (unsigned)rxq->qidx,
+ rxq->port_id, (unsigned int)rxq->qidx,
(unsigned)nb_hold, (unsigned)avail);
nfp_qcp_ptr_add(rxq->qcp_fl, NFP_QCP_WRITE_PTR, nb_hold);
nb_hold = 0;
*lmbuf = pkt;
dma_size = pkt->data_len;
- dma_addr = rte_mbuf_data_dma_addr(pkt);
+ dma_addr = rte_mbuf_data_iova(pkt);
PMD_TX_LOG(DEBUG, "Working with mbuf at dma address:"
"%" PRIx64 "\n", dma_addr);
return i;
}
-static void
+static int
nfp_net_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
uint32_t new_ctrl, update;
struct nfp_net_hw *hw;
+ int ret;
hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
new_ctrl = 0;
new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_RXVLAN;
if (new_ctrl == 0)
- return;
+ return 0;
update = NFP_NET_CFG_UPDATE_GEN;
- if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
- return;
+ ret = nfp_net_reconfig(hw, new_ctrl, update);
+ if (!ret)
+ hw->ctrl = new_ctrl;
- hw->ctrl = new_ctrl;
+ return ret;
}
-/* Update Redirection Table(RETA) of Receive Side Scaling of Ethernet device */
static int
-nfp_net_reta_update(struct rte_eth_dev *dev,
+nfp_net_rss_reta_write(struct rte_eth_dev *dev,
struct rte_eth_rss_reta_entry64 *reta_conf,
uint16_t reta_size)
{
uint32_t reta, mask;
int i, j;
int idx, shift;
- uint32_t update;
struct nfp_net_hw *hw =
NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS))
- return -EINVAL;
-
if (reta_size != NFP_NET_CFG_RSS_ITBL_SZ) {
RTE_LOG(ERR, PMD, "The size of hash lookup table configured "
"(%d) doesn't match the number hardware can supported "
nn_cfg_writel(hw, NFP_NET_CFG_RSS_ITBL + (idx * 64) + shift,
reta);
}
+ return 0;
+}
+
+/* Update Redirection Table (RETA) of Receive Side Scaling of Ethernet device */
+static int
+nfp_net_reta_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ struct nfp_net_hw *hw =
+ NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t update;
+ int ret;
+
+ if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS))
+ return -EINVAL;
+
+ ret = nfp_net_rss_reta_write(dev, reta_conf, reta_size);
+ if (ret != 0)
+ return ret;
update = NFP_NET_CFG_UPDATE_RSS;
}
static int
-nfp_net_rss_hash_update(struct rte_eth_dev *dev,
+nfp_net_rss_hash_write(struct rte_eth_dev *dev,
struct rte_eth_rss_conf *rss_conf)
{
- uint32_t update;
+ struct nfp_net_hw *hw;
+ uint64_t rss_hf;
uint32_t cfg_rss_ctrl = 0;
uint8_t key;
- uint64_t rss_hf;
int i;
- struct nfp_net_hw *hw;
hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- rss_hf = rss_conf->rss_hf;
-
- /* Checking if RSS is enabled */
- if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS)) {
- if (rss_hf != 0) { /* Enable RSS? */
- RTE_LOG(ERR, PMD, "RSS unsupported\n");
- return -EINVAL;
- }
- return 0; /* Nothing to do */
+	/* Writing the key byte by byte */
+ for (i = 0; i < rss_conf->rss_key_len; i++) {
+ memcpy(&key, &rss_conf->rss_key[i], 1);
+ nn_cfg_writeb(hw, NFP_NET_CFG_RSS_KEY + i, key);
}
- if (rss_conf->rss_key_len > NFP_NET_CFG_RSS_KEY_SZ) {
- RTE_LOG(ERR, PMD, "hash key too long\n");
- return -EINVAL;
- }
+ rss_hf = rss_conf->rss_hf;
if (rss_hf & ETH_RSS_IPV4)
cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4 |
/* configuring where to apply the RSS hash */
nn_cfg_writel(hw, NFP_NET_CFG_RSS_CTRL, cfg_rss_ctrl);
- /* Writing the key byte a byte */
- for (i = 0; i < rss_conf->rss_key_len; i++) {
- memcpy(&key, &rss_conf->rss_key[i], 1);
- nn_cfg_writeb(hw, NFP_NET_CFG_RSS_KEY + i, key);
- }
-
/* Writing the key size */
nn_cfg_writeb(hw, NFP_NET_CFG_RSS_KEY_SZ, rss_conf->rss_key_len);
+ return 0;
+}
+
+static int
+nfp_net_rss_hash_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ uint32_t update;
+ uint64_t rss_hf;
+ struct nfp_net_hw *hw;
+
+ hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ rss_hf = rss_conf->rss_hf;
+
+ /* Checking if RSS is enabled */
+ if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS)) {
+ if (rss_hf != 0) { /* Enable RSS? */
+ RTE_LOG(ERR, PMD, "RSS unsupported\n");
+ return -EINVAL;
+ }
+ return 0; /* Nothing to do */
+ }
+
+ if (rss_conf->rss_key_len > NFP_NET_CFG_RSS_KEY_SZ) {
+ RTE_LOG(ERR, PMD, "hash key too long\n");
+ return -EINVAL;
+ }
+
+ nfp_net_rss_hash_write(dev, rss_conf);
+
update = NFP_NET_CFG_UPDATE_RSS;
if (nfp_net_reconfig(hw, hw->ctrl, update) < 0)
return 0;
}
+static int
+nfp_net_rss_config_default(struct rte_eth_dev *dev)
+{
+ struct rte_eth_conf *dev_conf;
+ struct rte_eth_rss_conf rss_conf;
+ struct rte_eth_rss_reta_entry64 nfp_reta_conf[2];
+ uint16_t rx_queues = dev->data->nb_rx_queues;
+ uint16_t queue;
+ int i, j, ret;
+
+ RTE_LOG(INFO, PMD, "setting default RSS conf for %u queues\n",
+ rx_queues);
+
+ nfp_reta_conf[0].mask = ~0x0;
+ nfp_reta_conf[1].mask = ~0x0;
+
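+	/*
+	 * Spread the rx queues round-robin over the 128 RETA entries
+	 * held by the two 64-entry reta_conf structs.
+	 */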
+ queue = 0;
+ for (i = 0; i < 0x40; i += 8) {
+ for (j = i; j < (i + 8); j++) {
+ nfp_reta_conf[0].reta[j] = queue;
+ nfp_reta_conf[1].reta[j] = queue++;
+ queue %= rx_queues;
+ }
+ }
+ ret = nfp_net_rss_reta_write(dev, nfp_reta_conf, 0x80);
+ if (ret != 0)
+ return ret;
+
+ dev_conf = &dev->data->dev_conf;
+ if (!dev_conf) {
+		RTE_LOG(INFO, PMD, "wrong rss conf\n");
+ return -EINVAL;
+ }
+ rss_conf = dev_conf->rx_adv_conf.rss_conf;
+
+ ret = nfp_net_rss_hash_write(dev, &rss_conf);
+
+ return ret;
+}
+
/* Initialise and register driver with DPDK Application */
static const struct eth_dev_ops nfp_net_eth_dev_ops = {
.dev_configure = nfp_net_configure,
return 0;
rte_eth_copy_pci_info(eth_dev, pci_dev);
- /* hotplug is not possible with multiport PF */
- if (!hw->pf_multiport_enabled)
- eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
hw->device_id = pci_dev->id.device_id;
hw->vendor_id = pci_dev->id.vendor_id;
/* vNIC PF tx/rx BARs are a subset of PF PCI device */
hwport0->hw_queues += bar_offset;
+
+	/* Let's seize the chance to read the eth table from hw */
+ if (nfp_nsp_eth_read_table(nspu_desc, &hw->eth_table))
+ return -ENODEV;
}
if (hw->is_pf) {
hw->ver = nn_cfg_readl(hw, NFP_NET_CFG_VERSION);
hw->cap = nn_cfg_readl(hw, NFP_NET_CFG_CAP);
hw->max_mtu = nn_cfg_readl(hw, NFP_NET_CFG_MAX_MTU);
- hw->mtu = hw->max_mtu;
+ hw->mtu = ETHER_MTU;
if (NFD_CFG_MAJOR_VERSION_of(hw->ver) < 2)
hw->rx_offset = NFP_NET_RX_OFFSET;
PMD_INIT_LOG(INFO, "VER: %#x, Maximum supported MTU: %d",
hw->ver, hw->max_mtu);
- PMD_INIT_LOG(INFO, "CAP: %#x, %s%s%s%s%s%s%s%s%s", hw->cap,
+ PMD_INIT_LOG(INFO, "CAP: %#x, %s%s%s%s%s%s%s%s%s%s%s", hw->cap,
hw->cap & NFP_NET_CFG_CTRL_PROMISC ? "PROMISC " : "",
+ hw->cap & NFP_NET_CFG_CTRL_L2BC ? "L2BCFILT " : "",
+ hw->cap & NFP_NET_CFG_CTRL_L2MC ? "L2MCFILT " : "",
hw->cap & NFP_NET_CFG_CTRL_RXCSUM ? "RXCSUM " : "",
hw->cap & NFP_NET_CFG_CTRL_TXCSUM ? "TXCSUM " : "",
hw->cap & NFP_NET_CFG_CTRL_RXVLAN ? "RXVLAN " : "",
return -ENOMEM;
}
- nfp_net_read_mac(hw);
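+	/*
+	 * PF ports take their MAC from the NSP ethernet table and program
+	 * it into the vNIC; VFs keep reading theirs as before.
+	 */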
+ if (hw->is_pf) {
+ nfp_net_pf_read_mac(hwport0, port);
+ nfp_net_write_mac(hw, (uint8_t *)&hw->mac_addr);
+ } else {
+ nfp_net_vf_read_mac(hw);
+ }
if (!is_valid_assigned_ether_addr((struct ether_addr *)&hw->mac_addr)) {
/* Using random mac addresses for VFs */
return ret;
}
+int nfp_logtype_init;
+int nfp_logtype_driver;
+
static const struct rte_pci_id pci_id_nfp_pf_net_map[] = {
{
RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
static int eth_nfp_pci_remove(struct rte_pci_device *pci_dev)
{
+ struct rte_eth_dev *eth_dev;
+ struct nfp_net_hw *hw, *hwport0;
+ int port = 0;
+
+ eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
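+	/*
+	 * For PF NICs the per-port private data is an array; use the port
+	 * number encoded in the device name to pick the right entry.
+	 */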
+ if ((pci_dev->id.device_id == PCI_DEVICE_ID_NFP4000_PF_NIC) ||
+ (pci_dev->id.device_id == PCI_DEVICE_ID_NFP6000_PF_NIC)) {
+ port = get_pf_port_number(eth_dev->data->name);
+ hwport0 = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ hw = &hwport0[port];
+ } else {
+ hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ }
+ /* hotplug is not possible with multiport PF */
+ if (hw->pf_multiport_enabled)
+ return -ENOTSUP;
return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
}
RTE_PMD_REGISTER_KMOD_DEP(net_nfp_pf, "* igb_uio | uio_pci_generic | vfio");
RTE_PMD_REGISTER_KMOD_DEP(net_nfp_vf, "* igb_uio | uio_pci_generic | vfio");
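+
+/* Register the NFP dynamic log types at startup; default level is NOTICE */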
+RTE_INIT(nfp_init_log);
+static void
+nfp_init_log(void)
+{
+ nfp_logtype_init = rte_log_register("pmd.net.nfp.init");
+ if (nfp_logtype_init >= 0)
+ rte_log_set_level(nfp_logtype_init, RTE_LOG_NOTICE);
+ nfp_logtype_driver = rte_log_register("pmd.net.nfp.driver");
+ if (nfp_logtype_driver >= 0)
+ rte_log_set_level(nfp_logtype_driver, RTE_LOG_NOTICE);
+}
/*
* Local variables:
* c-file-style: "Linux"