-/*
- * BSD LICENSE
- *
- * Copyright (C) Cavium networks Ltd. 2016.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Cavium networks nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Cavium, Inc
*/
#include <assert.h>
#include <sys/queue.h>
#include <rte_alarm.h>
-#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_dev.h>
#include <rte_eal.h>
#include <rte_ether.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_pci.h>
+#include <rte_bus_pci.h>
#include <rte_tailq.h>
#include "base/nicvf_plat.h"
#include "nicvf_svf.h"
#include "nicvf_logs.h"
+int nicvf_logtype_mbox;
+int nicvf_logtype_init;
+int nicvf_logtype_driver;
+
static void nicvf_dev_stop(struct rte_eth_dev *dev);
static void nicvf_dev_stop_cleanup(struct rte_eth_dev *dev, bool cleanup);
static void nicvf_vf_stop(struct rte_eth_dev *dev, struct nicvf *nic,
bool cleanup);
-static inline int
-nicvf_atomic_write_link_status(struct rte_eth_dev *dev,
- struct rte_eth_link *link)
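+/* Constructor hook: runs before main() so log types exist prior to probe. */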
+RTE_INIT(nicvf_init_log);
+static void
+nicvf_init_log(void)
{
- struct rte_eth_link *dst = &dev->data->dev_link;
- struct rte_eth_link *src = link;
+ nicvf_logtype_mbox = rte_log_register("pmd.net.thunderx.mbox");
+ if (nicvf_logtype_mbox >= 0)
+ rte_log_set_level(nicvf_logtype_mbox, RTE_LOG_NOTICE);
+
- if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
- *(uint64_t *)src) == 0)
- return -1;
+ nicvf_logtype_init = rte_log_register("pmd.net.thunderx.init");
+ if (nicvf_logtype_init >= 0)
+ rte_log_set_level(nicvf_logtype_init, RTE_LOG_NOTICE);
+
- return 0;
+ nicvf_logtype_driver = rte_log_register("pmd.net.thunderx.driver");
+ if (nicvf_logtype_driver >= 0)
+ rte_log_set_level(nicvf_logtype_driver, RTE_LOG_NOTICE);
}
-static inline void
-nicvf_set_eth_link_status(struct nicvf *nic, struct rte_eth_link *link)
+static void
+nicvf_link_status_update(struct nicvf *nic,
+ struct rte_eth_link *link)
{
- link->link_status = nic->link_up;
- link->link_duplex = ETH_LINK_AUTONEG;
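+ /* Start from a zeroed struct so fields not set below read as 0/DOWN. */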
+ memset(link, 0, sizeof(*link));
+
+ link->link_status = nic->link_up ? ETH_LINK_UP : ETH_LINK_DOWN;
+
if (nic->duplex == NICVF_HALF_DUPLEX)
link->link_duplex = ETH_LINK_HALF_DUPLEX;
else if (nic->duplex == NICVF_FULL_DUPLEX)
link->link_duplex = ETH_LINK_FULL_DUPLEX;
link->link_speed = nic->speed;
- link->link_autoneg = ETH_LINK_SPEED_AUTONEG;
+ link->link_autoneg = ETH_LINK_AUTONEG;
}
static void
nicvf_interrupt(void *arg)
{
struct rte_eth_dev *dev = arg;
struct nicvf *nic = nicvf_pmd_priv(dev);
+ struct rte_eth_link link;
if (nicvf_reg_poll_interrupts(nic) == NIC_MBOX_MSG_BGX_LINK_CHANGE) {
- if (dev->data->dev_conf.intr_conf.lsc)
- nicvf_set_eth_link_status(nic, &dev->data->dev_link);
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
+ if (dev->data->dev_conf.intr_conf.lsc) {
+ nicvf_link_status_update(nic, &link);
+ rte_eth_linkstatus_set(dev, &link);
+
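+ /* Notify user callbacks registered for link state change events. */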
+ _rte_eth_dev_callback_process(dev,
+ RTE_ETH_EVENT_INTR_LSC,
+ NULL);
+ }
}
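+ /* Re-arm the periodic alarm; the VF polls for link and mbox events
+  * rather than relying on a hardware link interrupt.
+  */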
rte_eal_alarm_set(NICVF_INTR_POLL_INTERVAL_MS * 1000,
if (wait_to_complete) {
/* rte_eth_link_get() might need to wait up to 9 seconds */
for (i = 0; i < MAX_CHECK_TIME; i++) {
- memset(&link, 0, sizeof(link));
- nicvf_set_eth_link_status(nic, &link);
- if (link.link_status)
+ nicvf_link_status_update(nic, &link);
+ if (link.link_status == ETH_LINK_UP)
break;
rte_delay_ms(CHECK_INTERVAL);
}
} else {
- memset(&link, 0, sizeof(link));
- nicvf_set_eth_link_status(nic, &link);
+ nicvf_link_status_update(nic, &link);
}
- return nicvf_atomic_write_link_status(dev, &link);
+
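+ /* Atomically publish the new link state in dev->data->dev_link;
+  * the return value reports whether link_status changed.
+  */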
+ return rte_eth_linkstatus_set(dev, &link);
}
static int
nicvf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
struct nicvf *nic = nicvf_pmd_priv(dev);
- uint32_t buffsz, frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
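+ /* Frame size accounts for the full L2 overhead on top of the MTU. */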
+ uint32_t buffsz, frame_size = mtu + NIC_HW_L2_OVERHEAD;
size_t i;
+ struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
PMD_INIT_FUNC_TRACE();
* Refuse mtu that requires the support of scattered packets
* when this feature has not been enabled before.
*/
- if (!dev->data->scattered_rx &&
+ if (dev->data->dev_started && !dev->data->scattered_rx &&
(frame_size + 2 * VLAN_TAG_SIZE > buffsz))
return -EINVAL;
return -EINVAL;
if (frame_size > ETHER_MAX_LEN)
- dev->data->dev_conf.rxmode.jumbo_frame = 1;
+ rxmode->offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
else
- dev->data->dev_conf.rxmode.jumbo_frame = 0;
+ rxmode->offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
- if (nicvf_mbox_update_hw_max_frs(nic, frame_size))
+ if (nicvf_mbox_update_hw_max_frs(nic, mtu))
return -EINVAL;
- /* Update max frame size */
- dev->data->dev_conf.rxmode.max_rx_pkt_len = (uint32_t)frame_size;
+ /* Update max_rx_pkt_len */
+ rxmode->max_rx_pkt_len = mtu + ETHER_HDR_LEN;
nic->mtu = mtu;
for (i = 0; i < nic->sqs_count; i++)
return -ENOTSUP;
}
-static void
+static int
nicvf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
uint16_t qidx;
stats->opackets += port_stats.tx_bcast_frames_ok;
stats->opackets += port_stats.tx_mcast_frames_ok;
stats->oerrors = port_stats.tx_drops;
+
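+ /* The ethdev stats_get callback now returns a status code. */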
+ return 0;
}
static const uint32_t *
memset(rz->addr, 0, ring_size);
- rxq->phys = rz->phys_addr;
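+ /* The memzone bus-address field was renamed from phys_addr to iova. */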
+ rxq->phys = rz->iova;
rxq->desc = rz->addr;
rxq->qlen_mask = desc_cnt - 1;
memset(rz->addr, 0, ring_size);
- sq->phys = rz->phys_addr;
+ sq->phys = rz->iova;
sq->desc = rz->addr;
sq->qlen_mask = desc_cnt - 1;
memset(rz->addr, 0, ring_size);
- rbdr->phys = rz->phys_addr;
+ rbdr->phys = rz->iova;
rbdr->tail = 0;
rbdr->next_tail = 0;
rbdr->desc = rz->addr;
static void
nicvf_rbdr_release_mbuf(struct rte_eth_dev *dev, struct nicvf *nic,
- nicvf_phys_addr_t phy)
+ nicvf_iova_addr_t phy)
{
uint16_t qidx;
void *obj;
for (i = 0; i < dev->data->nb_tx_queues; i++) {
txq = dev->data->tx_queues[i];
- if ((txq->txq_flags & ETH_TXQ_FLAGS_NOMULTSEGS) == 0) {
+ if (txq->offloads & DEV_TX_OFFLOAD_MULTI_SEGS) {
multiseg = true;
break;
}
const struct rte_eth_txconf *tx_conf)
{
uint16_t tx_free_thresh;
- uint8_t is_single_pool;
+ bool is_single_pool;
struct nicvf_txq *txq;
struct nicvf *nic = nicvf_pmd_priv(dev);
+ uint64_t offloads;
PMD_INIT_FUNC_TRACE();
txq->nic = nic;
txq->queue_id = qidx;
txq->tx_free_thresh = tx_free_thresh;
- txq->txq_flags = tx_conf->txq_flags;
txq->sq_head = nicvf_qset_base(nic, qidx) + NIC_QSET_SQ_0_7_HEAD;
txq->sq_door = nicvf_qset_base(nic, qidx) + NIC_QSET_SQ_0_7_DOOR;
- is_single_pool = (txq->txq_flags & ETH_TXQ_FLAGS_NOREFCOUNT &&
- txq->txq_flags & ETH_TXQ_FLAGS_NOMULTMEMP);
+ offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
+ txq->offloads = offloads;
+
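+ /* DEV_TX_OFFLOAD_MBUF_FAST_FREE guarantees a single mempool and
+  * refcnt == 1 per mbuf, the same conditions the removed
+  * NOREFCOUNT/NOMULTMEMP txq_flags used to express.
+  */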
+ is_single_pool = !!(offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE);
/* Choose optimum free threshold value for multipool case */
if (!is_single_pool) {
nicvf_tx_queue_reset(txq);
- PMD_TX_LOG(DEBUG, "[%d] txq=%p nb_desc=%d desc=%p phys=0x%" PRIx64,
+ PMD_INIT_LOG(DEBUG, "[%d] txq=%p nb_desc=%d desc=%p"
+ " phys=0x%" PRIx64 " offloads=0x%" PRIx64,
nicvf_netdev_qidx(nic, qidx), txq, nb_desc, txq->desc,
- txq->phys);
+ txq->phys, txq->offloads);
dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)] = txq;
dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
uint16_t rx_free_thresh;
struct nicvf_rxq *rxq;
struct nicvf *nic = nicvf_pmd_priv(dev);
+ uint64_t offloads;
PMD_INIT_FUNC_TRACE();
}
- /* Mempool memory must be physically contiguous */
+ /* Mempool memory must be IOVA contiguous */
- if (mp->flags & MEMPOOL_F_NO_PHYS_CONTIG) {
+ if (mp->flags & MEMPOOL_F_NO_IOVA_CONTIG) {
- PMD_INIT_LOG(ERR, "Mempool memory must be physically contiguous");
+ PMD_INIT_LOG(ERR, "Mempool memory must be IOVA contiguous");
return -EINVAL;
}
nicvf_rx_queue_reset(rxq);
- PMD_RX_LOG(DEBUG, "[%d] rxq=%p pool=%s nb_desc=(%d/%d) phy=%" PRIx64,
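+ /* Per-queue Rx offloads apply on top of the port-level configuration. */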
+ offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
+ PMD_INIT_LOG(DEBUG, "[%d] rxq=%p pool=%s nb_desc=(%d/%d)"
+ " phy=0x%" PRIx64 " offloads=0x%" PRIx64,
nicvf_netdev_qidx(nic, qidx), rxq, mp->name, nb_desc,
- rte_mempool_avail_count(mp), rxq->phys);
+ rte_mempool_avail_count(mp), rxq->phys, offloads);
dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] = rxq;
dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
PMD_INIT_FUNC_TRACE();
- dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ /* Autonegotiation may be disabled */
+ dev_info->speed_capa = ETH_LINK_SPEED_FIXED;
+ dev_info->speed_capa |= ETH_LINK_SPEED_10M | ETH_LINK_SPEED_100M |
+ ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
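+ /* CN81XX supports up to 10G; other ThunderX variants also offer 40G. */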
+ if (nicvf_hw_version(nic) != PCI_SUB_DEVICE_ID_CN81XX_NICVF)
+ dev_info->speed_capa |= ETH_LINK_SPEED_40G;
dev_info->min_rx_bufsize = ETHER_MIN_MTU;
- dev_info->max_rx_pktlen = NIC_HW_MAX_FRS;
+ dev_info->max_rx_pktlen = NIC_HW_MAX_MTU + ETHER_HDR_LEN;
dev_info->max_rx_queues =
(uint16_t)MAX_RCV_QUEUES_PER_QS * (MAX_SQS_PER_VF + 1);
dev_info->max_tx_queues =
dev_info->max_mac_addrs = 1;
dev_info->max_vfs = pci_dev->max_vfs;
- dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
- dev_info->tx_offload_capa =
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_TCP_TSO |
- DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+ dev_info->rx_offload_capa = NICVF_RX_OFFLOAD_CAPA;
+ dev_info->tx_offload_capa = NICVF_TX_OFFLOAD_CAPA;
+ dev_info->rx_queue_offload_capa = NICVF_RX_OFFLOAD_CAPA;
+ dev_info->tx_queue_offload_capa = NICVF_TX_OFFLOAD_CAPA;
dev_info->reta_size = nic->rss_info.rss_size;
dev_info->hash_key_size = RSS_HASH_KEY_BYTE_SIZE;
dev_info->default_rxconf = (struct rte_eth_rxconf) {
.rx_free_thresh = NICVF_DEFAULT_RX_FREE_THRESH,
.rx_drop_en = 0,
+ .offloads = DEV_RX_OFFLOAD_CRC_STRIP,
};
dev_info->default_txconf = (struct rte_eth_txconf) {
.tx_free_thresh = NICVF_DEFAULT_TX_FREE_THRESH,
- .txq_flags =
- ETH_TXQ_FLAGS_NOMULTSEGS |
- ETH_TXQ_FLAGS_NOREFCOUNT |
- ETH_TXQ_FLAGS_NOMULTMEMP |
- ETH_TXQ_FLAGS_NOVLANOFFL |
- ETH_TXQ_FLAGS_NOXSUMSCTP,
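+ /* Default offloads replacing the removed txq_flags defaults. */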
+ .offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE |
+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM,
};
}
-static nicvf_phys_addr_t
+static nicvf_iova_addr_t
rbdr_rte_mempool_get(void *dev, void *opaque)
{
uint16_t qidx;
struct rte_mbuf *mbuf;
uint16_t rx_start, rx_end;
uint16_t tx_start, tx_end;
+ bool vlan_strip;
PMD_INIT_FUNC_TRACE();
nic->rbdr->tail, nb_rbdr_desc, nic->vf_id);
/* Configure VLAN Strip */
- nicvf_vlan_hw_strip(nic, dev->data->dev_conf.rxmode.hw_vlan_strip);
+ vlan_strip = !!(dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_STRIP);
+ nicvf_vlan_hw_strip(nic, vlan_strip);
/* Based on the packet type(IPv4 or IPv6), the nicvf HW aligns L3 data
* to the 64bit memory address.
if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
2 * VLAN_TAG_SIZE > buffsz)
dev->data->scattered_rx = 1;
- if (rx_conf->enable_scatter)
+ if ((rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER) != 0)
dev->data->scattered_rx = 1;
/* Setup MTU based on max_rx_pkt_len or default */
- mtu = dev->data->dev_conf.rxmode.jumbo_frame ?
+ mtu = dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME ?
dev->data->dev_conf.rxmode.max_rx_pkt_len
- - ETHER_HDR_LEN - ETHER_CRC_LEN
- : ETHER_MTU;
+ - ETHER_HDR_LEN : ETHER_MTU;
if (nicvf_dev_set_mtu(dev, mtu)) {
PMD_INIT_LOG(ERR, "Failed to set default mtu size");
return -EINVAL;
}
+ if ((rxmode->offloads & DEV_RX_OFFLOAD_CRC_STRIP) == 0) {
+ PMD_INIT_LOG(NOTICE, "Can't disable hw crc strip");
+ rxmode->offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
+ }
+
if (txmode->mq_mode) {
PMD_INIT_LOG(INFO, "Tx mq_mode DCB or VMDq not supported");
return -EINVAL;
return -EINVAL;
}
- if (!rxmode->hw_strip_crc) {
- PMD_INIT_LOG(NOTICE, "Can't disable hw crc strip");
- rxmode->hw_strip_crc = 1;
- }
-
- if (rxmode->hw_ip_checksum) {
- PMD_INIT_LOG(NOTICE, "Rxcksum not supported");
- rxmode->hw_ip_checksum = 0;
- }
-
if (rxmode->split_hdr_size) {
PMD_INIT_LOG(INFO, "Rxmode does not support split header");
return -EINVAL;
}
- if (rxmode->hw_vlan_filter) {
- PMD_INIT_LOG(INFO, "VLAN filter not supported");
- return -EINVAL;
- }
-
- if (rxmode->hw_vlan_extend) {
- PMD_INIT_LOG(INFO, "VLAN extended not supported");
- return -EINVAL;
- }
-
- if (rxmode->enable_lro) {
- PMD_INIT_LOG(INFO, "LRO not supported");
- return -EINVAL;
- }
-
if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
PMD_INIT_LOG(INFO, "Setting link speed/duplex not supported");
return -EINVAL;
goto fail;
}
- /* Detach port by returning postive error number */
+ /* Detach port by returning positive error number */
return ENOTSUP;
}
static struct rte_pci_driver rte_nicvf_pmd = {
.id_table = pci_id_nicvf_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
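+ /* Keep BAR mappings even when probe detaches the port with ENOTSUP. */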
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_KEEP_MAPPED_RES |
+ RTE_PCI_DRV_INTR_LSC,
.probe = nicvf_eth_pci_probe,
.remove = nicvf_eth_pci_remove,
};