#include "base/nicvf_plat.h"
#include "nicvf_ethdev.h"
-
+#include "nicvf_rxtx.h"
+#include "nicvf_svf.h"
#include "nicvf_logs.h"
+static void nicvf_dev_stop(struct rte_eth_dev *dev);
+static void nicvf_dev_stop_cleanup(struct rte_eth_dev *dev, bool cleanup);
+static void nicvf_vf_stop(struct rte_eth_dev *dev, struct nicvf *nic,
+ bool cleanup);
+
static inline int
nicvf_atomic_write_link_status(struct rte_eth_dev *dev,
struct rte_eth_link *link)
static void
nicvf_interrupt(void *arg)
{
- struct nicvf *nic = arg;
+ struct rte_eth_dev *dev = arg;
+ struct nicvf *nic = nicvf_pmd_priv(dev);
if (nicvf_reg_poll_interrupts(nic) == NIC_MBOX_MSG_BGX_LINK_CHANGE) {
- if (nic->eth_dev->data->dev_conf.intr_conf.lsc)
- nicvf_set_eth_link_status(nic,
- &nic->eth_dev->data->dev_link);
- _rte_eth_dev_callback_process(nic->eth_dev,
- RTE_ETH_EVENT_INTR_LSC);
+ if (dev->data->dev_conf.intr_conf.lsc)
+ nicvf_set_eth_link_status(nic, &dev->data->dev_link);
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
}
rte_eal_alarm_set(NICVF_INTR_POLL_INTERVAL_MS * 1000,
- nicvf_interrupt, nic);
+ nicvf_interrupt, dev);
+}
+
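+/*
+ * Interrupt handler for secondary queue-set VFs: they carry no link
+ * state of their own, so only the mailbox interrupts are polled and
+ * acknowledged before the alarm is re-armed.
+ */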
+static void
+nicvf_vf_interrupt(void *arg)
+{
+ struct nicvf *nic = arg;
+
+ nicvf_reg_poll_interrupts(nic);
+
+ rte_eal_alarm_set(NICVF_INTR_POLL_INTERVAL_MS * 1000,
+ nicvf_vf_interrupt, nic);
}
static int
-nicvf_periodic_alarm_start(struct nicvf *nic)
+nicvf_periodic_alarm_start(void (fn)(void *), void *arg)
{
- return rte_eal_alarm_set(NICVF_INTR_POLL_INTERVAL_MS * 1000,
- nicvf_interrupt, nic);
+ return rte_eal_alarm_set(NICVF_INTR_POLL_INTERVAL_MS * 1000, fn, arg);
}
static int
-nicvf_periodic_alarm_stop(struct nicvf *nic)
+nicvf_periodic_alarm_stop(void (fn)(void *), void *arg)
{
- return rte_eal_alarm_cancel(nicvf_interrupt, nic);
+ return rte_eal_alarm_cancel(fn, arg);
}
/*
return 0;
}
-static int
-nicvf_dev_get_reg_length(struct rte_eth_dev *dev __rte_unused)
-{
- return nicvf_reg_get_count();
-}
-
static int
nicvf_dev_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs)
{
uint64_t *data = regs->data;
struct nicvf *nic = nicvf_pmd_priv(dev);
- if (data == NULL)
- return -EINVAL;
+ if (data == NULL) {
+ regs->length = nicvf_reg_get_count();
+ regs->width = THUNDERX_REG_BYTES;
+ return 0;
+ }
/* Support only full register dump */
if ((regs->length == 0) ||
stats->oerrors = port_stats.tx_drops;
}
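+/*
+ * Report supported packet types: the common L3/L4 types are always
+ * advertised; tunnel types are appended only when the hardware has the
+ * NICVF_CAP_TUNNEL_PARSING capability. The list is only meaningful for
+ * the PMD's own rx burst handlers, hence the final check.
+ */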
+static const uint32_t *
+nicvf_dev_supported_ptypes_get(struct rte_eth_dev *dev)
+{
+ size_t copied;
+ static uint32_t ptypes[32];
+ struct nicvf *nic = nicvf_pmd_priv(dev);
+ static const uint32_t ptypes_common[] = {
+ RTE_PTYPE_L3_IPV4,
+ RTE_PTYPE_L3_IPV4_EXT,
+ RTE_PTYPE_L3_IPV6,
+ RTE_PTYPE_L3_IPV6_EXT,
+ RTE_PTYPE_L4_TCP,
+ RTE_PTYPE_L4_UDP,
+ RTE_PTYPE_L4_FRAG,
+ };
+ static const uint32_t ptypes_tunnel[] = {
+ RTE_PTYPE_TUNNEL_GRE,
+ RTE_PTYPE_TUNNEL_GENEVE,
+ RTE_PTYPE_TUNNEL_VXLAN,
+ RTE_PTYPE_TUNNEL_NVGRE,
+ };
+ static const uint32_t ptypes_end = RTE_PTYPE_UNKNOWN;
+
+ copied = sizeof(ptypes_common);
+ memcpy(ptypes, ptypes_common, copied);
+ if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) {
+ memcpy((char *)ptypes + copied, ptypes_tunnel,
+ sizeof(ptypes_tunnel));
+ copied += sizeof(ptypes_tunnel);
+ }
+
+ memcpy((char *)ptypes + copied, &ptypes_end, sizeof(ptypes_end));
+ if (dev->rx_pkt_burst == nicvf_recv_pkts ||
+ dev->rx_pkt_burst == nicvf_recv_pkts_multiseg)
+ return ptypes;
+
+ return NULL;
+}
+
static void
nicvf_dev_stats_reset(struct rte_eth_dev *dev)
{
}
static int
-nicvf_qset_cq_alloc(struct nicvf *nic, struct nicvf_rxq *rxq, uint16_t qidx,
- uint32_t desc_cnt)
+nicvf_qset_cq_alloc(struct rte_eth_dev *dev, struct nicvf *nic,
+ struct nicvf_rxq *rxq, uint16_t qidx, uint32_t desc_cnt)
{
const struct rte_memzone *rz;
- uint32_t ring_size = desc_cnt * sizeof(union cq_entry_t);
+ uint32_t ring_size = CMP_QUEUE_SZ_MAX * sizeof(union cq_entry_t);
- rz = rte_eth_dma_zone_reserve(nic->eth_dev, "cq_ring", qidx, ring_size,
- NICVF_CQ_BASE_ALIGN_BYTES, nic->node);
+ rz = rte_eth_dma_zone_reserve(dev, "cq_ring", qidx, ring_size,
+ NICVF_CQ_BASE_ALIGN_BYTES, nic->node);
if (rz == NULL) {
PMD_INIT_LOG(ERR, "Failed to allocate mem for cq hw ring");
return -ENOMEM;
}
static int
-nicvf_qset_sq_alloc(struct nicvf *nic, struct nicvf_txq *sq, uint16_t qidx,
- uint32_t desc_cnt)
+nicvf_qset_sq_alloc(struct rte_eth_dev *dev, struct nicvf *nic,
+ struct nicvf_txq *sq, uint16_t qidx, uint32_t desc_cnt)
{
const struct rte_memzone *rz;
- uint32_t ring_size = desc_cnt * sizeof(union sq_entry_t);
+ uint32_t ring_size = SND_QUEUE_SZ_MAX * sizeof(union sq_entry_t);
- rz = rte_eth_dma_zone_reserve(nic->eth_dev, "sq", qidx, ring_size,
+ rz = rte_eth_dma_zone_reserve(dev, "sq", qidx, ring_size,
NICVF_SQ_BASE_ALIGN_BYTES, nic->node);
if (rz == NULL) {
PMD_INIT_LOG(ERR, "Failed allocate mem for sq hw ring");
return -ENOMEM;
}
+static int
+nicvf_qset_rbdr_alloc(struct rte_eth_dev *dev, struct nicvf *nic,
+ uint32_t desc_cnt, uint32_t buffsz)
+{
+ struct nicvf_rbdr *rbdr;
+ const struct rte_memzone *rz;
+ uint32_t ring_size;
+
+ assert(nic->rbdr == NULL);
+ rbdr = rte_zmalloc_socket("rbdr", sizeof(struct nicvf_rbdr),
+ RTE_CACHE_LINE_SIZE, nic->node);
+ if (rbdr == NULL) {
+ PMD_INIT_LOG(ERR, "Failed to allocate mem for rbdr");
+ return -ENOMEM;
+ }
+
+ ring_size = sizeof(struct rbdr_entry_t) * RBDR_QUEUE_SZ_MAX;
+ rz = rte_eth_dma_zone_reserve(dev, "rbdr", 0, ring_size,
+ NICVF_RBDR_BASE_ALIGN_BYTES, nic->node);
+ if (rz == NULL) {
+ PMD_INIT_LOG(ERR, "Failed to allocate mem for rbdr desc ring");
+ rte_free(rbdr);
+ return -ENOMEM;
+ }
+
+ memset(rz->addr, 0, ring_size);
+
+ rbdr->phys = rz->phys_addr;
+ rbdr->tail = 0;
+ rbdr->next_tail = 0;
+ rbdr->desc = rz->addr;
+ rbdr->buffsz = buffsz;
+ rbdr->qlen_mask = desc_cnt - 1;
+ rbdr->rbdr_status =
+ nicvf_qset_base(nic, 0) + NIC_QSET_RBDR_0_1_STATUS0;
+ rbdr->rbdr_door =
+ nicvf_qset_base(nic, 0) + NIC_QSET_RBDR_0_1_DOOR;
+
+ nic->rbdr = rbdr;
+ return 0;
+}
+
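+/*
+ * Return one precharged RBDR buffer to its mempool: convert the physical
+ * address back to a virtual address and credit the first rx queue that
+ * still has charged buffers outstanding.
+ */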
+static void
+nicvf_rbdr_release_mbuf(struct rte_eth_dev *dev, struct nicvf *nic __rte_unused,
+ nicvf_phys_addr_t phy)
+{
+ uint16_t qidx;
+ void *obj;
+ struct nicvf_rxq *rxq;
+
+ for (qidx = 0; qidx < dev->data->nb_rx_queues; qidx++) {
+ rxq = dev->data->rx_queues[qidx];
+ if (rxq->precharge_cnt) {
+ obj = (void *)nicvf_mbuff_phy2virt(phy,
+ rxq->mbuf_phys_off);
+ rte_mempool_put(rxq->pool, obj);
+ rxq->precharge_cnt--;
+ break;
+ }
+ }
+}
+
+static inline void
+nicvf_rbdr_release_mbufs(struct rte_eth_dev *dev, struct nicvf *nic)
+{
+ uint32_t qlen_mask, head;
+ struct rbdr_entry_t *entry;
+ struct nicvf_rbdr *rbdr = nic->rbdr;
+
+ qlen_mask = rbdr->qlen_mask;
+ head = rbdr->head;
+ while (head != rbdr->tail) {
+ entry = rbdr->desc + head;
+ nicvf_rbdr_release_mbuf(dev, nic, entry->full_addr);
+ head++;
+ head = head & qlen_mask;
+ }
+}
+
static inline void
nicvf_tx_queue_release_mbufs(struct nicvf_txq *txq)
{
txq->xmit_bufs = 0;
}
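+/* Configure and start a send queue, reclaiming it if setup fails */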
+static inline int
+nicvf_start_tx_queue(struct rte_eth_dev *dev, uint16_t qidx)
+{
+ struct nicvf_txq *txq;
+ int ret;
+
+ if (dev->data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED)
+ return 0;
+
+ txq = dev->data->tx_queues[qidx];
+ txq->pool = NULL;
+ ret = nicvf_qset_sq_config(nicvf_pmd_priv(dev), qidx, txq);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to configure sq %d %d", qidx, ret);
+ goto config_sq_error;
+ }
+
+ dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED;
+ return ret;
+
+config_sq_error:
+ nicvf_qset_sq_reclaim(nicvf_pmd_priv(dev), qidx);
+ return ret;
+}
+
+static inline int
+nicvf_vf_stop_tx_queue(struct rte_eth_dev *dev, struct nicvf *nic,
+ uint16_t qidx)
+{
+ struct nicvf_txq *txq;
+ int ret;
+
+ assert(qidx < MAX_SND_QUEUES_PER_QS);
+
+ if (dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] ==
+ RTE_ETH_QUEUE_STATE_STOPPED)
+ return 0;
+
+ ret = nicvf_qset_sq_reclaim(nic, qidx);
+ if (ret)
+ PMD_INIT_LOG(ERR, "Failed to reclaim sq VF%d %d %d",
+ nic->vf_id, qidx, ret);
+
+ txq = dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)];
+ nicvf_tx_queue_release_mbufs(txq);
+ nicvf_tx_queue_reset(txq);
+
+ dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
+ RTE_ETH_QUEUE_STATE_STOPPED;
+ return ret;
+}
+
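+/*
+ * Program the CPI configuration via the PF mailbox, sized to the number
+ * of rx queues that are currently started.
+ */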
+static inline int
+nicvf_configure_cpi(struct rte_eth_dev *dev)
+{
+ struct nicvf *nic = nicvf_pmd_priv(dev);
+ uint16_t qidx, qcnt;
+ int ret;
+
+ /* Count started rx queues */
+ for (qidx = qcnt = 0; qidx < dev->data->nb_rx_queues; qidx++)
+ if (dev->data->rx_queue_state[qidx] ==
+ RTE_ETH_QUEUE_STATE_STARTED)
+ qcnt++;
+
+ nic->cpi_alg = CPI_ALG_NONE;
+ ret = nicvf_mbox_config_cpi(nic, qcnt);
+ if (ret)
+ PMD_INIT_LOG(ERR, "Failed to configure CPI %d", ret);
+
+ return ret;
+}
+
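+/*
+ * Map the ethdev rss_hf flags to nic-specific hash flags, then either
+ * tear RSS down (ETH_MQ_RX_NONE) or configure it over the rx queues.
+ */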
+static inline int
+nicvf_configure_rss(struct rte_eth_dev *dev)
+{
+ struct nicvf *nic = nicvf_pmd_priv(dev);
+ uint64_t rsshf;
+ int ret = -EINVAL;
+
+ rsshf = nicvf_rss_ethdev_to_nic(nic,
+ dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf);
+ PMD_DRV_LOG(INFO, "mode=%d rx_queues=%d loopback=%d rsshf=0x%" PRIx64,
+ dev->data->dev_conf.rxmode.mq_mode,
+ dev->data->nb_rx_queues,
+ dev->data->dev_conf.lpbk_mode, rsshf);
+
+ if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_NONE)
+ ret = nicvf_rss_term(nic);
+ else if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
+ ret = nicvf_rss_config(nic, dev->data->nb_rx_queues, rsshf);
+ if (ret)
+ PMD_INIT_LOG(ERR, "Failed to configure RSS %d", ret);
+
+ return ret;
+}
+
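+/*
+ * Rebuild the default RSS redirection table so traffic is spread evenly
+ * over whichever rx queues are currently started.
+ */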
+static int
+nicvf_configure_rss_reta(struct rte_eth_dev *dev)
+{
+ struct nicvf *nic = nicvf_pmd_priv(dev);
+ unsigned int idx, qmap_size;
+ uint8_t qmap[RTE_MAX_QUEUES_PER_PORT];
+ uint8_t default_reta[NIC_MAX_RSS_IDR_TBL_SIZE];
+
+ if (nic->cpi_alg != CPI_ALG_NONE)
+ return -EINVAL;
+
+ /* Prepare queue map */
+ for (idx = 0, qmap_size = 0; idx < dev->data->nb_rx_queues; idx++) {
+ if (dev->data->rx_queue_state[idx] ==
+ RTE_ETH_QUEUE_STATE_STARTED)
+ qmap[qmap_size++] = idx;
+ }
+
+ /* Guard the modulo below: bail out if no rx queue is started */
+ if (qmap_size == 0)
+ return 0;
+
+ /* Update default RSS RETA */
+ for (idx = 0; idx < NIC_MAX_RSS_IDR_TBL_SIZE; idx++)
+ default_reta[idx] = qmap[idx % qmap_size];
+
+ return nicvf_rss_reta_update(nic, default_reta,
+ NIC_MAX_RSS_IDR_TBL_SIZE);
+}
+
static void
nicvf_dev_tx_queue_release(void *sq)
{
}
}
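+/*
+ * Pick the tx burst callback: the single-segment routine is used only
+ * when every tx queue was configured with ETH_TXQ_FLAGS_NOMULTSEGS.
+ */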
+static void
+nicvf_set_tx_function(struct rte_eth_dev *dev)
+{
+ struct nicvf_txq *txq = NULL;
+ size_t i;
+ bool multiseg = false;
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+ if ((txq->txq_flags & ETH_TXQ_FLAGS_NOMULTSEGS) == 0) {
+ multiseg = true;
+ break;
+ }
+ }
+
+ /* Use a simple Tx queue (no offloads, no multi segs) if possible */
+ if (multiseg) {
+ PMD_DRV_LOG(DEBUG, "Using multi-segment tx callback");
+ dev->tx_pkt_burst = nicvf_xmit_pkts_multiseg;
+ } else {
+ PMD_DRV_LOG(DEBUG, "Using single-segment tx callback");
+ dev->tx_pkt_burst = nicvf_xmit_pkts;
+ }
+
+ /* txq is the first multi-seg queue found, or the last queue checked */
+ if (txq == NULL)
+ return;
+
+ if (txq->pool_free == nicvf_single_pool_free_xmited_buffers)
+ PMD_DRV_LOG(DEBUG, "Using single-mempool tx free method");
+ else
+ PMD_DRV_LOG(DEBUG, "Using multi-mempool tx free method");
+}
+
+static void
+nicvf_set_rx_function(struct rte_eth_dev *dev)
+{
+ if (dev->data->scattered_rx) {
+ PMD_DRV_LOG(DEBUG, "Using multi-segment rx callback");
+ dev->rx_pkt_burst = nicvf_recv_pkts_multiseg;
+ } else {
+ PMD_DRV_LOG(DEBUG, "Using single-segment rx callback");
+ dev->rx_pkt_burst = nicvf_recv_pkts;
+ }
+}
+
static int
nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
uint16_t nb_desc, unsigned int socket_id,
(tx_conf->tx_free_thresh == NICVF_DEFAULT_TX_FREE_THRESH ?
NICVF_TX_FREE_MPOOL_THRESH :
tx_conf->tx_free_thresh);
+ txq->pool_free = nicvf_multi_pool_free_xmited_buffers;
+ } else {
+ txq->pool_free = nicvf_single_pool_free_xmited_buffers;
}
/* Allocate software ring */
return -ENOMEM;
}
- if (nicvf_qset_sq_alloc(nic, txq, qidx, nb_desc)) {
+ if (nicvf_qset_sq_alloc(dev, nic, txq, qidx, nb_desc)) {
PMD_INIT_LOG(ERR, "Failed to allocate mem for sq %d", qidx);
nicvf_dev_tx_queue_release(txq);
return -ENOMEM;
return 0;
}
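+/*
+ * Drain a rx queue by pulling packets through the configured rx burst
+ * callback and freeing them; the RBDR refill count for the queue is
+ * logged alongside the number of released buffers.
+ */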
+static inline void
+nicvf_rx_queue_release_mbufs(struct rte_eth_dev *dev, struct nicvf_rxq *rxq)
+{
+ uint32_t rxq_cnt;
+ uint32_t nb_pkts, released_pkts = 0;
+ uint32_t refill_cnt = 0;
+ struct rte_mbuf *rx_pkts[NICVF_MAX_RX_FREE_THRESH];
+
+ if (dev->rx_pkt_burst == NULL)
+ return;
+
+ while ((rxq_cnt = nicvf_dev_rx_queue_count(dev, rxq->queue_id))) {
+ nb_pkts = dev->rx_pkt_burst(rxq, rx_pkts,
+ NICVF_MAX_RX_FREE_THRESH);
+ PMD_DRV_LOG(INFO, "nb_pkts=%d rxq_cnt=%d", nb_pkts, rxq_cnt);
+ while (nb_pkts) {
+ rte_pktmbuf_free_seg(rx_pkts[--nb_pkts]);
+ released_pkts++;
+ }
+ }
+
+ refill_cnt += nicvf_dev_rbdr_refill(dev, rxq->queue_id);
+ PMD_DRV_LOG(INFO, "free_cnt=%d refill_cnt=%d",
+ released_pkts, refill_cnt);
+}
+
static void
nicvf_rx_queue_reset(struct nicvf_rxq *rxq)
{
rxq->recv_buffers = 0;
}
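+/*
+ * Start a receive queue: attach the shared RBDR, then configure the rq
+ * and its completion queue, reclaiming both in reverse order on failure.
+ */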
+static inline int
+nicvf_start_rx_queue(struct rte_eth_dev *dev, uint16_t qidx)
+{
+ struct nicvf *nic = nicvf_pmd_priv(dev);
+ struct nicvf_rxq *rxq;
+ int ret;
+
+ if (dev->data->rx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED)
+ return 0;
+
+ /* Update rbdr pointer to all rxq */
+ rxq = dev->data->rx_queues[qidx];
+ rxq->shared_rbdr = nic->rbdr;
+
+ ret = nicvf_qset_rq_config(nic, qidx, rxq);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to configure rq %d %d", qidx, ret);
+ goto config_rq_error;
+ }
+ ret = nicvf_qset_cq_config(nic, qidx, rxq);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to configure cq %d %d", qidx, ret);
+ goto config_cq_error;
+ }
+
+ dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED;
+ return 0;
+
+config_cq_error:
+ nicvf_qset_cq_reclaim(nic, qidx);
+config_rq_error:
+ nicvf_qset_rq_reclaim(nic, qidx);
+ return ret;
+}
+
+static inline int
+nicvf_vf_stop_rx_queue(struct rte_eth_dev *dev, struct nicvf *nic,
+ uint16_t qidx)
+{
+ struct nicvf_rxq *rxq;
+ int ret, other_error;
+
+ if (dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] ==
+ RTE_ETH_QUEUE_STATE_STOPPED)
+ return 0;
+
+ ret = nicvf_qset_rq_reclaim(nic, qidx);
+ if (ret)
+ PMD_INIT_LOG(ERR, "Failed to reclaim rq VF%d %d %d",
+ nic->vf_id, qidx, ret);
+
+ other_error = ret;
+ rxq = dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)];
+ nicvf_rx_queue_release_mbufs(dev, rxq);
+ nicvf_rx_queue_reset(rxq);
+
+ ret = nicvf_qset_cq_reclaim(nic, qidx);
+ if (ret)
+ PMD_INIT_LOG(ERR, "Failed to reclaim cq VF%d %d %d",
+ nic->vf_id, qidx, ret);
+
+ other_error |= ret;
+ dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
+ RTE_ETH_QUEUE_STATE_STOPPED;
+ return other_error;
+}
+
static void
nicvf_dev_rx_queue_release(void *rx_queue)
{
- struct nicvf_rxq *rxq = rx_queue;
-
PMD_INIT_FUNC_TRACE();
- if (rxq)
- rte_free(rxq);
+ rte_free(rx_queue);
+}
+
+static int
+nicvf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
+{
+ int ret;
+
+ ret = nicvf_start_rx_queue(dev, qidx);
+ if (ret)
+ return ret;
+
+ ret = nicvf_configure_cpi(dev);
+ if (ret)
+ return ret;
+
+ return nicvf_configure_rss_reta(dev);
+}
+
+static int
+nicvf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
+{
+ int ret;
+ struct nicvf *nic = nicvf_pmd_priv(dev);
+
+ if (qidx >= MAX_RCV_QUEUES_PER_QS)
+ nic = nic->snicvf[(qidx / MAX_RCV_QUEUES_PER_QS - 1)];
+
+ qidx = qidx % MAX_RCV_QUEUES_PER_QS;
+
+ ret = nicvf_vf_stop_rx_queue(dev, nic, qidx);
+ ret |= nicvf_configure_cpi(dev);
+ ret |= nicvf_configure_rss_reta(dev);
+ return ret;
+}
+
+static int
+nicvf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
+{
+ return nicvf_start_tx_queue(dev, qidx);
+}
+
+static int
+nicvf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
+{
+ struct nicvf *nic = nicvf_pmd_priv(dev);
+
+ if (qidx >= MAX_SND_QUEUES_PER_QS)
+ nic = nic->snicvf[(qidx / MAX_SND_QUEUES_PER_QS - 1)];
+
+ qidx = qidx % MAX_SND_QUEUES_PER_QS;
+
+ return nicvf_vf_stop_tx_queue(dev, nic, qidx);
}
+
static int
nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
uint16_t nb_desc, unsigned int socket_id,
PMD_DRV_LOG(WARNING, "socket_id expected %d, configured %d",
socket_id, nic->node);
- /* Mempool memory should be contiguous */
+ /* Mempool memory must be contiguous, i.e. a single memory segment */
if (mp->nb_mem_chunks != 1) {
- PMD_INIT_LOG(ERR, "Non contiguous mempool, check huge page sz");
+ PMD_INIT_LOG(ERR, "Non-contiguous mempool, add more huge pages");
+ return -EINVAL;
+ }
+
+ /* Mempool memory must be physically contiguous */
+ if (mp->flags & MEMPOOL_F_NO_PHYS_CONTIG) {
+ PMD_INIT_LOG(ERR, "Mempool memory must be physically contiguous");
return -EINVAL;
}
rxq->cq_status = nicvf_qset_base(nic, qidx) + NIC_QSET_CQ_0_7_STATUS;
rxq->cq_door = nicvf_qset_base(nic, qidx) + NIC_QSET_CQ_0_7_DOOR;
rxq->precharge_cnt = 0;
- rxq->rbptr_offset = NICVF_CQE_RBPTR_WORD;
+
+ if (nicvf_hw_cap(nic) & NICVF_CAP_CQE_RX2)
+ rxq->rbptr_offset = NICVF_CQE_RX2_RBPTR_WORD;
+ else
+ rxq->rbptr_offset = NICVF_CQE_RBPTR_WORD;
+
/* Alloc completion queue */
- if (nicvf_qset_cq_alloc(nic, rxq, rxq->queue_id, nb_desc)) {
+ if (nicvf_qset_cq_alloc(dev, nic, rxq, rxq->queue_id, nb_desc)) {
PMD_INIT_LOG(ERR, "failed to allocate cq %u", rxq->queue_id);
nicvf_dev_rx_queue_release(rxq);
return -ENOMEM;
PMD_RX_LOG(DEBUG, "[%d] rxq=%p pool=%s nb_desc=(%d/%d) phy=%" PRIx64,
qidx, rxq, mp->name, nb_desc,
- rte_mempool_count(mp), rxq->phys);
+ rte_mempool_avail_count(mp), rxq->phys);
dev->data->rx_queues[qidx] = rxq;
dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
};
}
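+/*
+ * RBDR precharge callback: hand out one buffer per call, drawn from the
+ * first rx queue pool that has not yet reached its descriptor count, so
+ * charged buffers stay balanced across pools. Returns the buffer's
+ * physical address, or 0 when no pool can supply one.
+ */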
+static nicvf_phys_addr_t
+rbdr_rte_mempool_get(void *dev, void *opaque)
+{
+ uint16_t qidx;
+ uintptr_t mbuf;
+ struct nicvf_rxq *rxq;
+ struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)dev;
+ struct nicvf *nic __rte_unused = (struct nicvf *)opaque;
+
+ for (qidx = 0; qidx < eth_dev->data->nb_rx_queues; qidx++) {
+ rxq = eth_dev->data->rx_queues[qidx];
+ /* Maintain equal buffer count across all pools */
+ if (rxq->precharge_cnt >= rxq->qlen_mask)
+ continue;
+ mbuf = (uintptr_t)rte_pktmbuf_alloc(rxq->pool);
+ if (mbuf) {
+ /* Count the buffer as charged only after a successful alloc */
+ rxq->precharge_cnt++;
+ return nicvf_mbuff_virt2phy(mbuf, rxq->mbuf_phys_off);
+ }
+ }
+ return 0;
+}
+
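+/*
+ * Bring the port up: validate that all rx mempools share one buffer size
+ * and layout, allocate and precharge the shared RBDR, start the rx/tx
+ * queues, then program CPI, RSS and loopback before telling the PF that
+ * configuration is done.
+ */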
+static int
+nicvf_dev_start(struct rte_eth_dev *dev)
+{
+ int ret;
+ uint16_t qidx;
+ uint32_t buffsz = 0, rbdrsz = 0;
+ uint32_t total_rxq_desc, nb_rbdr_desc, exp_buffs;
+ uint64_t mbuf_phys_off = 0;
+ struct nicvf_rxq *rxq;
+ struct rte_pktmbuf_pool_private *mbp_priv;
+ struct rte_mbuf *mbuf;
+ struct nicvf *nic = nicvf_pmd_priv(dev);
+ struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
+ uint16_t mtu;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Userspace process exited without a proper shutdown in the last run */
+ if (nicvf_qset_rbdr_active(nic, 0))
+ nicvf_dev_stop(dev);
+
+ /*
+ * The thunderx nicvf PMD can support more than one pool per port only
+ * when:
+ * 1) the data payload size is the same across all pools on the port, AND
+ * 2) all mbufs in the pools come from the same hugepage, AND
+ * 3) the mbuf metadata size is the same across all pools on the port.
+ *
+ * This supports existing applications that use multiple pools per port;
+ * using multiple pools for QoS, however, is not addressed here.
+ */
+
+ /* Validate RBDR buff size */
+ for (qidx = 0; qidx < dev->data->nb_rx_queues; qidx++) {
+ rxq = dev->data->rx_queues[qidx];
+ mbp_priv = rte_mempool_get_priv(rxq->pool);
+ buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
+ if (buffsz % 128) {
+ PMD_INIT_LOG(ERR, "rxbuf size must be multiply of 128");
+ return -EINVAL;
+ }
+ if (rbdrsz == 0)
+ rbdrsz = buffsz;
+ if (rbdrsz != buffsz) {
+ PMD_INIT_LOG(ERR, "buffsz not same, qid=%d (%d/%d)",
+ qidx, rbdrsz, buffsz);
+ return -EINVAL;
+ }
+ }
+
+ /* Validate mempool attributes */
+ for (qidx = 0; qidx < dev->data->nb_rx_queues; qidx++) {
+ rxq = dev->data->rx_queues[qidx];
+ rxq->mbuf_phys_off = nicvf_mempool_phy_offset(rxq->pool);
+ mbuf = rte_pktmbuf_alloc(rxq->pool);
+ if (mbuf == NULL) {
+ PMD_INIT_LOG(ERR, "Failed allocate mbuf qid=%d pool=%s",
+ qidx, rxq->pool->name);
+ return -ENOMEM;
+ }
+ rxq->mbuf_phys_off -= nicvf_mbuff_meta_length(mbuf);
+ rxq->mbuf_phys_off -= RTE_PKTMBUF_HEADROOM;
+ rte_pktmbuf_free(mbuf);
+
+ if (mbuf_phys_off == 0)
+ mbuf_phys_off = rxq->mbuf_phys_off;
+ if (mbuf_phys_off != rxq->mbuf_phys_off) {
+ PMD_INIT_LOG(ERR, "pool params not same,%s %" PRIx64,
+ rxq->pool->name, mbuf_phys_off);
+ return -EINVAL;
+ }
+ }
+
+ /* Check the level of buffers in the pool */
+ total_rxq_desc = 0;
+ for (qidx = 0; qidx < dev->data->nb_rx_queues; qidx++) {
+ rxq = dev->data->rx_queues[qidx];
+ /* Count total numbers of rxq descs */
+ total_rxq_desc += rxq->qlen_mask + 1;
+ exp_buffs = RTE_MEMPOOL_CACHE_MAX_SIZE + rxq->rx_free_thresh;
+ exp_buffs *= dev->data->nb_rx_queues;
+ if (rte_mempool_avail_count(rxq->pool) < exp_buffs) {
+ PMD_INIT_LOG(ERR, "Buff shortage in pool=%s (%d/%d)",
+ rxq->pool->name,
+ rte_mempool_avail_count(rxq->pool),
+ exp_buffs);
+ return -ENOENT;
+ }
+ }
+
+ /* Check RBDR desc overflow */
+ ret = nicvf_qsize_rbdr_roundup(total_rxq_desc);
+ if (ret == 0) {
+ PMD_INIT_LOG(ERR, "Reached RBDR desc limit, reduce nr desc");
+ return -ENOMEM;
+ }
+
+ /* Enable qset */
+ ret = nicvf_qset_config(nic);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to enable qset %d", ret);
+ return ret;
+ }
+
+ /* Allocate RBDR and RBDR ring desc */
+ nb_rbdr_desc = nicvf_qsize_rbdr_roundup(total_rxq_desc);
+ ret = nicvf_qset_rbdr_alloc(dev, nic, nb_rbdr_desc, rbdrsz);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to allocate memory for rbdr alloc");
+ goto qset_reclaim;
+ }
+
+ /* Enable and configure RBDR registers */
+ ret = nicvf_qset_rbdr_config(nic, 0);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to configure rbdr %d", ret);
+ goto qset_rbdr_free;
+ }
+
+ /* Fill rte_mempool buffers in RBDR pool and precharge it */
+ ret = nicvf_qset_rbdr_precharge(dev, nic, 0, rbdr_rte_mempool_get,
+ total_rxq_desc);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to fill rbdr %d", ret);
+ goto qset_rbdr_reclaim;
+ }
+
+ PMD_DRV_LOG(INFO, "Filled %d out of %d entries in RBDR",
+ nic->rbdr->tail, nb_rbdr_desc);
+
+ /* Configure RX queues */
+ for (qidx = 0; qidx < dev->data->nb_rx_queues; qidx++) {
+ ret = nicvf_start_rx_queue(dev, qidx);
+ if (ret)
+ goto start_rxq_error;
+ }
+
+ /* Configure VLAN Strip */
+ nicvf_vlan_hw_strip(nic, dev->data->dev_conf.rxmode.hw_vlan_strip);
+
+ /* Configure TX queues */
+ for (qidx = 0; qidx < dev->data->nb_tx_queues; qidx++) {
+ ret = nicvf_start_tx_queue(dev, qidx);
+ if (ret)
+ goto start_txq_error;
+ }
+
+ /* Configure CPI algorithm */
+ ret = nicvf_configure_cpi(dev);
+ if (ret)
+ goto start_txq_error;
+
+ /* Configure RSS */
+ ret = nicvf_configure_rss(dev);
+ if (ret)
+ goto qset_rss_error;
+
+ /* Configure loopback */
+ ret = nicvf_loopback_config(nic, dev->data->dev_conf.lpbk_mode);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to configure loopback %d", ret);
+ goto qset_rss_error;
+ }
+
+ /* Reset all statistics counters attached to this port */
+ ret = nicvf_mbox_reset_stat_counters(nic, 0x3FFF, 0x1F, 0xFFFF, 0xFFFF);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to reset stat counters %d", ret);
+ goto qset_rss_error;
+ }
+
+ /* Setup scatter mode if needed by jumbo */
+ if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
+ 2 * VLAN_TAG_SIZE > buffsz)
+ dev->data->scattered_rx = 1;
+ if (rx_conf->enable_scatter)
+ dev->data->scattered_rx = 1;
+
+ /* Setup MTU based on max_rx_pkt_len or default */
+ mtu = dev->data->dev_conf.rxmode.jumbo_frame ?
+ dev->data->dev_conf.rxmode.max_rx_pkt_len
+ - ETHER_HDR_LEN - ETHER_CRC_LEN
+ : ETHER_MTU;
+
+ if (nicvf_dev_set_mtu(dev, mtu)) {
+ PMD_INIT_LOG(ERR, "Failed to set default mtu size");
+ return -EBUSY;
+ }
+
+ /* Configure callbacks based on scatter mode */
+ nicvf_set_tx_function(dev);
+ nicvf_set_rx_function(dev);
+
+ /* Done; let the PF turn the BGX RX and TX switches to the ON position */
+ nicvf_mbox_cfg_done(nic);
+ return 0;
+
+qset_rss_error:
+ nicvf_rss_term(nic);
+start_txq_error:
+ for (qidx = 0; qidx < dev->data->nb_tx_queues; qidx++)
+ nicvf_vf_stop_tx_queue(dev, nic, qidx);
+start_rxq_error:
+ for (qidx = 0; qidx < dev->data->nb_rx_queues; qidx++)
+ nicvf_vf_stop_rx_queue(dev, nic, qidx);
+qset_rbdr_reclaim:
+ nicvf_qset_rbdr_reclaim(nic, 0);
+ nicvf_rbdr_release_mbufs(dev, nic);
+qset_rbdr_free:
+ if (nic->rbdr) {
+ rte_free(nic->rbdr);
+ nic->rbdr = NULL;
+ }
+qset_reclaim:
+ nicvf_qset_reclaim(nic);
+ return ret;
+}
+
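+/*
+ * Common stop path: tear down the secondary queue-set VFs first, then
+ * the primary VF; the cleanup flag additionally asks the PF to shut the
+ * BGX switches (used on device close).
+ */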
+static void
+nicvf_dev_stop_cleanup(struct rte_eth_dev *dev, bool cleanup)
+{
+ size_t i;
+ int ret;
+ struct nicvf *nic = nicvf_pmd_priv(dev);
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Teardown secondary vf first */
+ for (i = 0; i < nic->sqs_count; i++) {
+ if (!nic->snicvf[i])
+ continue;
+
+ nicvf_vf_stop(dev, nic->snicvf[i], cleanup);
+ }
+
+ /* Stop the primary VF now */
+ nicvf_vf_stop(dev, nic, cleanup);
+
+ /* Disable loopback */
+ ret = nicvf_loopback_config(nic, 0);
+ if (ret)
+ PMD_INIT_LOG(ERR, "Failed to disable loopback %d", ret);
+
+ /* Reclaim CPI configuration */
+ ret = nicvf_mbox_config_cpi(nic, 0);
+ if (ret)
+ PMD_INIT_LOG(ERR, "Failed to reclaim CPI config %d", ret);
+}
+
+static void
+nicvf_dev_stop(struct rte_eth_dev *dev)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ nicvf_dev_stop_cleanup(dev, false);
+}
+
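+/* Stop all queues owned by a single VF and release its RBDR state */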
+static void
+nicvf_vf_stop(struct rte_eth_dev *dev, struct nicvf *nic, bool cleanup)
+{
+ int ret;
+ uint16_t qidx;
+ uint16_t tx_start, tx_end;
+ uint16_t rx_start, rx_end;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (cleanup) {
+ /* Let the PF turn the BGX RX and TX switches to the OFF position */
+ nicvf_mbox_shutdown(nic);
+ }
+
+ /* Disable VLAN Strip */
+ nicvf_vlan_hw_strip(nic, 0);
+
+ /* Get queue ranges for this VF */
+ nicvf_tx_range(dev, nic, &tx_start, &tx_end);
+
+ for (qidx = tx_start; qidx <= tx_end; qidx++)
+ nicvf_vf_stop_tx_queue(dev, nic, qidx % MAX_SND_QUEUES_PER_QS);
+
+ /* Get queue ranges for this VF */
+ nicvf_rx_range(dev, nic, &rx_start, &rx_end);
+
+ /* Reclaim rq */
+ for (qidx = rx_start; qidx <= rx_end; qidx++)
+ nicvf_vf_stop_rx_queue(dev, nic, qidx % MAX_RCV_QUEUES_PER_QS);
+
+ /* Reclaim RBDR */
+ ret = nicvf_qset_rbdr_reclaim(nic, 0);
+ if (ret)
+ PMD_INIT_LOG(ERR, "Failed to reclaim RBDR %d", ret);
+
+ /* Move all charged buffers in RBDR back to pool */
+ if (nic->rbdr != NULL)
+ nicvf_rbdr_release_mbufs(dev, nic);
+
+ /* Disable qset */
+ ret = nicvf_qset_reclaim(nic);
+ if (ret)
+ PMD_INIT_LOG(ERR, "Failed to disable qset %d", ret);
+
+ /* Disable all interrupts */
+ nicvf_disable_all_interrupts(nic);
+
+ /* Free RBDR SW structure */
+ if (nic->rbdr) {
+ rte_free(nic->rbdr);
+ nic->rbdr = NULL;
+ }
+}
+
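+/* Full teardown: stop with cleanup and cancel every polling alarm */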
+static void
+nicvf_dev_close(struct rte_eth_dev *dev)
+{
+ size_t i;
+ struct nicvf *nic = nicvf_pmd_priv(dev);
+
+ PMD_INIT_FUNC_TRACE();
+
+ nicvf_dev_stop_cleanup(dev, true);
+ nicvf_periodic_alarm_stop(nicvf_interrupt, dev);
+
+ for (i = 0; i < nic->sqs_count; i++) {
+ if (!nic->snicvf[i])
+ continue;
+
+ nicvf_periodic_alarm_stop(nicvf_vf_interrupt, nic->snicvf[i]);
+ }
+}
+
static int
nicvf_dev_configure(struct rte_eth_dev *dev)
{
/* Initialize and register driver with DPDK Application */
static const struct eth_dev_ops nicvf_eth_dev_ops = {
.dev_configure = nicvf_dev_configure,
+ .dev_start = nicvf_dev_start,
+ .dev_stop = nicvf_dev_stop,
.link_update = nicvf_dev_link_update,
+ .dev_close = nicvf_dev_close,
.stats_get = nicvf_dev_stats_get,
.stats_reset = nicvf_dev_stats_reset,
.promiscuous_enable = nicvf_dev_promisc_enable,
.dev_infos_get = nicvf_dev_info_get,
+ .dev_supported_ptypes_get = nicvf_dev_supported_ptypes_get,
.mtu_set = nicvf_dev_set_mtu,
.reta_update = nicvf_dev_reta_update,
.reta_query = nicvf_dev_reta_query,
.rss_hash_update = nicvf_dev_rss_hash_update,
.rss_hash_conf_get = nicvf_dev_rss_hash_conf_get,
+ .rx_queue_start = nicvf_dev_rx_queue_start,
+ .rx_queue_stop = nicvf_dev_rx_queue_stop,
+ .tx_queue_start = nicvf_dev_tx_queue_start,
+ .tx_queue_stop = nicvf_dev_tx_queue_stop,
.rx_queue_setup = nicvf_dev_rx_queue_setup,
.rx_queue_release = nicvf_dev_rx_queue_release,
+ .rx_queue_count = nicvf_dev_rx_queue_count,
.tx_queue_setup = nicvf_dev_tx_queue_setup,
.tx_queue_release = nicvf_dev_tx_queue_release,
- .get_reg_length = nicvf_dev_get_reg_length,
.get_reg = nicvf_dev_get_regs,
};
eth_dev->dev_ops = &nicvf_eth_dev_ops;
+ /* For secondary processes, the primary has done all the work */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ /* Setup callbacks for secondary process */
+ nicvf_set_tx_function(eth_dev);
+ nicvf_set_rx_function(eth_dev);
+ return 0;
+ }
+
pci_dev = eth_dev->pci_dev;
rte_eth_copy_pci_info(eth_dev, pci_dev);
nic->vendor_id = pci_dev->id.vendor_id;
nic->subsystem_device_id = pci_dev->id.subsystem_device_id;
nic->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
- nic->eth_dev = eth_dev;
PMD_INIT_LOG(DEBUG, "nicvf: device (%x:%x) %u:%u:%u:%u",
pci_dev->id.vendor_id, pci_dev->id.device_id,
nicvf_disable_all_interrupts(nic);
- ret = nicvf_periodic_alarm_start(nic);
+ ret = nicvf_periodic_alarm_start(nicvf_interrupt, eth_dev);
if (ret) {
PMD_INIT_LOG(ERR, "Failed to start period alarm");
goto fail;
goto malloc_fail;
}
- ret = nicvf_mbox_get_rss_size(nic);
- if (ret) {
- PMD_INIT_LOG(ERR, "Failed to get rss table size");
- goto malloc_fail;
- }
-
PMD_INIT_LOG(INFO, "Port %d (%x:%x) mac=%02x:%02x:%02x:%02x:%02x:%02x",
eth_dev->data->port_id, nic->vendor_id, nic->device_id,
nic->mac_addr[0], nic->mac_addr[1], nic->mac_addr[2],
malloc_fail:
rte_free(eth_dev->data->mac_addrs);
alarm_fail:
- nicvf_periodic_alarm_stop(nic);
+ nicvf_periodic_alarm_stop(nicvf_interrupt, eth_dev);
fail:
return ret;
}
{
.class_id = RTE_CLASS_ANY_ID,
.vendor_id = PCI_VENDOR_ID_CAVIUM,
- .device_id = PCI_DEVICE_ID_THUNDERX_PASS1_NICVF,
+ .device_id = PCI_DEVICE_ID_THUNDERX_CN88XX_PASS1_NICVF,
.subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM,
- .subsystem_device_id = PCI_SUB_DEVICE_ID_THUNDERX_PASS1_NICVF,
+ .subsystem_device_id = PCI_SUB_DEVICE_ID_CN88XX_PASS1_NICVF,
},
{
.class_id = RTE_CLASS_ANY_ID,
.vendor_id = PCI_VENDOR_ID_CAVIUM,
- .device_id = PCI_DEVICE_ID_THUNDERX_PASS2_NICVF,
+ .device_id = PCI_DEVICE_ID_THUNDERX_NICVF,
.subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM,
- .subsystem_device_id = PCI_SUB_DEVICE_ID_THUNDERX_PASS2_NICVF,
+ .subsystem_device_id = PCI_SUB_DEVICE_ID_CN88XX_PASS2_NICVF,
+ },
+ {
+ .class_id = RTE_CLASS_ANY_ID,
+ .vendor_id = PCI_VENDOR_ID_CAVIUM,
+ .device_id = PCI_DEVICE_ID_THUNDERX_NICVF,
+ .subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM,
+ .subsystem_device_id = PCI_SUB_DEVICE_ID_CN81XX_NICVF,
},
{
.vendor_id = 0,
static struct eth_driver rte_nicvf_pmd = {
.pci_drv = {
- .name = "rte_nicvf_pmd",
.id_table = pci_id_nicvf_map,
.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+ .probe = rte_eth_dev_pci_probe,
+ .remove = rte_eth_dev_pci_remove,
},
.eth_dev_init = nicvf_eth_dev_init,
.dev_private_size = sizeof(struct nicvf),
};
-static int
-rte_nicvf_pmd_init(const char *name __rte_unused, const char *para __rte_unused)
-{
- PMD_INIT_FUNC_TRACE();
- PMD_INIT_LOG(INFO, "librte_pmd_thunderx nicvf version %s",
- THUNDERX_NICVF_PMD_VERSION);
-
- rte_eth_driver_register(&rte_nicvf_pmd);
- return 0;
-}
-
-static struct rte_driver rte_nicvf_driver = {
- .name = "nicvf_driver",
- .type = PMD_PDEV,
- .init = rte_nicvf_pmd_init,
-};
-
-PMD_REGISTER_DRIVER(rte_nicvf_driver);
+DRIVER_REGISTER_PCI(net_thunderx, rte_nicvf_pmd.pci_drv);
+DRIVER_REGISTER_PCI_TABLE(net_thunderx, pci_id_nicvf_map);