From 69dd4c3d0898559ef326ab8f53beec26b62cc098 Mon Sep 17 00:00:00 2001
From: Jingjing Wu
Date: Wed, 10 Jan 2018 21:01:55 +0800
Subject: [PATCH] net/avf: enable queue and device

enable device and queue setup ops like:

 - dev_configure
 - dev_start
 - dev_stop
 - dev_close
 - dev_infos_get
 - rx_queue_start
 - rx_queue_stop
 - tx_queue_start
 - tx_queue_stop
 - rx_queue_setup
 - rx_queue_release
 - tx_queue_setup
 - tx_queue_release

Signed-off-by: Jingjing Wu
---
 drivers/net/avf/Makefile     |   1 +
 drivers/net/avf/avf.h        |  18 +
 drivers/net/avf/avf_ethdev.c | 366 +++++++++++++++++++++
 drivers/net/avf/avf_rxtx.c   | 616 +++++++++++++++++++++++++++++++++++
 drivers/net/avf/avf_rxtx.h   | 160 +++++++++
 drivers/net/avf/avf_vchnl.c  | 359 +++++++++++++++++++-
 6 files changed, 1518 insertions(+), 2 deletions(-)
 create mode 100644 drivers/net/avf/avf_rxtx.c
 create mode 100644 drivers/net/avf/avf_rxtx.h

diff --git a/drivers/net/avf/Makefile b/drivers/net/avf/Makefile
index 2376cfd26b..e172bf5cee 100644
--- a/drivers/net/avf/Makefile
+++ b/drivers/net/avf/Makefile
@@ -43,5 +43,6 @@
 SRCS-$(CONFIG_RTE_LIBRTE_AVF_PMD) += avf_common.c
 SRCS-$(CONFIG_RTE_LIBRTE_AVF_PMD) += avf_ethdev.c
 SRCS-$(CONFIG_RTE_LIBRTE_AVF_PMD) += avf_vchnl.c
+SRCS-$(CONFIG_RTE_LIBRTE_AVF_PMD) += avf_rxtx.c

 include $(RTE_SDK)/mk/rte.lib.mk

diff --git a/drivers/net/avf/avf.h b/drivers/net/avf/avf.h
index 1e3825d955..4e53d9c019 100644
--- a/drivers/net/avf/avf.h
+++ b/drivers/net/avf/avf.h
@@ -36,6 +36,13 @@
	 VIRTCHNL_VF_OFFLOAD_WB_ON_ITR | \
	 VIRTCHNL_VF_OFFLOAD_RX_POLLING)

+#define AVF_RSS_OFFLOAD_ALL ( \
+	ETH_RSS_FRAG_IPV4 | \
+	ETH_RSS_NONFRAG_IPV4_TCP | \
+	ETH_RSS_NONFRAG_IPV4_UDP | \
+	ETH_RSS_NONFRAG_IPV4_SCTP | \
+	ETH_RSS_NONFRAG_IPV4_OTHER)
+
 #define AVF_MISC_VEC_ID RTE_INTR_VEC_ZERO_OFFSET
 #define AVF_RX_VEC_START RTE_INTR_VEC_RXTX_OFFSET

@@ -181,4 +188,15 @@ _atomic_set_cmd(struct avf_info *vf, enum virtchnl_ops ops)
 int avf_check_api_version(struct avf_adapter *adapter);
 int avf_get_vf_resource(struct avf_adapter *adapter);
 void avf_handle_virtchnl_msg(struct rte_eth_dev *dev);
+int avf_enable_vlan_strip(struct avf_adapter *adapter);
+int avf_disable_vlan_strip(struct avf_adapter *adapter);
+int avf_switch_queue(struct avf_adapter *adapter, uint16_t qid,
+		     bool rx, bool on);
+int avf_enable_queues(struct avf_adapter *adapter);
+int avf_disable_queues(struct avf_adapter *adapter);
+int avf_configure_rss_lut(struct avf_adapter *adapter);
+int avf_configure_rss_key(struct avf_adapter *adapter);
+int avf_configure_queues(struct avf_adapter *adapter);
+int avf_config_irq_map(struct avf_adapter *adapter);
+void avf_add_del_all_mac_addr(struct avf_adapter *adapter, bool add);
 #endif /* _AVF_ETHDEV_H_ */

diff --git a/drivers/net/avf/avf_ethdev.c b/drivers/net/avf/avf_ethdev.c
index f596e6bf72..e0ee05a6c7 100644
--- a/drivers/net/avf/avf_ethdev.c
+++ b/drivers/net/avf/avf_ethdev.c
@@ -31,6 +31,14 @@
 #include "base/avf_type.h"

 #include "avf.h"
+#include "avf_rxtx.h"
+
+static int avf_dev_configure(struct rte_eth_dev *dev);
+static int avf_dev_start(struct rte_eth_dev *dev);
+static void avf_dev_stop(struct rte_eth_dev *dev);
+static void avf_dev_close(struct rte_eth_dev *dev);
+static void avf_dev_info_get(struct rte_eth_dev *dev,
+			     struct rte_eth_dev_info *dev_info);

 int avf_logtype_init;
 int avf_logtype_driver;
@@ -40,8 +48,365 @@ static const struct rte_pci_id pci_id_avf_map[] = {
 };

 static const struct eth_dev_ops avf_eth_dev_ops = {
+	.dev_configure = avf_dev_configure,
+	.dev_start = avf_dev_start,
+	.dev_stop = avf_dev_stop,
+	.dev_close = avf_dev_close,
+	.dev_infos_get = avf_dev_info_get,
+	.rx_queue_start = avf_dev_rx_queue_start,
+	.rx_queue_stop = avf_dev_rx_queue_stop,
+	.tx_queue_start = avf_dev_tx_queue_start,
+	.tx_queue_stop = avf_dev_tx_queue_stop,
+	.rx_queue_setup = avf_dev_rx_queue_setup,
+	.rx_queue_release = avf_dev_rx_queue_release,
+	.tx_queue_setup = avf_dev_tx_queue_setup,
+	.tx_queue_release = avf_dev_tx_queue_release,
 };
+
+static int
+avf_dev_configure(struct rte_eth_dev *dev)
+{
+	struct avf_adapter *ad =
+		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(ad);
+	struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
+
+	/* VLAN stripping setting */
+	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN) {
+		if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+			avf_enable_vlan_strip(ad);
+		else
+			avf_disable_vlan_strip(ad);
+	}
+	return 0;
+}
+
+static int
+avf_init_rss(struct avf_adapter *adapter)
+{
+	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
+	struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(adapter);
+	struct rte_eth_rss_conf *rss_conf;
+	uint8_t i, j, nb_q;
+	int ret;
+
+	rss_conf = &adapter->eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
+	nb_q = RTE_MIN(adapter->eth_dev->data->nb_rx_queues,
+		       AVF_MAX_NUM_QUEUES);
+
+	if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF)) {
+		PMD_DRV_LOG(DEBUG, "RSS is not supported");
+		return -ENOTSUP;
+	}
+	if (adapter->eth_dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
+		PMD_DRV_LOG(WARNING, "RSS is enabled by PF by default");
+		/* set all lut items to default queue */
+		for (i = 0; i < vf->vf_res->rss_lut_size; i++)
+			vf->rss_lut[i] = 0;
+		ret = avf_configure_rss_lut(adapter);
+		return ret;
+	}
+
+	/* In AVF, RSS enablement is set by PF driver. It is not supported
+	 * to set based on rss_conf->rss_hf.
+	 */
+
+	/* configure RSS key */
+	if (!rss_conf->rss_key) {
+		/* Calculate the default hash key */
+		for (i = 0; i < vf->vf_res->rss_key_size; i++)
+			vf->rss_key[i] = (uint8_t)rte_rand();
+	} else {
+		rte_memcpy(vf->rss_key, rss_conf->rss_key,
+			   RTE_MIN(rss_conf->rss_key_len,
+				   vf->vf_res->rss_key_size));
+	}
+
+	/* init RSS LUT table */
+	for (i = 0, j = 0; i < vf->vf_res->rss_lut_size; i++, j++) {
+		if (j >= nb_q)
+			j = 0;
+		vf->rss_lut[i] = j;
+	}
+	/* send virtchnl ops to configure RSS */
+	ret = avf_configure_rss_lut(adapter);
+	if (ret)
+		return ret;
+	ret = avf_configure_rss_key(adapter);
+	if (ret)
+		return ret;
+
+	return 0;
+}
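The LUT loop above distributes table entries round-robin across the active queues. A minimal standalone sketch of the same fill logic, with hypothetical sizes (rss_lut_size = 16, three Rx queues); this is an illustration, not driver code:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t rss_lut[16];	/* hypothetical rss_lut_size */
	uint8_t nb_q = 3;	/* hypothetical active Rx queue count */
	uint8_t i, j;

	/* same round-robin fill as avf_init_rss() */
	for (i = 0, j = 0; i < sizeof(rss_lut); i++, j++) {
		if (j >= nb_q)
			j = 0;
		rss_lut[i] = j;
	}

	for (i = 0; i < sizeof(rss_lut); i++)
		printf("%u ", rss_lut[i]);	/* prints 0 1 2 0 1 2 ... */
	printf("\n");
	return 0;
}

With 16 entries over 3 queues the wrap is uneven (queue 0 gets 6 entries, queues 1 and 2 get 5 each), which is why a power-of-two queue count gives the most even spread.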
+
+static int
+avf_init_rxq(struct rte_eth_dev *dev, struct avf_rx_queue *rxq)
+{
+	struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct rte_eth_dev_data *dev_data = dev->data;
+	uint16_t buf_size, max_pkt_len, len;
+
+	buf_size = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
+
+	/* Calculate the maximum packet length allowed */
+	len = rxq->rx_buf_len * AVF_MAX_CHAINED_RX_BUFFERS;
+	max_pkt_len = RTE_MIN(len, dev->data->dev_conf.rxmode.max_rx_pkt_len);
+
+	/* Check if the jumbo frame and maximum packet length are set
+	 * correctly.
+	 */
+	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+		if (max_pkt_len <= ETHER_MAX_LEN ||
+		    max_pkt_len > AVF_FRAME_SIZE_MAX) {
+			PMD_DRV_LOG(ERR, "maximum packet length must be "
+				    "larger than %u and smaller than %u, "
+				    "as jumbo frame is enabled",
+				    (uint32_t)ETHER_MAX_LEN,
+				    (uint32_t)AVF_FRAME_SIZE_MAX);
+			return -EINVAL;
+		}
+	} else {
+		if (max_pkt_len < ETHER_MIN_LEN ||
+		    max_pkt_len > ETHER_MAX_LEN) {
+			PMD_DRV_LOG(ERR, "maximum packet length must be "
+				    "larger than %u and smaller than %u, "
+				    "as jumbo frame is disabled",
+				    (uint32_t)ETHER_MIN_LEN,
+				    (uint32_t)ETHER_MAX_LEN);
+			return -EINVAL;
+		}
+	}
+
+	rxq->max_pkt_len = max_pkt_len;
+	if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
+	    (rxq->max_pkt_len + 2 * AVF_VLAN_TAG_SIZE) > buf_size)
+		dev_data->scattered_rx = 1;
+	AVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
+	AVF_WRITE_FLUSH(hw);
+
+	return 0;
+}
+
+static int
+avf_init_queues(struct rte_eth_dev *dev)
+{
+	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+	struct avf_rx_queue **rxq =
+		(struct avf_rx_queue **)dev->data->rx_queues;
+	struct avf_tx_queue **txq =
+		(struct avf_tx_queue **)dev->data->tx_queues;
+	int i, ret = AVF_SUCCESS;
+
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		if (!rxq[i] || !rxq[i]->q_set)
+			continue;
+		ret = avf_init_rxq(dev, rxq[i]);
+		if (ret != AVF_SUCCESS)
+			break;
+	}
+	/* TODO: set rx/tx function to vector/scatter/single-segment
+	 * according to parameters
+	 */
+	return ret;
+}
+
+static int
+avf_start_queues(struct rte_eth_dev *dev)
+{
+	struct avf_rx_queue *rxq;
+	struct avf_tx_queue *txq;
+	int i;
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		txq = dev->data->tx_queues[i];
+		if (txq->tx_deferred_start)
+			continue;
+		if (avf_dev_tx_queue_start(dev, i) != 0) {
+			PMD_DRV_LOG(ERR, "Failed to start TX queue %u", i);
+			return -1;
+		}
+	}
+
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		rxq = dev->data->rx_queues[i];
+		if (rxq->rx_deferred_start)
+			continue;
+		if (avf_dev_rx_queue_start(dev, i) != 0) {
+			PMD_DRV_LOG(ERR, "Failed to start RX queue %u", i);
+			return -1;
+		}
+	}
+
+	return 0;
+}
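avf_start_queues() skips any queue whose deferred-start flag is set, leaving it to the per-queue start ops wired up in this patch. A hedged application-side sketch of that flow (port 0, queue 3, and mb_pool are hypothetical; standard ethdev API of this era):

#include <rte_ethdev.h>
#include <rte_lcore.h>

/* Set up one Rx queue as deferred, then start it on demand. */
static int start_deferred_rxq(struct rte_mempool *mb_pool)
{
	struct rte_eth_rxconf rxconf = {
		.rx_deferred_start = 1,	/* skipped by rte_eth_dev_start() */
	};

	if (rte_eth_rx_queue_setup(0, 3, 512, rte_socket_id(),
				   &rxconf, mb_pool) != 0)
		return -1;

	/* ... rte_eth_dev_start(0) runs; queue 3 stays stopped ... */

	/* reaches avf_dev_rx_queue_start() through the rx_queue_start op */
	return rte_eth_dev_rx_queue_start(0, 3);
}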
+
+static int
+avf_dev_start(struct rte_eth_dev *dev)
+{
+	struct avf_adapter *adapter =
+		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+	struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+	struct rte_intr_handle *intr_handle = dev->intr_handle;
+	uint16_t interval;
+	int i;
+
+	PMD_INIT_FUNC_TRACE();
+
+	hw->adapter_stopped = 0;
+
+	vf->max_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
+	vf->num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
+				      dev->data->nb_tx_queues);
+
+	/* TODO: Rx interrupt */
+
+	if (avf_init_queues(dev) != 0) {
+		PMD_DRV_LOG(ERR, "failed to do queue init");
+		return -1;
+	}
+
+	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
+		if (avf_init_rss(adapter) != 0) {
+			PMD_DRV_LOG(ERR, "configure rss failed");
+			goto err_rss;
+		}
+	}
+
+	if (avf_configure_queues(adapter) != 0) {
+		PMD_DRV_LOG(ERR, "configure queues failed");
+		goto err_queue;
+	}
+
+	/* Map interrupt for writeback */
+	vf->nb_msix = 1;
+	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) {
+		/* If WB_ON_ITR is supported, enable it */
+		vf->msix_base = AVF_RX_VEC_START;
+		AVF_WRITE_REG(hw, AVFINT_DYN_CTLN1(vf->msix_base - 1),
+			      AVFINT_DYN_CTLN1_ITR_INDX_MASK |
+			      AVFINT_DYN_CTLN1_WB_ON_ITR_MASK);
+	} else {
+		/* If the WB_ON_ITR offload flag is not set, an interrupt
+		 * is needed for descriptor write back.
+		 */
+		vf->msix_base = AVF_MISC_VEC_ID;
+
+		/* set ITR to max */
+		interval = avf_calc_itr_interval(AVF_QUEUE_ITR_INTERVAL_MAX);
+		AVF_WRITE_REG(hw, AVFINT_DYN_CTL01,
+			      AVFINT_DYN_CTL01_INTENA_MASK |
+			      (AVF_ITR_INDEX_DEFAULT <<
+			       AVFINT_DYN_CTL01_ITR_INDX_SHIFT) |
+			      (interval << AVFINT_DYN_CTL01_INTERVAL_SHIFT));
+	}
+	AVF_WRITE_FLUSH(hw);
+	/* map all queues to the same interrupt */
+	for (i = 0; i < dev->data->nb_rx_queues; i++)
+		vf->rxq_map[0] |= 1 << i;
+	if (avf_config_irq_map(adapter)) {
+		PMD_DRV_LOG(ERR, "config interrupt mapping failed");
+		goto err_queue;
+	}
+
+	/* Set all mac addrs */
+	avf_add_del_all_mac_addr(adapter, TRUE);
+
+	if (avf_start_queues(dev) != 0) {
+		PMD_DRV_LOG(ERR, "enable queues failed");
+		goto err_mac;
+	}
+
+	/* TODO: enable interrupt for RX interrupt */
+	return 0;
+
+err_mac:
+	avf_add_del_all_mac_addr(adapter, FALSE);
+err_queue:
+err_rss:
+	return -1;
+}
+
+static void
+avf_dev_stop(struct rte_eth_dev *dev)
+{
+	struct avf_adapter *adapter =
+		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	PMD_INIT_FUNC_TRACE();
+
+	if (hw->adapter_stopped == 1)
+		return;
+
+	avf_stop_queues(dev);
+
+	/* TODO: Disable the interrupt for Rx */
+
+	/* TODO: Rx interrupt vector mapping free */
+
+	/* remove all mac addrs */
+	avf_add_del_all_mac_addr(adapter, FALSE);
+	hw->adapter_stopped = 1;
+}
+
+static void
+avf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
+{
+	struct avf_adapter *adapter =
+		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+
+	memset(dev_info, 0, sizeof(*dev_info));
+	dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+	dev_info->max_rx_queues = vf->vsi_res->num_queue_pairs;
+	dev_info->max_tx_queues = vf->vsi_res->num_queue_pairs;
+	dev_info->min_rx_bufsize = AVF_BUF_SIZE_MIN;
+	dev_info->max_rx_pktlen = AVF_FRAME_SIZE_MAX;
+	dev_info->hash_key_size = vf->vf_res->rss_key_size;
+	dev_info->reta_size = vf->vf_res->rss_lut_size;
+	dev_info->flow_type_rss_offloads = AVF_RSS_OFFLOAD_ALL;
+	dev_info->max_mac_addrs = AVF_NUM_MACADDR_MAX;
+	dev_info->rx_offload_capa =
+		DEV_RX_OFFLOAD_VLAN_STRIP |
+		DEV_RX_OFFLOAD_IPV4_CKSUM |
+		DEV_RX_OFFLOAD_UDP_CKSUM |
+		DEV_RX_OFFLOAD_TCP_CKSUM;
+	dev_info->tx_offload_capa =
+		DEV_TX_OFFLOAD_VLAN_INSERT |
+		DEV_TX_OFFLOAD_IPV4_CKSUM |
+		DEV_TX_OFFLOAD_UDP_CKSUM |
+		DEV_TX_OFFLOAD_TCP_CKSUM |
+		DEV_TX_OFFLOAD_SCTP_CKSUM |
+		DEV_TX_OFFLOAD_TCP_TSO;
+
+	dev_info->default_rxconf = (struct rte_eth_rxconf) {
+		.rx_free_thresh = AVF_DEFAULT_RX_FREE_THRESH,
+		.rx_drop_en = 0,
+	};
+
+	dev_info->default_txconf = (struct rte_eth_txconf) {
+		.tx_free_thresh = AVF_DEFAULT_TX_FREE_THRESH,
+		.tx_rs_thresh = AVF_DEFAULT_TX_RS_THRESH,
+		.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
+			     ETH_TXQ_FLAGS_NOOFFLOADS,
+	};
+
+	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
+		.nb_max = AVF_MAX_RING_DESC,
+		.nb_min = AVF_MIN_RING_DESC,
+		.nb_align = AVF_ALIGN_RING_DESC,
+	};
+
+	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
+		.nb_max = AVF_MAX_RING_DESC,
+		.nb_min = AVF_MIN_RING_DESC,
+		.nb_align = AVF_ALIGN_RING_DESC,
+	};
+}
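avf_dev_info_get() exports the ring limits (64 to 4096 descriptors, 32-aligned) that the queue setup paths below enforce, so an application can clamp its request before calling setup. A hedged sketch (port id hypothetical; rte_eth_dev_info_get() returned void in this DPDK era):

#include <rte_ethdev.h>

/* Clamp a requested Rx ring size to what the PMD advertises. */
static uint16_t clamp_rx_ring_size(uint16_t port, uint16_t requested)
{
	struct rte_eth_dev_info info;

	rte_eth_dev_info_get(port, &info);	/* fills rx_desc_lim from the PMD */

	if (requested > info.rx_desc_lim.nb_max)
		requested = info.rx_desc_lim.nb_max;
	if (requested < info.rx_desc_lim.nb_min)
		requested = info.rx_desc_lim.nb_min;
	/* round down to the required alignment (32 for AVF) */
	requested -= requested % info.rx_desc_lim.nb_align;
	return requested;
}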
+
 static int
 avf_check_vf_reset_done(struct avf_hw *hw)
 {
@@ -250,6 +615,7 @@ avf_dev_close(struct rte_eth_dev *dev)
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

+	avf_dev_stop(dev);
	avf_shutdown_adminq(hw);
	/* disable uio intr before callback unregister */
	rte_intr_disable(intr_handle);

diff --git a/drivers/net/avf/avf_rxtx.c b/drivers/net/avf/avf_rxtx.c
new file mode 100644
index 0000000000..2d4fb4cd13
--- /dev/null
+++ b/drivers/net/avf/avf_rxtx.c
@@ -0,0 +1,616 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <stdint.h>
+#include <stdarg.h>
+#include <unistd.h>
+#include <inttypes.h>
+#include <sys/queue.h>
+
+#include <rte_mbuf.h>
+#include <rte_mempool.h>
+#include <rte_malloc.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_tcp.h>
+#include <rte_sctp.h>
+#include <rte_udp.h>
+#include <rte_ip.h>
+#include <rte_net.h>
+#include <rte_debug.h>
+
+#include "avf_log.h"
+#include "base/avf_prototype.h"
+#include "base/avf_type.h"
+#include "avf.h"
+#include "avf_rxtx.h"
+
+static inline int
+check_rx_thresh(uint16_t nb_desc, uint16_t thresh)
+{
+	/* The following constraints must be satisfied:
+	 *   thresh >= AVF_RX_MAX_BURST
+	 *   thresh < rxq->nb_rx_desc
+	 *   (rxq->nb_rx_desc % thresh) == 0
+	 */
+	if (thresh < AVF_RX_MAX_BURST ||
+	    thresh >= nb_desc ||
+	    (nb_desc % thresh != 0)) {
+		PMD_INIT_LOG(ERR, "rx_free_thresh (%u) must be less than %u, "
+			     "greater than or equal to %u, "
+			     "and a divisor of %u",
+			     thresh, nb_desc, AVF_RX_MAX_BURST, nb_desc);
+		return -EINVAL;
+	}
+	return 0;
+}
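check_rx_thresh() requires the free threshold to be at least the bulk-allocate burst (32), strictly below the ring size, and an even divisor of it. A standalone restatement of the same checks with worked values, for illustration only:

#include <stdint.h>
#include <stdio.h>

#define AVF_RX_MAX_BURST 32	/* mirrors the driver constant */

/* Same three constraints as check_rx_thresh(), standalone. */
static int rx_thresh_ok(uint16_t nb_desc, uint16_t thresh)
{
	return thresh >= AVF_RX_MAX_BURST &&
	       thresh < nb_desc &&
	       nb_desc % thresh == 0;
}

int main(void)
{
	printf("%d\n", rx_thresh_ok(512, 32));	/* 1: valid */
	printf("%d\n", rx_thresh_ok(512, 48));	/* 0: 512 % 48 != 0 */
	printf("%d\n", rx_thresh_ok(512, 16));	/* 0: below the 32 minimum */
	return 0;
}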
+ */ + if (tx_rs_thresh >= (nb_desc - 2)) { + PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be less than the " + "number of TX descriptors (%u) minus 2", + tx_rs_thresh, nb_desc); + return -EINVAL; + } + if (tx_free_thresh >= (nb_desc - 3)) { + PMD_INIT_LOG(ERR, "tx_free_thresh (%u) must be less than the " + "number of TX descriptors (%u) minus 3.", + tx_free_thresh, nb_desc); + return -EINVAL; + } + if (tx_rs_thresh > tx_free_thresh) { + PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be less than or " + "equal to tx_free_thresh (%u).", + tx_rs_thresh, tx_free_thresh); + return -EINVAL; + } + if ((nb_desc % tx_rs_thresh) != 0) { + PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be a divisor of the " + "number of TX descriptors (%u).", + tx_rs_thresh, nb_desc); + return -EINVAL; + } + + return 0; +} + +static inline void +reset_rx_queue(struct avf_rx_queue *rxq) +{ + uint16_t len, i; + + if (!rxq) + return; + + len = rxq->nb_rx_desc + AVF_RX_MAX_BURST; + + for (i = 0; i < len * sizeof(union avf_rx_desc); i++) + ((volatile char *)rxq->rx_ring)[i] = 0; + + memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf)); + + for (i = 0; i < AVF_RX_MAX_BURST; i++) + rxq->sw_ring[rxq->nb_rx_desc + i] = &rxq->fake_mbuf; + + rxq->rx_tail = 0; + rxq->nb_rx_hold = 0; + rxq->pkt_first_seg = NULL; + rxq->pkt_last_seg = NULL; +} + +static inline void +reset_tx_queue(struct avf_tx_queue *txq) +{ + struct avf_tx_entry *txe; + uint16_t i, prev, size; + + if (!txq) { + PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL"); + return; + } + + txe = txq->sw_ring; + size = sizeof(struct avf_tx_desc) * txq->nb_tx_desc; + for (i = 0; i < size; i++) + ((volatile char *)txq->tx_ring)[i] = 0; + + prev = (uint16_t)(txq->nb_tx_desc - 1); + for (i = 0; i < txq->nb_tx_desc; i++) { + txq->tx_ring[i].cmd_type_offset_bsz = + rte_cpu_to_le_64(AVF_TX_DESC_DTYPE_DESC_DONE); + txe[i].mbuf = NULL; + txe[i].last_id = i; + txe[prev].next_id = i; + prev = i; + } + + txq->tx_tail = 0; + txq->nb_used = 0; + + txq->last_desc_cleaned = txq->nb_tx_desc - 1; + txq->nb_free = txq->nb_tx_desc - 1; + + txq->next_dd = txq->rs_thresh - 1; + txq->next_rs = txq->rs_thresh - 1; +} + +static int +alloc_rxq_mbufs(struct avf_rx_queue *rxq) +{ + volatile union avf_rx_desc *rxd; + struct rte_mbuf *mbuf = NULL; + uint64_t dma_addr; + uint16_t i; + + for (i = 0; i < rxq->nb_rx_desc; i++) { + mbuf = rte_mbuf_raw_alloc(rxq->mp); + if (unlikely(!mbuf)) { + PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX"); + return -ENOMEM; + } + + rte_mbuf_refcnt_set(mbuf, 1); + mbuf->next = NULL; + mbuf->data_off = RTE_PKTMBUF_HEADROOM; + mbuf->nb_segs = 1; + mbuf->port = rxq->port_id; + + dma_addr = + rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf)); + + rxd = &rxq->rx_ring[i]; + rxd->read.pkt_addr = dma_addr; + rxd->read.hdr_addr = 0; +#ifndef RTE_LIBRTE_AVF_16BYTE_RX_DESC + rxd->read.rsvd1 = 0; + rxd->read.rsvd2 = 0; +#endif + + rxq->sw_ring[i] = mbuf; + } + + return 0; +} + +static inline void +release_rxq_mbufs(struct avf_rx_queue *rxq) +{ + struct rte_mbuf *mbuf; + uint16_t i; + + if (!rxq->sw_ring) + return; + + for (i = 0; i < rxq->nb_rx_desc; i++) { + if (rxq->sw_ring[i]) { + rte_pktmbuf_free_seg(rxq->sw_ring[i]); + rxq->sw_ring[i] = NULL; + } + } +} + +static inline void +release_txq_mbufs(struct avf_tx_queue *txq) +{ + uint16_t i; + + if (!txq || !txq->sw_ring) { + PMD_DRV_LOG(DEBUG, "Pointer to rxq or sw_ring is NULL"); + return; + } + + for (i = 0; i < txq->nb_tx_desc; i++) { + if (txq->sw_ring[i].mbuf) { + rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf); + txq->sw_ring[i].mbuf = NULL; + } + 
+
+int
+avf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+		       uint16_t nb_desc, unsigned int socket_id,
+		       const struct rte_eth_rxconf *rx_conf,
+		       struct rte_mempool *mp)
+{
+	struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct avf_adapter *ad =
+		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	struct avf_rx_queue *rxq;
+	const struct rte_memzone *mz;
+	uint32_t ring_size;
+	uint16_t len;
+	uint16_t rx_free_thresh;
+
+	PMD_INIT_FUNC_TRACE();
+
+	if (nb_desc % AVF_ALIGN_RING_DESC != 0 ||
+	    nb_desc > AVF_MAX_RING_DESC ||
+	    nb_desc < AVF_MIN_RING_DESC) {
+		PMD_INIT_LOG(ERR, "Number (%u) of receive descriptors is "
+			     "invalid", nb_desc);
+		return -EINVAL;
+	}
+
+	/* Check free threshold */
+	rx_free_thresh = (rx_conf->rx_free_thresh == 0) ?
+			 AVF_DEFAULT_RX_FREE_THRESH :
+			 rx_conf->rx_free_thresh;
+	if (check_rx_thresh(nb_desc, rx_free_thresh) != 0)
+		return -EINVAL;
+
+	/* Free memory if needed */
+	if (dev->data->rx_queues[queue_idx]) {
+		avf_dev_rx_queue_release(dev->data->rx_queues[queue_idx]);
+		dev->data->rx_queues[queue_idx] = NULL;
+	}
+
+	/* Allocate the rx queue data structure */
+	rxq = rte_zmalloc_socket("avf rxq",
+				 sizeof(struct avf_rx_queue),
+				 RTE_CACHE_LINE_SIZE,
+				 socket_id);
+	if (!rxq) {
+		PMD_INIT_LOG(ERR, "Failed to allocate memory for "
+			     "rx queue data structure");
+		return -ENOMEM;
+	}
+
+	rxq->mp = mp;
+	rxq->nb_rx_desc = nb_desc;
+	rxq->rx_free_thresh = rx_free_thresh;
+	rxq->queue_id = queue_idx;
+	rxq->port_id = dev->data->port_id;
+	rxq->crc_len = 0; /* crc stripping by default */
+	rxq->rx_deferred_start = rx_conf->rx_deferred_start;
+	rxq->rx_hdr_len = 0;
+
+	len = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
+	rxq->rx_buf_len = RTE_ALIGN(len, (1 << AVF_RXQ_CTX_DBUFF_SHIFT));
+
+	/* Allocate the software ring. */
+	len = nb_desc + AVF_RX_MAX_BURST;
+	rxq->sw_ring =
+		rte_zmalloc_socket("avf rx sw ring",
+				   sizeof(struct rte_mbuf *) * len,
+				   RTE_CACHE_LINE_SIZE,
+				   socket_id);
+	if (!rxq->sw_ring) {
+		PMD_INIT_LOG(ERR, "Failed to allocate memory for SW ring");
+		rte_free(rxq);
+		return -ENOMEM;
+	}
+
+	/* Allocate the maximum number of RX ring hardware descriptors with
+	 * a little more to support bulk allocation.
+	 */
+	len = AVF_MAX_RING_DESC + AVF_RX_MAX_BURST;
+	ring_size = RTE_ALIGN(len * sizeof(union avf_rx_desc),
+			      AVF_DMA_MEM_ALIGN);
+	mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
+				      ring_size, AVF_RING_BASE_ALIGN,
+				      socket_id);
+	if (!mz) {
+		PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for RX");
+		rte_free(rxq->sw_ring);
+		rte_free(rxq);
+		return -ENOMEM;
+	}
+	/* Zero all the descriptors in the ring. */
+	memset(mz->addr, 0, ring_size);
+	rxq->rx_ring_phys_addr = mz->iova;
+	rxq->rx_ring = (union avf_rx_desc *)mz->addr;
+
+	rxq->mz = mz;
+	reset_rx_queue(rxq);
+	rxq->q_set = TRUE;
+	dev->data->rx_queues[queue_idx] = rxq;
+	rxq->qrx_tail = hw->hw_addr + AVF_QRX_TAIL1(rxq->queue_id);
+
+	return 0;
+}
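Note the memzone above is always sized for AVF_MAX_RING_DESC plus the bulk-allocate slop rather than for nb_desc, so the reservation stays valid if the queue is later re-set-up with a larger ring. A quick standalone check of that arithmetic, assuming the default 32-byte Rx descriptor:

#include <stdint.h>
#include <stdio.h>

#define AVF_MAX_RING_DESC 4096
#define AVF_RX_MAX_BURST  32
#define AVF_DMA_MEM_ALIGN 4096
#define RX_DESC_SIZE      32	/* 32-byte Rx descriptor (default build) */

/* round up to the DMA alignment, as RTE_ALIGN() does */
static uint32_t align_up(uint32_t v, uint32_t a)
{
	return (v + a - 1) / a * a;
}

int main(void)
{
	uint32_t len = AVF_MAX_RING_DESC + AVF_RX_MAX_BURST;

	/* (4096 + 32) * 32 = 132096 -> rounded up to 135168 */
	printf("%u\n", align_up(len * RX_DESC_SIZE, AVF_DMA_MEM_ALIGN));
	return 0;
}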
+
+int
+avf_dev_tx_queue_setup(struct rte_eth_dev *dev,
+		       uint16_t queue_idx,
+		       uint16_t nb_desc,
+		       unsigned int socket_id,
+		       const struct rte_eth_txconf *tx_conf)
+{
+	struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct avf_tx_queue *txq;
+	const struct rte_memzone *mz;
+	uint32_t ring_size;
+	uint16_t tx_rs_thresh, tx_free_thresh;
+
+	PMD_INIT_FUNC_TRACE();
+
+	if (nb_desc % AVF_ALIGN_RING_DESC != 0 ||
+	    nb_desc > AVF_MAX_RING_DESC ||
+	    nb_desc < AVF_MIN_RING_DESC) {
+		PMD_INIT_LOG(ERR, "Number (%u) of transmit descriptors is "
+			     "invalid", nb_desc);
+		return -EINVAL;
+	}
+
+	tx_rs_thresh = (uint16_t)((tx_conf->tx_rs_thresh) ?
+		tx_conf->tx_rs_thresh : DEFAULT_TX_RS_THRESH);
+	tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
+		tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
+	if (check_tx_thresh(nb_desc, tx_rs_thresh, tx_free_thresh) != 0)
+		return -EINVAL;
+
+	/* Free memory if needed. */
+	if (dev->data->tx_queues[queue_idx]) {
+		avf_dev_tx_queue_release(dev->data->tx_queues[queue_idx]);
+		dev->data->tx_queues[queue_idx] = NULL;
+	}
+
+	/* Allocate the TX queue data structure. */
+	txq = rte_zmalloc_socket("avf txq",
+				 sizeof(struct avf_tx_queue),
+				 RTE_CACHE_LINE_SIZE,
+				 socket_id);
+	if (!txq) {
+		PMD_INIT_LOG(ERR, "Failed to allocate memory for "
+			     "tx queue structure");
+		return -ENOMEM;
+	}
+
+	txq->nb_tx_desc = nb_desc;
+	txq->rs_thresh = tx_rs_thresh;
+	txq->free_thresh = tx_free_thresh;
+	txq->queue_id = queue_idx;
+	txq->port_id = dev->data->port_id;
+	txq->txq_flags = tx_conf->txq_flags;
+	txq->tx_deferred_start = tx_conf->tx_deferred_start;
+
+	/* Allocate software ring */
+	txq->sw_ring =
+		rte_zmalloc_socket("avf tx sw ring",
+				   sizeof(struct avf_tx_entry) * nb_desc,
+				   RTE_CACHE_LINE_SIZE,
+				   socket_id);
+	if (!txq->sw_ring) {
+		PMD_INIT_LOG(ERR, "Failed to allocate memory for SW TX ring");
+		rte_free(txq);
+		return -ENOMEM;
+	}
+
+	/* Allocate TX hardware ring descriptors. */
+	ring_size = sizeof(struct avf_tx_desc) * AVF_MAX_RING_DESC;
+	ring_size = RTE_ALIGN(ring_size, AVF_DMA_MEM_ALIGN);
+	mz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
+				      ring_size, AVF_RING_BASE_ALIGN,
+				      socket_id);
+	if (!mz) {
+		PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX");
+		rte_free(txq->sw_ring);
+		rte_free(txq);
+		return -ENOMEM;
+	}
+	txq->tx_ring_phys_addr = mz->iova;
+	txq->tx_ring = (struct avf_tx_desc *)mz->addr;
+
+	txq->mz = mz;
+	reset_tx_queue(txq);
+	txq->q_set = TRUE;
+	dev->data->tx_queues[queue_idx] = txq;
+	txq->qtx_tail = hw->hw_addr + AVF_QTX_TAIL1(queue_idx);
+
+	return 0;
+}
+
+int
+avf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+	struct avf_adapter *adapter =
+		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct avf_rx_queue *rxq;
+	int err = 0;
+
+	PMD_DRV_FUNC_TRACE();
+
+	if (rx_queue_id >= dev->data->nb_rx_queues)
+		return -EINVAL;
+
+	rxq = dev->data->rx_queues[rx_queue_id];
+
+	err = alloc_rxq_mbufs(rxq);
+	if (err) {
+		PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
+		return err;
+	}
+
+	rte_wmb();
+
+	/* Init the RX tail register. */
+	AVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
+	AVF_WRITE_FLUSH(hw);
+
+	/* Ready to switch the queue on */
+	err = avf_switch_queue(adapter, rx_queue_id, TRUE, TRUE);
+	if (err)
+		PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
+			    rx_queue_id);
+	else
+		dev->data->rx_queue_state[rx_queue_id] =
+			RTE_ETH_QUEUE_STATE_STARTED;
+
+	return err;
+}
+
+int
+avf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+	struct avf_adapter *adapter =
+		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct avf_tx_queue *txq;
+	int err = 0;
+
+	PMD_DRV_FUNC_TRACE();
+
+	if (tx_queue_id >= dev->data->nb_tx_queues)
+		return -EINVAL;
+
+	txq = dev->data->tx_queues[tx_queue_id];
+
+	/* Init the TX tail register. */
+	AVF_PCI_REG_WRITE(txq->qtx_tail, 0);
+	AVF_WRITE_FLUSH(hw);
+
+	/* Ready to switch the queue on */
+	err = avf_switch_queue(adapter, tx_queue_id, FALSE, TRUE);
+
+	if (err)
+		PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
+			    tx_queue_id);
+	else
+		dev->data->tx_queue_state[tx_queue_id] =
+			RTE_ETH_QUEUE_STATE_STARTED;
+
+	return err;
+}
+
+int
+avf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+	struct avf_adapter *adapter =
+		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	struct avf_rx_queue *rxq;
+	int err;
+
+	PMD_DRV_FUNC_TRACE();
+
+	if (rx_queue_id >= dev->data->nb_rx_queues)
+		return -EINVAL;
+
+	err = avf_switch_queue(adapter, rx_queue_id, TRUE, FALSE);
+	if (err) {
+		PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
+			    rx_queue_id);
+		return err;
+	}
+
+	rxq = dev->data->rx_queues[rx_queue_id];
+	release_rxq_mbufs(rxq);
+	reset_rx_queue(rxq);
+	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+	return 0;
+}
+
+int
+avf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+	struct avf_adapter *adapter =
+		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	struct avf_tx_queue *txq;
+	int err;
+
+	PMD_DRV_FUNC_TRACE();
+
+	if (tx_queue_id >= dev->data->nb_tx_queues)
+		return -EINVAL;
+
+	err = avf_switch_queue(adapter, tx_queue_id, FALSE, FALSE);
+	if (err) {
+		PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
+			    tx_queue_id);
+		return err;
+	}
+
+	txq = dev->data->tx_queues[tx_queue_id];
+	release_txq_mbufs(txq);
+	reset_tx_queue(txq);
+	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+	return 0;
+}
+
+void
+avf_dev_rx_queue_release(void *rxq)
+{
+	struct avf_rx_queue *q = (struct avf_rx_queue *)rxq;
+
+	if (!q)
+		return;
+
+	release_rxq_mbufs(q);
+	rte_free(q->sw_ring);
+	rte_memzone_free(q->mz);
+	rte_free(q);
+}
+
+void
+avf_dev_tx_queue_release(void *txq)
+{
+	struct avf_tx_queue *q = (struct avf_tx_queue *)txq;
+
+	if (!q)
+		return;
+
+	release_txq_mbufs(q);
+	rte_free(q->sw_ring);
+	rte_memzone_free(q->mz);
+	rte_free(q);
+}
+
+void
+avf_stop_queues(struct rte_eth_dev *dev)
+{
+	struct avf_adapter *adapter =
+		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	struct avf_rx_queue *rxq;
+	struct avf_tx_queue *txq;
+	int ret, i;
+
+	/* Stop all queues */
+	ret = avf_disable_queues(adapter);
+	if (ret)
+		PMD_DRV_LOG(WARNING, "Failed to stop queues");
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		txq = dev->data->tx_queues[i];
+		if (!txq)
+			continue;
+		release_txq_mbufs(txq);
+		reset_tx_queue(txq);
+		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
+	}
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		rxq = dev->data->rx_queues[i];
+		if (!rxq)
+			continue;
+		release_rxq_mbufs(rxq);
+		reset_rx_queue(rxq);
+		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
+	}
+}
diff --git a/drivers/net/avf/avf_rxtx.h b/drivers/net/avf/avf_rxtx.h
new file mode 100644
index 0000000000..e227cd1df2
--- /dev/null
+++ b/drivers/net/avf/avf_rxtx.h
@@ -0,0 +1,160 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#ifndef _AVF_RXTX_H_
+#define _AVF_RXTX_H_
+
+/* Ring length must be a whole multiple of 32 descriptors. */
+#define AVF_ALIGN_RING_DESC 32
+#define AVF_MIN_RING_DESC 64
+#define AVF_MAX_RING_DESC 4096
+#define AVF_DMA_MEM_ALIGN 4096
+/* Base address of the HW descriptor ring should be 128B aligned. */
+#define AVF_RING_BASE_ALIGN 128
+
+/* used for Rx Bulk Allocate */
+#define AVF_RX_MAX_BURST 32
+
+#define DEFAULT_TX_RS_THRESH 32
+#define DEFAULT_TX_FREE_THRESH 32
+
+/* HW desc structure, both 16-byte and 32-byte types are supported */
+#ifdef RTE_LIBRTE_AVF_16BYTE_RX_DESC
+#define avf_rx_desc avf_16byte_rx_desc
+#else
+#define avf_rx_desc avf_32byte_rx_desc
+#endif
+
+/* Structure associated with each Rx queue. */
+struct avf_rx_queue {
+	struct rte_mempool *mp;       /* mbuf pool to populate Rx ring */
+	const struct rte_memzone *mz; /* memzone for Rx ring */
+	volatile union avf_rx_desc *rx_ring; /* Rx ring virtual address */
+	uint64_t rx_ring_phys_addr;   /* Rx ring DMA address */
+	struct rte_mbuf **sw_ring;    /* address of SW ring */
+	uint16_t nb_rx_desc;          /* ring length */
+	uint16_t rx_tail;             /* current value of tail */
+	volatile uint8_t *qrx_tail;   /* register address of tail */
+	uint16_t rx_free_thresh;      /* max free RX desc to hold */
+	uint16_t nb_rx_hold;          /* number of held free RX desc */
+	struct rte_mbuf *pkt_first_seg; /* first segment of current packet */
+	struct rte_mbuf *pkt_last_seg;  /* last segment of current packet */
+	struct rte_mbuf fake_mbuf;      /* dummy mbuf */
+
+	uint16_t port_id;        /* device port ID */
+	uint8_t crc_len;         /* 0 if CRC stripped, 4 otherwise */
+	uint16_t queue_id;       /* Rx queue index */
+	uint16_t rx_buf_len;     /* The packet buffer size */
+	uint16_t rx_hdr_len;     /* The header buffer size */
+	uint16_t max_pkt_len;    /* Maximum packet length */
+
+	bool q_set;              /* if rx queue has been configured */
+	bool rx_deferred_start;  /* don't start this queue in dev start */
+};
+
+struct avf_tx_entry {
+	struct rte_mbuf *mbuf;
+	uint16_t next_id;
+	uint16_t last_id;
+};
+
+/* Structure associated with each TX queue. */
+struct avf_tx_queue {
+	const struct rte_memzone *mz;  /* memzone for Tx ring */
+	volatile struct avf_tx_desc *tx_ring; /* Tx ring virtual address */
+	uint64_t tx_ring_phys_addr;    /* Tx ring DMA address */
+	struct avf_tx_entry *sw_ring;  /* address array of SW ring */
+	uint16_t nb_tx_desc;           /* ring length */
+	uint16_t tx_tail;              /* current value of tail */
+	volatile uint8_t *qtx_tail;    /* register address of tail */
+	/* number of used desc since RS bit set */
+	uint16_t nb_used;
+	uint16_t nb_free;
+	uint16_t last_desc_cleaned;    /* last desc that has been cleaned */
+	uint16_t free_thresh;
+	uint16_t rs_thresh;
+
+	uint16_t port_id;
+	uint16_t queue_id;
+	uint32_t txq_flags;
+	uint16_t next_dd;              /* next to check DD, for VPMD */
+	uint16_t next_rs;              /* next to set RS, for VPMD */
+
+	bool q_set;                    /* if tx queue has been configured */
+	bool tx_deferred_start;        /* don't start this queue in dev start */
+};
+
+int avf_dev_rx_queue_setup(struct rte_eth_dev *dev,
+			   uint16_t queue_idx,
+			   uint16_t nb_desc,
+			   unsigned int socket_id,
+			   const struct rte_eth_rxconf *rx_conf,
+			   struct rte_mempool *mp);
+
+int avf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+int avf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+void avf_dev_rx_queue_release(void *rxq);
+
+int avf_dev_tx_queue_setup(struct rte_eth_dev *dev,
+			   uint16_t queue_idx,
+			   uint16_t nb_desc,
+			   unsigned int socket_id,
+			   const struct rte_eth_txconf *tx_conf);
+int avf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+int avf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+void avf_dev_tx_queue_release(void *txq);
+void avf_stop_queues(struct rte_eth_dev *dev);
+
+static inline
+void avf_dump_rx_descriptor(struct avf_rx_queue *rxq,
+			    const void *desc,
+			    uint16_t rx_id)
+{
+#ifdef RTE_LIBRTE_AVF_16BYTE_RX_DESC
+	const union avf_16byte_rx_desc *rx_desc = desc;
+
+	printf("Queue %d Rx_desc %d: QW0: 0x%016"PRIx64" QW1: 0x%016"PRIx64"\n",
+	       rxq->queue_id, rx_id, rx_desc->read.pkt_addr,
+	       rx_desc->read.hdr_addr);
+#else
+	const union avf_32byte_rx_desc *rx_desc = desc;
+
+	printf("Queue %d Rx_desc %d: QW0: 0x%016"PRIx64" QW1: 0x%016"PRIx64
+	       " QW2: 0x%016"PRIx64" QW3: 0x%016"PRIx64"\n", rxq->queue_id,
+	       rx_id, rx_desc->read.pkt_addr, rx_desc->read.hdr_addr,
+	       rx_desc->read.rsvd1, rx_desc->read.rsvd2);
+#endif
+}
+
+/* All the descriptors are 16 bytes, so just use one of them
+ * to print the qwords
+ */
+static inline
+void avf_dump_tx_descriptor(const struct avf_tx_queue *txq,
+			    const void *desc, uint16_t tx_id)
+{
+	const char *name;
+	const struct avf_tx_desc *tx_desc = desc;
+	enum avf_tx_desc_dtype_value type;
+
+	type = (enum avf_tx_desc_dtype_value)rte_le_to_cpu_64(
+		tx_desc->cmd_type_offset_bsz &
+		rte_cpu_to_le_64(AVF_TXD_QW1_DTYPE_MASK));
+	switch (type) {
+	case AVF_TX_DESC_DTYPE_DATA:
+		name = "Tx_data_desc";
+		break;
+	case AVF_TX_DESC_DTYPE_CONTEXT:
+		name = "Tx_context_desc";
+		break;
+	default:
+		name = "unknown_desc";
+		break;
+	}
+
+	printf("Queue %d %s %d: QW0: 0x%016"PRIx64" QW1: 0x%016"PRIx64"\n",
+	       txq->queue_id, name, tx_id, tx_desc->buffer_addr,
+	       tx_desc->cmd_type_offset_bsz);
+}
+#endif /* _AVF_RXTX_H_ */
diff --git a/drivers/net/avf/avf_vchnl.c b/drivers/net/avf/avf_vchnl.c
index ebbee31b5c..55a425a7fe 100644
--- a/drivers/net/avf/avf_vchnl.c
+++ b/drivers/net/avf/avf_vchnl.c
@@ -25,6 +25,7 @@
 #include "base/avf_type.h"

 #include "avf.h"
+#include "avf_rxtx.h"

 #define MAX_TRY_TIMES 200
 #define ASQ_DELAY_MS  10
@@ -196,6 +197,48 @@ avf_handle_virtchnl_msg(struct rte_eth_dev *dev)
	}
 }

+int
+avf_enable_vlan_strip(struct avf_adapter *adapter)
+{
+	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
+	struct avf_cmd_info args;
+	int ret;
+
+	memset(&args, 0, sizeof(args));
+	args.ops = VIRTCHNL_OP_ENABLE_VLAN_STRIPPING;
+	args.in_args = NULL;
+	args.in_args_size = 0;
+	args.out_buffer = vf->aq_resp;
+	args.out_size = AVF_AQ_BUF_SZ;
+	ret = avf_execute_vf_cmd(adapter, &args);
+	if (ret)
+		PMD_DRV_LOG(ERR, "Failed to execute command of"
+			    " OP_ENABLE_VLAN_STRIPPING");
+
+	return ret;
+}
+
+int
+avf_disable_vlan_strip(struct avf_adapter *adapter)
+{
+	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
+	struct avf_cmd_info args;
+	int ret;
+
+	memset(&args, 0, sizeof(args));
+	args.ops = VIRTCHNL_OP_DISABLE_VLAN_STRIPPING;
+	args.in_args = NULL;
+	args.in_args_size = 0;
+	args.out_buffer = vf->aq_resp;
+	args.out_size = AVF_AQ_BUF_SZ;
+	ret = avf_execute_vf_cmd(adapter, &args);
+	if (ret)
+		PMD_DRV_LOG(ERR, "Failed to execute command of"
+			    " OP_DISABLE_VLAN_STRIPPING");
+
+	return ret;
+}
+
 #define VIRTCHNL_VERSION_MAJOR_START 1
 #define VIRTCHNL_VERSION_MINOR_START 1

@@ -274,8 +317,8 @@ avf_get_vf_resource(struct avf_adapter *adapter)

	err = avf_execute_vf_cmd(adapter, &args);
	if (err) {
-		PMD_DRV_LOG(ERR, "Failed to execute command of "
-			    "OP_GET_VF_RESOURCE");
+		PMD_DRV_LOG(ERR,
+			    "Failed to execute command of OP_GET_VF_RESOURCE");
		return -1;
	}
@@ -302,3 +345,315 @@ avf_get_vf_resource(struct avf_adapter *adapter)

	return 0;
 }
+
+int
+avf_enable_queues(struct avf_adapter *adapter)
+{
+	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
+	struct virtchnl_queue_select queue_select;
+	struct avf_cmd_info args;
+	int err;
+
+	memset(&queue_select, 0, sizeof(queue_select));
+	queue_select.vsi_id = vf->vsi_res->vsi_id;
+
+	queue_select.rx_queues = BIT(adapter->eth_dev->data->nb_rx_queues) - 1;
+	queue_select.tx_queues = BIT(adapter->eth_dev->data->nb_tx_queues) - 1;
+
+	args.ops = VIRTCHNL_OP_ENABLE_QUEUES;
+	args.in_args = (u8 *)&queue_select;
+	args.in_args_size = sizeof(queue_select);
+	args.out_buffer = vf->aq_resp;
+	args.out_size = AVF_AQ_BUF_SZ;
+	err = avf_execute_vf_cmd(adapter, &args);
+	if (err) {
+		PMD_DRV_LOG(ERR,
+			    "Failed to execute command of OP_ENABLE_QUEUES");
+		return err;
+	}
+	return 0;
+}
+
+int
+avf_disable_queues(struct avf_adapter *adapter)
+{
+	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
+	struct virtchnl_queue_select queue_select;
+	struct avf_cmd_info args;
+	int err;
+
+	memset(&queue_select, 0, sizeof(queue_select));
+	queue_select.vsi_id = vf->vsi_res->vsi_id;
+
+	queue_select.rx_queues = BIT(adapter->eth_dev->data->nb_rx_queues) - 1;
+	queue_select.tx_queues = BIT(adapter->eth_dev->data->nb_tx_queues) - 1;
+
+	args.ops = VIRTCHNL_OP_DISABLE_QUEUES;
+	args.in_args = (u8 *)&queue_select;
+	args.in_args_size = sizeof(queue_select);
+	args.out_buffer = vf->aq_resp;
+	args.out_size = AVF_AQ_BUF_SZ;
+	err = avf_execute_vf_cmd(adapter, &args);
+	if (err) {
+		PMD_DRV_LOG(ERR,
+			    "Failed to execute command of OP_DISABLE_QUEUES");
+		return err;
+	}
+	return 0;
+}
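Both helpers select every configured queue at once with a dense mask: BIT(n) - 1 sets bits 0 through n-1. A standalone illustration (BIT() assumed equivalent to the base-code macro):

#include <stdint.h>
#include <stdio.h>

#define BIT(x) (1UL << (x))	/* assumed to match the base-code macro */

int main(void)
{
	uint32_t nb_rx_queues = 4;	/* hypothetical */
	uint32_t rx_queues = BIT(nb_rx_queues) - 1;

	printf("0x%x\n", rx_queues);	/* 0xf: queues 0..3 selected */
	return 0;
}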
+
+int
+avf_switch_queue(struct avf_adapter *adapter, uint16_t qid,
+		 bool rx, bool on)
+{
+	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
+	struct virtchnl_queue_select queue_select;
+	struct avf_cmd_info args;
+	int err;
+
+	memset(&queue_select, 0, sizeof(queue_select));
+	queue_select.vsi_id = vf->vsi_res->vsi_id;
+	if (rx)
+		queue_select.rx_queues |= 1 << qid;
+	else
+		queue_select.tx_queues |= 1 << qid;
+
+	if (on)
+		args.ops = VIRTCHNL_OP_ENABLE_QUEUES;
+	else
+		args.ops = VIRTCHNL_OP_DISABLE_QUEUES;
+	args.in_args = (u8 *)&queue_select;
+	args.in_args_size = sizeof(queue_select);
+	args.out_buffer = vf->aq_resp;
+	args.out_size = AVF_AQ_BUF_SZ;
+	err = avf_execute_vf_cmd(adapter, &args);
+	if (err)
+		PMD_DRV_LOG(ERR, "Failed to execute command of %s",
+			    on ? "OP_ENABLE_QUEUES" : "OP_DISABLE_QUEUES");
+	return err;
+}
+
+int
+avf_configure_rss_lut(struct avf_adapter *adapter)
+{
+	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
+	struct virtchnl_rss_lut *rss_lut;
+	struct avf_cmd_info args;
+	int len, err = 0;
+
+	len = sizeof(*rss_lut) + vf->vf_res->rss_lut_size - 1;
+	rss_lut = rte_zmalloc("rss_lut", len, 0);
+	if (!rss_lut)
+		return -ENOMEM;
+
+	rss_lut->vsi_id = vf->vsi_res->vsi_id;
+	rss_lut->lut_entries = vf->vf_res->rss_lut_size;
+	rte_memcpy(rss_lut->lut, vf->rss_lut, vf->vf_res->rss_lut_size);
+
+	args.ops = VIRTCHNL_OP_CONFIG_RSS_LUT;
+	args.in_args = (u8 *)rss_lut;
+	args.in_args_size = len;
+	args.out_buffer = vf->aq_resp;
+	args.out_size = AVF_AQ_BUF_SZ;
+
+	err = avf_execute_vf_cmd(adapter, &args);
+	if (err)
+		PMD_DRV_LOG(ERR,
+			    "Failed to execute command of OP_CONFIG_RSS_LUT");
+
+	rte_free(rss_lut);
+	return err;
+}
+
+int
+avf_configure_rss_key(struct avf_adapter *adapter)
+{
+	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
+	struct virtchnl_rss_key *rss_key;
+	struct avf_cmd_info args;
+	int len, err = 0;
+
+	len = sizeof(*rss_key) + vf->vf_res->rss_key_size - 1;
+	rss_key = rte_zmalloc("rss_key", len, 0);
+	if (!rss_key)
+		return -ENOMEM;
+
+	rss_key->vsi_id = vf->vsi_res->vsi_id;
+	rss_key->key_len = vf->vf_res->rss_key_size;
+	rte_memcpy(rss_key->key, vf->rss_key, vf->vf_res->rss_key_size);
+
+	args.ops = VIRTCHNL_OP_CONFIG_RSS_KEY;
+	args.in_args = (u8 *)rss_key;
+	args.in_args_size = len;
+	args.out_buffer = vf->aq_resp;
+	args.out_size = AVF_AQ_BUF_SZ;
+
+	err = avf_execute_vf_cmd(adapter, &args);
+	if (err)
+		PMD_DRV_LOG(ERR,
+			    "Failed to execute command of OP_CONFIG_RSS_KEY");
+
+	rte_free(rss_key);
+	return err;
+}
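The len = sizeof(*rss_lut) + rss_lut_size - 1 arithmetic above exists because the virtchnl structs end in a one-element placeholder array, so one payload byte is already counted inside sizeof(). A hedged standalone sketch of the same allocation pattern (struct layout hypothetical, only mirroring the idea of virtchnl_rss_key):

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* hypothetical mirror of the virtchnl-style layout */
struct rss_key_msg {
	uint16_t vsi_id;
	uint16_t key_len;
	uint8_t key[1];	/* remaining key bytes follow in the allocation */
};

static struct rss_key_msg *make_key_msg(const uint8_t *key, uint16_t len)
{
	/* "- 1" because one key byte already lives inside the struct */
	struct rss_key_msg *msg = calloc(1, sizeof(*msg) + len - 1);

	if (!msg)
		return NULL;
	msg->key_len = len;
	memcpy(msg->key, key, len);
	return msg;
}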
+
+int
+avf_configure_queues(struct avf_adapter *adapter)
+{
+	struct avf_rx_queue **rxq =
+		(struct avf_rx_queue **)adapter->eth_dev->data->rx_queues;
+	struct avf_tx_queue **txq =
+		(struct avf_tx_queue **)adapter->eth_dev->data->tx_queues;
+	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
+	struct virtchnl_vsi_queue_config_info *vc_config;
+	struct virtchnl_queue_pair_info *vc_qp;
+	struct avf_cmd_info args;
+	uint16_t i, size;
+	int err;
+
+	size = sizeof(*vc_config) +
+	       sizeof(vc_config->qpair[0]) * vf->num_queue_pairs;
+	vc_config = rte_zmalloc("cfg_queue", size, 0);
+	if (!vc_config)
+		return -ENOMEM;
+
+	vc_config->vsi_id = vf->vsi_res->vsi_id;
+	vc_config->num_queue_pairs = vf->num_queue_pairs;
+
+	for (i = 0, vc_qp = vc_config->qpair;
+	     i < vf->num_queue_pairs;
+	     i++, vc_qp++) {
+		vc_qp->txq.vsi_id = vf->vsi_res->vsi_id;
+		vc_qp->txq.queue_id = i;
+		/* Virtchnl configures queues in pairs */
+		if (i < adapter->eth_dev->data->nb_tx_queues) {
+			vc_qp->txq.ring_len = txq[i]->nb_tx_desc;
+			vc_qp->txq.dma_ring_addr = txq[i]->tx_ring_phys_addr;
+		}
+		vc_qp->rxq.vsi_id = vf->vsi_res->vsi_id;
+		vc_qp->rxq.queue_id = i;
+		vc_qp->rxq.max_pkt_size = vf->max_pkt_len;
+		/* Virtchnl configures queues in pairs */
+		if (i < adapter->eth_dev->data->nb_rx_queues) {
+			vc_qp->rxq.ring_len = rxq[i]->nb_rx_desc;
+			vc_qp->rxq.dma_ring_addr = rxq[i]->rx_ring_phys_addr;
+			vc_qp->rxq.databuffer_size = rxq[i]->rx_buf_len;
+		}
+	}
+
+	memset(&args, 0, sizeof(args));
+	args.ops = VIRTCHNL_OP_CONFIG_VSI_QUEUES;
+	args.in_args = (uint8_t *)vc_config;
+	args.in_args_size = size;
+	args.out_buffer = vf->aq_resp;
+	args.out_size = AVF_AQ_BUF_SZ;
+
+	err = avf_execute_vf_cmd(adapter, &args);
+	if (err)
+		PMD_DRV_LOG(ERR, "Failed to execute command of"
+			    " VIRTCHNL_OP_CONFIG_VSI_QUEUES");
+
+	rte_free(vc_config);
+	return err;
+}
+
+int
+avf_config_irq_map(struct avf_adapter *adapter)
+{
+	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
+	struct virtchnl_irq_map_info *map_info;
+	struct virtchnl_vector_map *vecmap;
+	struct avf_cmd_info args;
+	int len, i, err;
+
+	len = sizeof(struct virtchnl_irq_map_info) +
+	      sizeof(struct virtchnl_vector_map) * vf->nb_msix;
+
+	map_info = rte_zmalloc("map_info", len, 0);
+	if (!map_info)
+		return -ENOMEM;
+
+	map_info->num_vectors = vf->nb_msix;
+	for (i = 0; i < vf->nb_msix; i++) {
+		vecmap = &map_info->vecmap[i];
+		vecmap->vsi_id = vf->vsi_res->vsi_id;
+		vecmap->rxitr_idx = AVF_ITR_INDEX_DEFAULT;
+		vecmap->vector_id = vf->msix_base + i;
+		vecmap->txq_map = 0;
+		vecmap->rxq_map = vf->rxq_map[vf->msix_base + i];
+	}
+
+	args.ops = VIRTCHNL_OP_CONFIG_IRQ_MAP;
+	args.in_args = (u8 *)map_info;
+	args.in_args_size = len;
+	args.out_buffer = vf->aq_resp;
+	args.out_size = AVF_AQ_BUF_SZ;
+	err = avf_execute_vf_cmd(adapter, &args);
+	if (err)
+		PMD_DRV_LOG(ERR,
+			    "Failed to execute command of OP_CONFIG_IRQ_MAP");
+
+	rte_free(map_info);
+	return err;
+}
+
+void
+avf_add_del_all_mac_addr(struct avf_adapter *adapter, bool add)
+{
+	struct virtchnl_ether_addr_list *list;
+	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
+	struct ether_addr *addr;
+	struct avf_cmd_info args;
+	int len, err, i, j;
+	int next_begin = 0;
+	int begin = 0;
+
+	do {
+		j = 0;
+		len = sizeof(struct virtchnl_ether_addr_list);
+		for (i = begin; i < AVF_NUM_MACADDR_MAX; i++, next_begin++) {
+			addr = &adapter->eth_dev->data->mac_addrs[i];
+			if (is_zero_ether_addr(addr))
+				continue;
+			len += sizeof(struct virtchnl_ether_addr);
+			if (len >= AVF_AQ_BUF_SZ) {
+				next_begin = i + 1;
+				break;
+			}
+		}
+
+		list = rte_zmalloc("avf_del_mac_buffer", len, 0);
+		if (!list) {
+			PMD_DRV_LOG(ERR, "Failed to allocate memory");
+			return;
+		}
+
+		for (i = begin; i < next_begin; i++) {
+			addr = &adapter->eth_dev->data->mac_addrs[i];
+			if (is_zero_ether_addr(addr))
+				continue;
+			rte_memcpy(list->list[j].addr, addr->addr_bytes,
+				   sizeof(addr->addr_bytes));
+			PMD_DRV_LOG(DEBUG, "add/rm mac:%x:%x:%x:%x:%x:%x",
+				    addr->addr_bytes[0], addr->addr_bytes[1],
+				    addr->addr_bytes[2], addr->addr_bytes[3],
+				    addr->addr_bytes[4], addr->addr_bytes[5]);
+			j++;
+		}
+		list->vsi_id = vf->vsi_res->vsi_id;
+		list->num_elements = j;
+		args.ops = add ? VIRTCHNL_OP_ADD_ETH_ADDR :
+				 VIRTCHNL_OP_DEL_ETH_ADDR;
+		args.in_args = (uint8_t *)list;
+		args.in_args_size = len;
+		args.out_buffer = vf->aq_resp;
+		args.out_size = AVF_AQ_BUF_SZ;
+		err = avf_execute_vf_cmd(adapter, &args);
+		if (err)
+			PMD_DRV_LOG(ERR, "Failed to execute command %s",
+				    add ? "OP_ADD_ETHER_ADDRESS" :
+					  "OP_DEL_ETHER_ADDRESS");
+		rte_free(list);
+		begin = next_begin;
+	} while (begin < AVF_NUM_MACADDR_MAX);
+}
-- 
2.20.1
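Taken together, the ops wired up by this patch give an AVF port the standard ethdev lifecycle. A hedged application-side sketch of the sequence they enable (port 0 and one queue pair are hypothetical; mempool creation and error reporting trimmed; ethdev API of this DPDK era assumed):

#include <rte_ethdev.h>
#include <rte_lcore.h>

static int run_port(struct rte_mempool *mb_pool)
{
	struct rte_eth_conf conf = { 0 };	/* defaults; no RSS requested */
	uint16_t port = 0;

	/* dev_configure -> avf_dev_configure (VLAN strip on/off) */
	if (rte_eth_dev_configure(port, 1, 1, &conf) != 0)
		return -1;
	/* rx/tx_queue_setup -> avf_dev_rx/tx_queue_setup */
	if (rte_eth_rx_queue_setup(port, 0, 512, rte_socket_id(),
				   NULL, mb_pool) != 0)
		return -1;
	if (rte_eth_tx_queue_setup(port, 0, 512, rte_socket_id(), NULL) != 0)
		return -1;
	/* dev_start -> avf_dev_start: virtchnl queue/RSS/irq config,
	 * then per-queue start
	 */
	if (rte_eth_dev_start(port) != 0)
		return -1;

	/* ... rte_eth_rx_burst()/rte_eth_tx_burst() loop ... */

	rte_eth_dev_stop(port);		/* dev_stop -> avf_dev_stop */
	rte_eth_dev_close(port);	/* dev_close -> avf_dev_close */
	return 0;
}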