net/avf: enable queue and device
[dpdk.git] / drivers / net / avf / avf_ethdev.c
index f596e6b..e0ee05a 100644 (file)
 #include "base/avf_type.h"
 
 #include "avf.h"
+#include "avf_rxtx.h"
+
+static int avf_dev_configure(struct rte_eth_dev *dev);
+static int avf_dev_start(struct rte_eth_dev *dev);
+static void avf_dev_stop(struct rte_eth_dev *dev);
+static void avf_dev_close(struct rte_eth_dev *dev);
+static void avf_dev_info_get(struct rte_eth_dev *dev,
+                            struct rte_eth_dev_info *dev_info);
 
 int avf_logtype_init;
 int avf_logtype_driver;
@@ -40,8 +48,365 @@ static const struct rte_pci_id pci_id_avf_map[] = {
 };
 
 static const struct eth_dev_ops avf_eth_dev_ops = {
+       /* Device lifecycle callbacks */
+       .dev_configure              = avf_dev_configure,
+       .dev_start                  = avf_dev_start,
+       .dev_stop                   = avf_dev_stop,
+       .dev_close                  = avf_dev_close,
+       .dev_infos_get              = avf_dev_info_get,
+       /* Per-queue start/stop (supports deferred-start queues) */
+       .rx_queue_start             = avf_dev_rx_queue_start,
+       .rx_queue_stop              = avf_dev_rx_queue_stop,
+       .tx_queue_start             = avf_dev_tx_queue_start,
+       .tx_queue_stop              = avf_dev_tx_queue_stop,
+       /* Queue setup/teardown (implemented in avf_rxtx.c) */
+       .rx_queue_setup             = avf_dev_rx_queue_setup,
+       .rx_queue_release           = avf_dev_rx_queue_release,
+       .tx_queue_setup             = avf_dev_tx_queue_setup,
+       .tx_queue_release           = avf_dev_tx_queue_release,
 };
 
+static int
+avf_dev_configure(struct rte_eth_dev *dev)
+{
+       struct avf_adapter *ad =
+               AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+       struct avf_info *vf =  AVF_DEV_PRIVATE_TO_VF(ad);
+       struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
+
+       /* Vlan stripping setting */
+       if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN) {
+               if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+                       avf_enable_vlan_strip(ad);
+               else
+                       avf_disable_vlan_strip(ad);
+       }
+       return 0;
+}
+
+static int
+avf_init_rss(struct avf_adapter *adapter)
+{
+       struct avf_info *vf =  AVF_DEV_PRIVATE_TO_VF(adapter);
+       struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(adapter);
+       struct rte_eth_rss_conf *rss_conf;
+       uint8_t i, j, nb_q;
+       int ret;
+
+       rss_conf = &adapter->eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
+       nb_q = RTE_MIN(adapter->eth_dev->data->nb_rx_queues,
+                      AVF_MAX_NUM_QUEUES);
+
+       if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF)) {
+               PMD_DRV_LOG(DEBUG, "RSS is not supported");
+               return -ENOTSUP;
+       }
+       if (adapter->eth_dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
+               PMD_DRV_LOG(WARNING, "RSS is enabled by PF by default");
+               /* set all lut items to default queue */
+               for (i = 0; i < vf->vf_res->rss_lut_size; i++)
+                       vf->rss_lut[i] = 0;
+               ret = avf_configure_rss_lut(adapter);
+               return ret;
+       }
+
+       /* In AVF, RSS enablement is set by PF driver. It is not supported
+        * to set based on rss_conf->rss_hf.
+        */
+
+       /* configure RSS key */
+       if (!rss_conf->rss_key) {
+               /* Calculate the default hash key */
+               for (i = 0; i <= vf->vf_res->rss_key_size; i++)
+                       vf->rss_key[i] = (uint8_t)rte_rand();
+       } else
+               rte_memcpy(vf->rss_key, rss_conf->rss_key,
+                          RTE_MIN(rss_conf->rss_key_len,
+                                  vf->vf_res->rss_key_size));
+
+       /* init RSS LUT table */
+       for (i = 0; i < vf->vf_res->rss_lut_size; i++, j++) {
+               if (j >= nb_q)
+                       j = 0;
+               vf->rss_lut[i] = j;
+       }
+       /* send virtchnnl ops to configure rss*/
+       ret = avf_configure_rss_lut(adapter);
+       if (ret)
+               return ret;
+       ret = avf_configure_rss_key(adapter);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+/* Configure one Rx queue's frame-size limits and arm its tail register.
+ *
+ * Validates max_rx_pkt_len against the jumbo-frame offload setting,
+ * decides whether scattered Rx is required for this queue, then writes
+ * the queue tail register so the hardware owns nb_rx_desc - 1
+ * descriptors. Returns 0 on success, -EINVAL if the configured maximum
+ * packet length is inconsistent with the jumbo-frame setting.
+ */
+static int
+avf_init_rxq(struct rte_eth_dev *dev, struct avf_rx_queue *rxq)
+{
+       struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct rte_eth_dev_data *dev_data = dev->data;
+       uint16_t buf_size, max_pkt_len, len;
+
+       /* Usable data room of one mbuf from this queue's pool */
+       buf_size = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
+
+       /* Calculate the maximum packet length allowed */
+       len = rxq->rx_buf_len * AVF_MAX_CHAINED_RX_BUFFERS;
+       max_pkt_len = RTE_MIN(len, dev->data->dev_conf.rxmode.max_rx_pkt_len);
+
+       /* Check if the jumbo frame and maximum packet length are set
+        * correctly.
+        */
+       if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+               if (max_pkt_len <= ETHER_MAX_LEN ||
+                   max_pkt_len > AVF_FRAME_SIZE_MAX) {
+                       PMD_DRV_LOG(ERR, "maximum packet length must be "
+                                   "larger than %u and smaller than %u, "
+                                   "as jumbo frame is enabled",
+                                   (uint32_t)ETHER_MAX_LEN,
+                                   (uint32_t)AVF_FRAME_SIZE_MAX);
+                       return -EINVAL;
+               }
+       } else {
+               if (max_pkt_len < ETHER_MIN_LEN ||
+                   max_pkt_len > ETHER_MAX_LEN) {
+                       PMD_DRV_LOG(ERR, "maximum packet length must be "
+                                   "larger than %u and smaller than %u, "
+                                   "as jumbo frame is disabled",
+                                   (uint32_t)ETHER_MIN_LEN,
+                                   (uint32_t)ETHER_MAX_LEN);
+                       return -EINVAL;
+               }
+       }
+
+       rxq->max_pkt_len = max_pkt_len;
+       /* Enable scattered Rx when explicitly requested, or when one mbuf
+        * data buffer (with room for two VLAN tags) cannot hold a full frame.
+        */
+       if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
+           (rxq->max_pkt_len + 2 * AVF_VLAN_TAG_SIZE) > buf_size) {
+               dev_data->scattered_rx = 1;
+       }
+       /* Hand all descriptors to HW and flush the posted write */
+       AVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
+       AVF_WRITE_FLUSH(hw);
+
+       return 0;
+}
+
+static int
+avf_init_queues(struct rte_eth_dev *dev)
+{
+       struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+       struct avf_rx_queue **rxq =
+               (struct avf_rx_queue **)dev->data->rx_queues;
+       struct avf_tx_queue **txq =
+               (struct avf_tx_queue **)dev->data->tx_queues;
+       int i, ret = AVF_SUCCESS;
+
+       for (i = 0; i < dev->data->nb_rx_queues; i++) {
+               if (!rxq[i] || !rxq[i]->q_set)
+                       continue;
+               ret = avf_init_rxq(dev, rxq[i]);
+               if (ret != AVF_SUCCESS)
+                       break;
+       }
+       /* TODO: set rx/tx function to vector/scatter/single-segment
+        * according to parameters
+        */
+       return ret;
+}
+
+static int
+avf_start_queues(struct rte_eth_dev *dev)
+{
+       struct avf_rx_queue *rxq;
+       struct avf_tx_queue *txq;
+       int i;
+
+       for (i = 0; i < dev->data->nb_tx_queues; i++) {
+               txq = dev->data->tx_queues[i];
+               if (txq->tx_deferred_start)
+                       continue;
+               if (avf_dev_tx_queue_start(dev, i) != 0) {
+                       PMD_DRV_LOG(ERR, "Fail to start queue %u", i);
+                       return -1;
+               }
+       }
+
+       for (i = 0; i < dev->data->nb_rx_queues; i++) {
+               rxq = dev->data->rx_queues[i];
+               if (rxq->rx_deferred_start)
+                       continue;
+               if (avf_dev_rx_queue_start(dev, i) != 0) {
+                       PMD_DRV_LOG(ERR, "Fail to start queue %u", i);
+                       return -1;
+               }
+       }
+
+       return 0;
+}
+
+/* dev_ops dev_start: bring the port up.
+ *
+ * Initializes HW Rx queues, configures RSS when the PF advertises it,
+ * sends the queue configuration and interrupt mapping to the PF over
+ * virtchnl, programs all MAC addresses, and finally starts the enabled
+ * queues. Returns 0 on success, -1 on any failure.
+ */
+static int
+avf_dev_start(struct rte_eth_dev *dev)
+{
+       struct avf_adapter *adapter =
+               AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+       struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+       struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       /* NOTE(review): pci_dev/intr_handle are currently unused; presumably
+        * kept for the Rx-interrupt TODO below — confirm before removing.
+        */
+       struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+       struct rte_intr_handle *intr_handle = dev->intr_handle;
+       uint16_t interval;
+       int i;
+
+       PMD_INIT_FUNC_TRACE();
+
+       hw->adapter_stopped = 0;
+
+       /* Queue-pair count is the larger of the Rx and Tx queue counts */
+       vf->max_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
+       vf->num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
+                                     dev->data->nb_tx_queues);
+
+       /* TODO: Rx interrupt */
+
+       if (avf_init_queues(dev) != 0) {
+               PMD_DRV_LOG(ERR, "failed to do Queue init");
+               return -1;
+       }
+
+       if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
+               if (avf_init_rss(adapter) != 0) {
+                       PMD_DRV_LOG(ERR, "configure rss failed");
+                       goto err_rss;
+               }
+       }
+
+       /* Push the Rx/Tx queue configuration to the PF over virtchnl */
+       if (avf_configure_queues(adapter) != 0) {
+               PMD_DRV_LOG(ERR, "configure queues failed");
+               goto err_queue;
+       }
+
+       /* Map interrupt for writeback */
+       vf->nb_msix = 1;
+       if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) {
+               /* If WB_ON_ITR supports, enable it */
+               vf->msix_base = AVF_RX_VEC_START;
+               AVF_WRITE_REG(hw, AVFINT_DYN_CTLN1(vf->msix_base - 1),
+                             AVFINT_DYN_CTLN1_ITR_INDX_MASK |
+                             AVFINT_DYN_CTLN1_WB_ON_ITR_MASK);
+       } else {
+               /* If no WB_ON_ITR offload flags, need to set interrupt for
+                * descriptor write back.
+                */
+               vf->msix_base = AVF_MISC_VEC_ID;
+
+               /* set ITR to max */
+               interval = avf_calc_itr_interval(AVF_QUEUE_ITR_INTERVAL_MAX);
+               AVF_WRITE_REG(hw, AVFINT_DYN_CTL01,
+                             AVFINT_DYN_CTL01_INTENA_MASK |
+                             (AVF_ITR_INDEX_DEFAULT <<
+                              AVFINT_DYN_CTL01_ITR_INDX_SHIFT) |
+                             (interval << AVFINT_DYN_CTL01_INTERVAL_SHIFT));
+       }
+       AVF_WRITE_FLUSH(hw);
+       /* map all queues to the same interrupt */
+       for (i = 0; i < dev->data->nb_rx_queues; i++)
+               vf->rxq_map[0] |= 1 << i;
+       if (avf_config_irq_map(adapter)) {
+               PMD_DRV_LOG(ERR, "config interrupt mapping failed");
+               goto err_queue;
+       }
+
+       /* Set all mac addrs */
+       avf_add_del_all_mac_addr(adapter, TRUE);
+
+       if (avf_start_queues(dev) != 0) {
+               PMD_DRV_LOG(ERR, "enable queues failed");
+               goto err_mac;
+       }
+
+       /* TODO: enable interrupt for RX interrupt */
+       return 0;
+
+err_mac:
+       /* Roll back the MAC address programming done above */
+       avf_add_del_all_mac_addr(adapter, FALSE);
+err_queue:
+err_rss:
+       return -1;
+}
+
+static void
+avf_dev_stop(struct rte_eth_dev *dev)
+{
+       struct avf_adapter *adapter =
+               AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+       struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev);
+       int ret, i;
+
+       PMD_INIT_FUNC_TRACE();
+
+       if (hw->adapter_stopped == 1)
+               return;
+
+       avf_stop_queues(dev);
+
+       /*TODO: Disable the interrupt for Rx*/
+
+       /* TODO: Rx interrupt vector mapping free */
+
+       /* remove all mac addrs */
+       avf_add_del_all_mac_addr(adapter, FALSE);
+       hw->adapter_stopped = 1;
+}
+
+static void
+avf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
+{
+       struct avf_adapter *adapter =
+               AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+       struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+
+       memset(dev_info, 0, sizeof(*dev_info));
+       dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+       dev_info->max_rx_queues = vf->vsi_res->num_queue_pairs;
+       dev_info->max_tx_queues = vf->vsi_res->num_queue_pairs;
+       dev_info->min_rx_bufsize = AVF_BUF_SIZE_MIN;
+       dev_info->max_rx_pktlen = AVF_FRAME_SIZE_MAX;
+       dev_info->hash_key_size = vf->vf_res->rss_key_size;
+       dev_info->reta_size = vf->vf_res->rss_lut_size;
+       dev_info->flow_type_rss_offloads = AVF_RSS_OFFLOAD_ALL;
+       dev_info->max_mac_addrs = AVF_NUM_MACADDR_MAX;
+       dev_info->rx_offload_capa =
+               DEV_RX_OFFLOAD_VLAN_STRIP |
+               DEV_RX_OFFLOAD_IPV4_CKSUM |
+               DEV_RX_OFFLOAD_UDP_CKSUM |
+               DEV_RX_OFFLOAD_TCP_CKSUM;
+       dev_info->tx_offload_capa =
+               DEV_TX_OFFLOAD_VLAN_INSERT |
+               DEV_TX_OFFLOAD_IPV4_CKSUM |
+               DEV_TX_OFFLOAD_UDP_CKSUM |
+               DEV_TX_OFFLOAD_TCP_CKSUM |
+               DEV_TX_OFFLOAD_SCTP_CKSUM |
+               DEV_TX_OFFLOAD_TCP_TSO;
+
+       dev_info->default_rxconf = (struct rte_eth_rxconf) {
+               .rx_free_thresh = AVF_DEFAULT_RX_FREE_THRESH,
+               .rx_drop_en = 0,
+       };
+
+       dev_info->default_txconf = (struct rte_eth_txconf) {
+               .tx_free_thresh = AVF_DEFAULT_TX_FREE_THRESH,
+               .tx_rs_thresh = AVF_DEFAULT_TX_RS_THRESH,
+               .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
+                               ETH_TXQ_FLAGS_NOOFFLOADS,
+       };
+
+       dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
+               .nb_max = AVF_MAX_RING_DESC,
+               .nb_min = AVF_MIN_RING_DESC,
+               .nb_align = AVF_ALIGN_RING_DESC,
+       };
+
+       dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
+               .nb_max = AVF_MAX_RING_DESC,
+               .nb_min = AVF_MIN_RING_DESC,
+               .nb_align = AVF_ALIGN_RING_DESC,
+       };
+}
+
 static int
 avf_check_vf_reset_done(struct avf_hw *hw)
 {
@@ -250,6 +615,7 @@ avf_dev_close(struct rte_eth_dev *dev)
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
 
+       avf_dev_stop(dev);
        avf_shutdown_adminq(hw);
        /* disable uio intr before callback unregister */
        rte_intr_disable(intr_handle);