net/iavf: improve default RSS
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index b62c868..25d5cda 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -17,6 +17,7 @@
 #include <rte_eal.h>
 #include <rte_ether.h>
 #include <rte_ethdev_driver.h>
+#include <rte_ethdev_pci.h>
 #include <rte_dev.h>
 
 #include "iavf.h"
@@ -189,7 +190,33 @@ iavf_execute_vf_cmd(struct iavf_adapter *adapter, struct iavf_cmd_info *args)
                }
                _clear_cmd(vf);
                break;
-
+       case VIRTCHNL_OP_REQUEST_QUEUES:
+               /*
+                * Ignore the async reply and wait only for the system message.
+                * vf_reset is set to true if VIRTCHNL_EVENT_RESET_IMPENDING is
+                * received; otherwise the queue request failed.
+                */
+               do {
+                       result = iavf_read_msg_from_pf(adapter, args->out_size,
+                                                  args->out_buffer);
+                       if (result == IAVF_MSG_SYS && vf->vf_reset) {
+                               break;
+                       } else if (result == IAVF_MSG_CMD ||
+                               result == IAVF_MSG_ERR) {
+                               err = -1;
+                               break;
+                       }
+                       rte_delay_ms(ASQ_DELAY_MS);
+                       /* If no msg is read, or a sys event without reset is read, retry */
+               } while (i++ < MAX_TRY_TIMES);
+               if (i >= MAX_TRY_TIMES ||
+                       vf->cmd_retval != VIRTCHNL_STATUS_SUCCESS) {
+                       err = -1;
+                       PMD_DRV_LOG(ERR, "No response or return failure (%d)"
+                                   " for cmd %d", vf->cmd_retval, args->ops);
+               }
+               _clear_cmd(vf);
+               break;
        default:
                /* For other virtchnl ops in running time,
                 * wait for the cmd done flag.
@@ -429,7 +456,9 @@ iavf_get_vf_resource(struct iavf_adapter *adapter)
        caps = IAVF_BASIC_OFFLOAD_CAPS | VIRTCHNL_VF_CAP_ADV_LINK_SPEED |
                VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC |
                VIRTCHNL_VF_OFFLOAD_FDIR_PF |
-               VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF;
+               VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF |
+               VIRTCHNL_VF_OFFLOAD_REQ_QUEUES |
+               VIRTCHNL_VF_LARGE_NUM_QPAIRS;
 
        args.in_args = (uint8_t *)&caps;
        args.in_args_size = sizeof(caps);
@@ -579,6 +608,140 @@ iavf_switch_queue(struct iavf_adapter *adapter, uint16_t qid,
        return err;
 }
 
+int
+iavf_enable_queues_lv(struct iavf_adapter *adapter)
+{
+       struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+       struct virtchnl_del_ena_dis_queues *queue_select;
+       struct virtchnl_queue_chunk *queue_chunk;
+       struct iavf_cmd_info args;
+       int err, len;
+
+       len = sizeof(struct virtchnl_del_ena_dis_queues) +
+                 sizeof(struct virtchnl_queue_chunk) *
+                 (IAVF_RXTX_QUEUE_CHUNKS_NUM - 1);
+       queue_select = rte_zmalloc("queue_select", len, 0);
+       if (!queue_select)
+               return -ENOMEM;
+
+       queue_chunk = queue_select->chunks.chunks;
+       queue_select->chunks.num_chunks = IAVF_RXTX_QUEUE_CHUNKS_NUM;
+       queue_select->vport_id = vf->vsi_res->vsi_id;
+
+       queue_chunk[VIRTCHNL_QUEUE_TYPE_TX].type = VIRTCHNL_QUEUE_TYPE_TX;
+       queue_chunk[VIRTCHNL_QUEUE_TYPE_TX].start_queue_id = 0;
+       queue_chunk[VIRTCHNL_QUEUE_TYPE_TX].num_queues =
+               adapter->eth_dev->data->nb_tx_queues;
+
+       queue_chunk[VIRTCHNL_QUEUE_TYPE_RX].type = VIRTCHNL_QUEUE_TYPE_RX;
+       queue_chunk[VIRTCHNL_QUEUE_TYPE_RX].start_queue_id = 0;
+       queue_chunk[VIRTCHNL_QUEUE_TYPE_RX].num_queues =
+               adapter->eth_dev->data->nb_rx_queues;
+
+       args.ops = VIRTCHNL_OP_ENABLE_QUEUES_V2;
+       args.in_args = (u8 *)queue_select;
+       args.in_args_size = len;
+       args.out_buffer = vf->aq_resp;
+       args.out_size = IAVF_AQ_BUF_SZ;
+       err = iavf_execute_vf_cmd(adapter, &args);
+       if (err)
+               PMD_DRV_LOG(ERR,
+                           "Failed to execute command of OP_ENABLE_QUEUES_V2");
+
+       rte_free(queue_select);
+       return err;
+}
+
+int
+iavf_disable_queues_lv(struct iavf_adapter *adapter)
+{
+       struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+       struct virtchnl_del_ena_dis_queues *queue_select;
+       struct virtchnl_queue_chunk *queue_chunk;
+       struct iavf_cmd_info args;
+       int err, len;
+
+       len = sizeof(struct virtchnl_del_ena_dis_queues) +
+                 sizeof(struct virtchnl_queue_chunk) *
+                 (IAVF_RXTX_QUEUE_CHUNKS_NUM - 1);
+       queue_select = rte_zmalloc("queue_select", len, 0);
+       if (!queue_select)
+               return -ENOMEM;
+
+       queue_chunk = queue_select->chunks.chunks;
+       queue_select->chunks.num_chunks = IAVF_RXTX_QUEUE_CHUNKS_NUM;
+       queue_select->vport_id = vf->vsi_res->vsi_id;
+
+       queue_chunk[VIRTCHNL_QUEUE_TYPE_TX].type = VIRTCHNL_QUEUE_TYPE_TX;
+       queue_chunk[VIRTCHNL_QUEUE_TYPE_TX].start_queue_id = 0;
+       queue_chunk[VIRTCHNL_QUEUE_TYPE_TX].num_queues =
+               adapter->eth_dev->data->nb_tx_queues;
+
+       queue_chunk[VIRTCHNL_QUEUE_TYPE_RX].type = VIRTCHNL_QUEUE_TYPE_RX;
+       queue_chunk[VIRTCHNL_QUEUE_TYPE_RX].start_queue_id = 0;
+       queue_chunk[VIRTCHNL_QUEUE_TYPE_RX].num_queues =
+               adapter->eth_dev->data->nb_rx_queues;
+
+       args.ops = VIRTCHNL_OP_DISABLE_QUEUES_V2;
+       args.in_args = (u8 *)queue_select;
+       args.in_args_size = len;
+       args.out_buffer = vf->aq_resp;
+       args.out_size = IAVF_AQ_BUF_SZ;
+       err = iavf_execute_vf_cmd(adapter, &args);
+       if (err)
+               PMD_DRV_LOG(ERR,
+                           "Failed to execute command of OP_DISABLE_QUEUES_V2");
+
+       rte_free(queue_select);
+       return err;
+}
+
+int
+iavf_switch_queue_lv(struct iavf_adapter *adapter, uint16_t qid,
+                bool rx, bool on)
+{
+       struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+       struct virtchnl_del_ena_dis_queues *queue_select;
+       struct virtchnl_queue_chunk *queue_chunk;
+       struct iavf_cmd_info args;
+       int err, len;
+
+       len = sizeof(struct virtchnl_del_ena_dis_queues);
+       queue_select = rte_zmalloc("queue_select", len, 0);
+       if (!queue_select)
+               return -ENOMEM;
+
+       queue_chunk = queue_select->chunks.chunks;
+       queue_select->chunks.num_chunks = 1;
+       queue_select->vport_id = vf->vsi_res->vsi_id;
+
+       if (rx) {
+               queue_chunk->type = VIRTCHNL_QUEUE_TYPE_RX;
+               queue_chunk->start_queue_id = qid;
+               queue_chunk->num_queues = 1;
+       } else {
+               queue_chunk->type = VIRTCHNL_QUEUE_TYPE_TX;
+               queue_chunk->start_queue_id = qid;
+               queue_chunk->num_queues = 1;
+       }
+
+       if (on)
+               args.ops = VIRTCHNL_OP_ENABLE_QUEUES_V2;
+       else
+               args.ops = VIRTCHNL_OP_DISABLE_QUEUES_V2;
+       args.in_args = (u8 *)queue_select;
+       args.in_args_size = len;
+       args.out_buffer = vf->aq_resp;
+       args.out_size = IAVF_AQ_BUF_SZ;
+       err = iavf_execute_vf_cmd(adapter, &args);
+       if (err)
+               PMD_DRV_LOG(ERR, "Failed to execute command of %s",
+                           on ? "OP_ENABLE_QUEUES_V2" : "OP_DISABLE_QUEUES_V2");
+
+       rte_free(queue_select);
+       return err;
+}
+
 int
 iavf_configure_rss_lut(struct iavf_adapter *adapter)
 {
@@ -644,7 +807,8 @@ iavf_configure_rss_key(struct iavf_adapter *adapter)
 }
 
 int
-iavf_configure_queues(struct iavf_adapter *adapter)
+iavf_configure_queues(struct iavf_adapter *adapter,
+               uint16_t num_queue_pairs, uint16_t index)
 {
        struct iavf_rx_queue **rxq =
                (struct iavf_rx_queue **)adapter->eth_dev->data->rx_queues;
@@ -658,55 +822,61 @@ iavf_configure_queues(struct iavf_adapter *adapter)
        int err;
 
        size = sizeof(*vc_config) +
-              sizeof(vc_config->qpair[0]) * vf->num_queue_pairs;
+              sizeof(vc_config->qpair[0]) * num_queue_pairs;
        vc_config = rte_zmalloc("cfg_queue", size, 0);
        if (!vc_config)
                return -ENOMEM;
 
        vc_config->vsi_id = vf->vsi_res->vsi_id;
-       vc_config->num_queue_pairs = vf->num_queue_pairs;
+       vc_config->num_queue_pairs = num_queue_pairs;
 
-       for (i = 0, vc_qp = vc_config->qpair;
-            i < vf->num_queue_pairs;
+       for (i = index, vc_qp = vc_config->qpair;
+                i < index + num_queue_pairs;
             i++, vc_qp++) {
                vc_qp->txq.vsi_id = vf->vsi_res->vsi_id;
                vc_qp->txq.queue_id = i;
-               /* Virtchnnl configure queues by pairs */
+
+               /* Virtchnl configures Tx queues by pairs */
                if (i < adapter->eth_dev->data->nb_tx_queues) {
                        vc_qp->txq.ring_len = txq[i]->nb_tx_desc;
                        vc_qp->txq.dma_ring_addr = txq[i]->tx_ring_phys_addr;
                }
+
                vc_qp->rxq.vsi_id = vf->vsi_res->vsi_id;
                vc_qp->rxq.queue_id = i;
                vc_qp->rxq.max_pkt_size = vf->max_pkt_len;
-               /* Virtchnnl configure queues by pairs */
-               if (i < adapter->eth_dev->data->nb_rx_queues) {
-                       vc_qp->rxq.ring_len = rxq[i]->nb_rx_desc;
-                       vc_qp->rxq.dma_ring_addr = rxq[i]->rx_ring_phys_addr;
-                       vc_qp->rxq.databuffer_size = rxq[i]->rx_buf_len;
-               }
+
+               if (i >= adapter->eth_dev->data->nb_rx_queues)
+                       continue;
+
+               /* Virtchnl configures Rx queues by pairs */
+               vc_qp->rxq.ring_len = rxq[i]->nb_rx_desc;
+               vc_qp->rxq.dma_ring_addr = rxq[i]->rx_ring_phys_addr;
+               vc_qp->rxq.databuffer_size = rxq[i]->rx_buf_len;
 
 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
                if (vf->vf_res->vf_cap_flags &
-                       VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC &&
-                       vf->supported_rxdid & BIT(IAVF_RXDID_COMMS_OVS_1)) {
-                       vc_qp->rxq.rxdid = IAVF_RXDID_COMMS_OVS_1;
-                       PMD_DRV_LOG(NOTICE, "request RXDID == %d in "
-                                       "Queue[%d]", vc_qp->rxq.rxdid, i);
+                   VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC &&
+                   vf->supported_rxdid & BIT(rxq[i]->rxdid)) {
+                       vc_qp->rxq.rxdid = rxq[i]->rxdid;
+                       PMD_DRV_LOG(NOTICE, "request RXDID[%d] in Queue[%d]",
+                                   vc_qp->rxq.rxdid, i);
                } else {
+                       PMD_DRV_LOG(NOTICE, "RXDID[%d] is not supported, "
+                                   "request default RXDID[%d] in Queue[%d]",
+                                   rxq[i]->rxdid, IAVF_RXDID_LEGACY_1, i);
                        vc_qp->rxq.rxdid = IAVF_RXDID_LEGACY_1;
-                       PMD_DRV_LOG(NOTICE, "request RXDID == %d in "
-                                       "Queue[%d]", vc_qp->rxq.rxdid, i);
                }
 #else
                if (vf->vf_res->vf_cap_flags &
                        VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC &&
                        vf->supported_rxdid & BIT(IAVF_RXDID_LEGACY_0)) {
                        vc_qp->rxq.rxdid = IAVF_RXDID_LEGACY_0;
-                       PMD_DRV_LOG(NOTICE, "request RXDID == %d in "
-                                       "Queue[%d]", vc_qp->rxq.rxdid, i);
+                       PMD_DRV_LOG(NOTICE, "request RXDID[%d] in Queue[%d]",
+                                   vc_qp->rxq.rxdid, i);
                } else {
-                       PMD_DRV_LOG(ERR, "RXDID == 0 is not supported");
+                       PMD_DRV_LOG(ERR, "RXDID[%d] is not supported",
+                                   IAVF_RXDID_LEGACY_0);
                        return -1;
                }
 #endif
@@ -745,13 +915,14 @@ iavf_config_irq_map(struct iavf_adapter *adapter)
                return -ENOMEM;
 
        map_info->num_vectors = vf->nb_msix;
-       for (i = 0; i < vf->nb_msix; i++) {
-               vecmap = &map_info->vecmap[i];
+       for (i = 0; i < adapter->eth_dev->data->nb_rx_queues; i++) {
+               vecmap =
+                   &map_info->vecmap[vf->qv_map[i].vector_id - vf->msix_base];
                vecmap->vsi_id = vf->vsi_res->vsi_id;
                vecmap->rxitr_idx = IAVF_ITR_INDEX_DEFAULT;
-               vecmap->vector_id = vf->msix_base + i;
+               vecmap->vector_id = vf->qv_map[i].vector_id;
                vecmap->txq_map = 0;
-               vecmap->rxq_map = vf->rxq_map[vf->msix_base + i];
+               vecmap->rxq_map |= 1 << vf->qv_map[i].queue_id;
        }
 
        args.ops = VIRTCHNL_OP_CONFIG_IRQ_MAP;
@@ -767,6 +938,47 @@ iavf_config_irq_map(struct iavf_adapter *adapter)
        return err;
 }
 
+int
+iavf_config_irq_map_lv(struct iavf_adapter *adapter, uint16_t num,
+               uint16_t index)
+{
+       struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+       struct virtchnl_queue_vector_maps *map_info;
+       struct virtchnl_queue_vector *qv_maps;
+       struct iavf_cmd_info args;
+       int len, i, err;
+       int count = 0;
+
+       len = sizeof(struct virtchnl_queue_vector_maps) +
+             sizeof(struct virtchnl_queue_vector) * (num - 1);
+
+       map_info = rte_zmalloc("map_info", len, 0);
+       if (!map_info)
+               return -ENOMEM;
+
+       map_info->vport_id = vf->vsi_res->vsi_id;
+       map_info->num_qv_maps = num;
+       for (i = index; i < index + map_info->num_qv_maps; i++) {
+               qv_maps = &map_info->qv_maps[count++];
+               qv_maps->itr_idx = VIRTCHNL_ITR_IDX_0;
+               qv_maps->queue_type = VIRTCHNL_QUEUE_TYPE_RX;
+               qv_maps->queue_id = vf->qv_map[i].queue_id;
+               qv_maps->vector_id = vf->qv_map[i].vector_id;
+       }
+
+       args.ops = VIRTCHNL_OP_MAP_QUEUE_VECTOR;
+       args.in_args = (u8 *)map_info;
+       args.in_args_size = len;
+       args.out_buffer = vf->aq_resp;
+       args.out_size = IAVF_AQ_BUF_SZ;
+       err = iavf_execute_vf_cmd(adapter, &args);
+       if (err)
+               PMD_DRV_LOG(ERR, "Failed to execute command OP_MAP_QUEUE_VECTOR");
+
+       rte_free(map_info);
+       return err;
+}
+
 void
 iavf_add_del_all_mac_addr(struct iavf_adapter *adapter, bool add)
 {
@@ -1131,6 +1343,29 @@ iavf_add_del_rss_cfg(struct iavf_adapter *adapter,
        return err;
 }
 
+int
+iavf_set_hena(struct iavf_adapter *adapter, uint64_t hena)
+{
+       struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+       struct virtchnl_rss_hena vrh;
+       struct iavf_cmd_info args;
+       int err;
+
+       vrh.hena = hena;
+       args.ops = VIRTCHNL_OP_SET_RSS_HENA;
+       args.in_args = (u8 *)&vrh;
+       args.in_args_size = sizeof(vrh);
+       args.out_buffer = vf->aq_resp;
+       args.out_size = IAVF_AQ_BUF_SZ;
+
+       err = iavf_execute_vf_cmd(adapter, &args);
+       if (err)
+               PMD_DRV_LOG(ERR,
+                           "Failed to execute command of OP_SET_RSS_HENA");
+
+       return err;
+}
+
 int
 iavf_add_del_mc_addr_list(struct iavf_adapter *adapter,
                        struct rte_ether_addr *mc_addrs,
@@ -1183,3 +1418,87 @@ iavf_add_del_mc_addr_list(struct iavf_adapter *adapter,
 
        return 0;
 }
+
+int
+iavf_request_queues(struct iavf_adapter *adapter, uint16_t num)
+{
+       struct rte_eth_dev *dev = adapter->eth_dev;
+       struct iavf_info *vf =  IAVF_DEV_PRIVATE_TO_VF(adapter);
+       struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+       struct virtchnl_vf_res_request vfres;
+       struct iavf_cmd_info args;
+       uint16_t num_queue_pairs;
+       int err;
+
+       if (!(vf->vf_res->vf_cap_flags &
+               VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)) {
+               PMD_DRV_LOG(ERR, "request queues not supported");
+               return -1;
+       }
+
+       if (num == 0) {
+               PMD_DRV_LOG(ERR, "queue number cannot be zero");
+               return -1;
+       }
+       vfres.num_queue_pairs = num;
+
+       args.ops = VIRTCHNL_OP_REQUEST_QUEUES;
+       args.in_args = (u8 *)&vfres;
+       args.in_args_size = sizeof(vfres);
+       args.out_buffer = vf->aq_resp;
+       args.out_size = IAVF_AQ_BUF_SZ;
+
+       /*
+        * Disable the interrupt to avoid the admin queue message being read
+        * (by the interrupt handler) before iavf_read_msg_from_pf.
+        */
+       rte_intr_disable(&pci_dev->intr_handle);
+       err = iavf_execute_vf_cmd(adapter, &args);
+       rte_intr_enable(&pci_dev->intr_handle);
+       if (err) {
+               PMD_DRV_LOG(ERR, "Failed to execute command OP_REQUEST_QUEUES");
+               return err;
+       }
+
+       /* request queues succeeded, vf is resetting */
+       if (vf->vf_reset) {
+               PMD_DRV_LOG(INFO, "vf is resetting");
+               return 0;
+       }
+
+       /* request additional queues failed, return available number */
+       num_queue_pairs =
+         ((struct virtchnl_vf_res_request *)args.out_buffer)->num_queue_pairs;
+       PMD_DRV_LOG(ERR, "request queues failed, only %u queues "
+               "available", num_queue_pairs);
+
+       return -1;
+}
+
+int
+iavf_get_max_rss_queue_region(struct iavf_adapter *adapter)
+{
+       struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+       struct iavf_cmd_info args;
+       uint16_t qregion_width;
+       int err;
+
+       args.ops = VIRTCHNL_OP_GET_MAX_RSS_QREGION;
+       args.in_args = NULL;
+       args.in_args_size = 0;
+       args.out_buffer = vf->aq_resp;
+       args.out_size = IAVF_AQ_BUF_SZ;
+
+       err = iavf_execute_vf_cmd(adapter, &args);
+       if (err) {
+               PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL_OP_GET_MAX_RSS_QREGION");
+               return err;
+       }
+
+       qregion_width =
+       ((struct virtchnl_max_rss_qregion *)args.out_buffer)->qregion_width;
+
+       vf->max_rss_qregion = (uint16_t)(1 << qregion_width);
+
+       return 0;
+}
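
For reference, below is a minimal caller-side sketch (not part of this patch) of how the large-VF helpers introduced above might be chained from the PMD start path once VIRTCHNL_VF_OFFLOAD_REQ_QUEUES and VIRTCHNL_VF_LARGE_NUM_QPAIRS have been negotiated. The function name and the batching constant IAVF_CFG_Q_NUM_PER_BUF are illustrative assumptions; only the helper signatures come from the diff.

static int
iavf_large_vf_start_sketch(struct iavf_adapter *adapter, uint16_t num_qp)
{
	uint16_t index = 0;
	int err;

	/* Ask the PF for more queue pairs; on success the VF is reset and
	 * must be re-initialized before the remaining steps can run.
	 */
	err = iavf_request_queues(adapter, num_qp);
	if (err)
		return err;

	/* Query how wide an RSS queue region the PF supports. */
	err = iavf_get_max_rss_queue_region(adapter);
	if (err)
		return err;

	/* Configure queues and IRQ maps in batches; the batch size
	 * IAVF_CFG_Q_NUM_PER_BUF is an assumption for illustration.
	 */
	while (index < num_qp) {
		uint16_t num = RTE_MIN((uint16_t)(num_qp - index),
				       (uint16_t)IAVF_CFG_Q_NUM_PER_BUF);

		err = iavf_configure_queues(adapter, num, index);
		if (err)
			return err;
		err = iavf_config_irq_map_lv(adapter, num, index);
		if (err)
			return err;
		index += num;
	}

	/* Enable all queues through the v2 (large VF) opcode. */
	return iavf_enable_queues_lv(adapter);
}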