#define IAVF_MAX_NUM_QUEUES_DFLT 16
#define IAVF_MAX_NUM_QUEUES_LV 256
#define IAVF_CFG_Q_NUM_PER_BUF 32
+#define IAVF_IRQ_MAP_NUM_PER_BUF 128
#define IAVF_NUM_MACADDR_MAX 64
struct iavf_fdir_conf conf;
};
-/* TODO: is that correct to assume the max number to be 16 ?*/
-#define IAVF_MAX_MSIX_VECTORS 16
+/* One Rx queue -> MSI-X vector assignment. An array of these replaces
+ * the old fixed-size rxq_map[] bitmask so a large VF (up to
+ * IAVF_MAX_NUM_QUEUES_LV queues) is no longer limited to 16 vectors
+ * or 16 queue bits per vector.
+ */
+struct iavf_qv_map {
+ uint16_t queue_id; /* Rx queue index */
+ uint16_t vector_id; /* MSI-X vector mapped to this queue */
+};
/* Message type read in admin queue from PF */
enum iavf_aq_result {
uint16_t nb_msix; /* number of MSI-X interrupts on Rx */
uint16_t msix_base; /* msix vector base from */
uint16_t max_rss_qregion; /* max RSS queue region supported by PF */
- /* queue bitmask for each vector */
- uint16_t rxq_map[IAVF_MAX_MSIX_VECTORS];
+ struct iavf_qv_map *qv_map; /* queue vector mapping */
struct iavf_flow_list flow_list;
rte_spinlock_t flow_ops_lock;
struct iavf_parser_list rss_parser_list;
uint16_t num_queue_pairs, uint16_t index);
int iavf_get_supported_rxdid(struct iavf_adapter *adapter);
int iavf_config_irq_map(struct iavf_adapter *adapter);
+int iavf_config_irq_map_lv(struct iavf_adapter *adapter, uint16_t num,
+ uint16_t index);
void iavf_add_del_all_mac_addr(struct iavf_adapter *adapter, bool add);
int iavf_dev_link_update(struct rte_eth_dev *dev,
__rte_unused int wait_to_complete);
IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
+ struct iavf_qv_map *qv_map;
uint16_t interval, i;
int vec;
}
}
+ qv_map = rte_zmalloc("qv_map",
+ dev->data->nb_rx_queues * sizeof(struct iavf_qv_map), 0);
+ if (!qv_map) {
+ PMD_DRV_LOG(ERR, "Failed to allocate %d queue-vector map",
+ dev->data->nb_rx_queues);
+ return -1;
+ }
+
if (!dev->data->dev_conf.intr_conf.rxq ||
!rte_intr_dp_is_en(intr_handle)) {
/* Rx interrupt disabled, Map interrupt only for writeback */
}
IAVF_WRITE_FLUSH(hw);
/* map all queues to the same interrupt */
- for (i = 0; i < dev->data->nb_rx_queues; i++)
- vf->rxq_map[vf->msix_base] |= 1 << i;
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ qv_map[i].queue_id = i;
+ qv_map[i].vector_id = vf->msix_base;
+ }
+ vf->qv_map = qv_map;
} else {
if (!rte_intr_allow_others(intr_handle)) {
vf->nb_msix = 1;
vf->msix_base = IAVF_MISC_VEC_ID;
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- vf->rxq_map[vf->msix_base] |= 1 << i;
+ qv_map[i].queue_id = i;
+ qv_map[i].vector_id = vf->msix_base;
intr_handle->intr_vec[i] = IAVF_MISC_VEC_ID;
}
+ vf->qv_map = qv_map;
PMD_DRV_LOG(DEBUG,
"vector %u are mapping to all Rx queues",
vf->msix_base);
vf->msix_base = IAVF_RX_VEC_START;
vec = IAVF_RX_VEC_START;
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- vf->rxq_map[vec] |= 1 << i;
+ qv_map[i].queue_id = i;
+ qv_map[i].vector_id = vec;
intr_handle->intr_vec[i] = vec++;
if (vec >= vf->nb_msix)
vec = IAVF_RX_VEC_START;
}
+ vf->qv_map = qv_map;
PMD_DRV_LOG(DEBUG,
"%u vectors are mapping to %u Rx queues",
vf->nb_msix, dev->data->nb_rx_queues);
}
}
- if (iavf_config_irq_map(adapter)) {
- PMD_DRV_LOG(ERR, "config interrupt mapping failed");
- return -1;
+ if (!vf->lv_enabled) {
+ if (iavf_config_irq_map(adapter)) {
+ PMD_DRV_LOG(ERR, "config interrupt mapping failed");
+ return -1;
+ }
+ } else {
+ uint16_t num_qv_maps = dev->data->nb_rx_queues;
+ uint16_t index = 0;
+
+ while (num_qv_maps > IAVF_IRQ_MAP_NUM_PER_BUF) {
+ if (iavf_config_irq_map_lv(adapter,
+ IAVF_IRQ_MAP_NUM_PER_BUF, index)) {
+ PMD_DRV_LOG(ERR, "config interrupt mapping for large VF failed");
+ return -1;
+ }
+ num_qv_maps -= IAVF_IRQ_MAP_NUM_PER_BUF;
+ index += IAVF_IRQ_MAP_NUM_PER_BUF;
+ }
+
+ if (iavf_config_irq_map_lv(adapter, num_qv_maps, index)) {
+ PMD_DRV_LOG(ERR, "config interrupt mapping for large VF failed");
+ return -1;
+ }
}
return 0;
}
return -ENOMEM;
map_info->num_vectors = vf->nb_msix;
- for (i = 0; i < vf->nb_msix; i++) {
- vecmap = &map_info->vecmap[i];
+ for (i = 0; i < adapter->eth_dev->data->nb_rx_queues; i++) {
+ vecmap =
+ &map_info->vecmap[vf->qv_map[i].vector_id - vf->msix_base];
vecmap->vsi_id = vf->vsi_res->vsi_id;
vecmap->rxitr_idx = IAVF_ITR_INDEX_DEFAULT;
- vecmap->vector_id = vf->msix_base + i;
+ vecmap->vector_id = vf->qv_map[i].vector_id;
vecmap->txq_map = 0;
- vecmap->rxq_map = vf->rxq_map[vf->msix_base + i];
+ vecmap->rxq_map |= 1 << vf->qv_map[i].queue_id;
}
args.ops = VIRTCHNL_OP_CONFIG_IRQ_MAP;
return err;
}
+/* Send one VIRTCHNL_OP_MAP_QUEUE_VECTOR message mapping a batch of Rx
+ * queues to their MSI-X vectors for a large VF.
+ *
+ * @param adapter device adapter
+ * @param num     number of vf->qv_map[] entries carried in this message;
+ *                the caller batches in chunks of IAVF_IRQ_MAP_NUM_PER_BUF
+ * @param index   offset into vf->qv_map[] where this batch starts
+ *
+ * Return: 0 on success, -ENOMEM if the message buffer cannot be
+ * allocated, otherwise the error from iavf_execute_vf_cmd().
+ */
+int
+iavf_config_irq_map_lv(struct iavf_adapter *adapter, uint16_t num,
+ uint16_t index)
+{
+ struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+ struct virtchnl_queue_vector_maps *map_info;
+ struct virtchnl_queue_vector *qv_maps;
+ struct iavf_cmd_info args;
+ int len, i, err;
+ int count = 0;
+
+ /* Variable-length message: struct virtchnl_queue_vector_maps already
+ * contains one qv_maps element, hence (num - 1) extra entries.
+ */
+ len = sizeof(struct virtchnl_queue_vector_maps) +
+ sizeof(struct virtchnl_queue_vector) * (num - 1);
+
+ map_info = rte_zmalloc("map_info", len, 0);
+ if (!map_info)
+ return -ENOMEM;
+
+ map_info->vport_id = vf->vsi_res->vsi_id;
+ map_info->num_qv_maps = num;
+ /* Copy entries [index, index + num) of vf->qv_map into the message. */
+ for (i = index; i < index + map_info->num_qv_maps; i++) {
+ qv_maps = &map_info->qv_maps[count++];
+ qv_maps->itr_idx = VIRTCHNL_ITR_IDX_0;
+ qv_maps->queue_type = VIRTCHNL_QUEUE_TYPE_RX;
+ qv_maps->queue_id = vf->qv_map[i].queue_id;
+ qv_maps->vector_id = vf->qv_map[i].vector_id;
+ }
+
+ args.ops = VIRTCHNL_OP_MAP_QUEUE_VECTOR;
+ args.in_args = (u8 *)map_info;
+ args.in_args_size = len;
+ args.out_buffer = vf->aq_resp;
+ args.out_size = IAVF_AQ_BUF_SZ;
+ err = iavf_execute_vf_cmd(adapter, &args);
+ if (err)
+ PMD_DRV_LOG(ERR, "fail to execute command OP_MAP_QUEUE_VECTOR");
+
+ rte_free(map_info);
+ return err;
+}
+
void
iavf_add_del_all_mac_addr(struct iavf_adapter *adapter, bool add)
{