i40evf_configure_queues(struct rte_eth_dev *dev)
{
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
- struct i40e_virtchnl_vsi_queue_config_info *queue_info;
- struct i40e_virtchnl_queue_pair_info *queue_cfg;
struct i40e_rx_queue **rxq =
(struct i40e_rx_queue **)dev->data->rx_queues;
struct i40e_tx_queue **txq =
(struct i40e_tx_queue **)dev->data->tx_queues;
- int i, len, nb_qpairs, num_rxq, num_txq;
- int err;
+ struct i40e_virtchnl_vsi_queue_config_info *vc_vqci;
+ struct i40e_virtchnl_queue_pair_info *vc_qpi;
struct vf_cmd_info args;
- struct rte_pktmbuf_pool_private *mbp_priv;
+ uint16_t i, nb_qp = vf->num_queue_pairs;
+ const uint32_t size =
+ I40E_VIRTCHNL_CONFIG_VSI_QUEUES_SIZE(vc_vqci, nb_qp);
+ uint8_t buff[size];
+ int ret;
- nb_qpairs = vf->num_queue_pairs;
- len = sizeof(*queue_info) + sizeof(*queue_cfg) * nb_qpairs;
- queue_info = rte_zmalloc("queue_info", len, 0);
- if (queue_info == NULL) {
- PMD_INIT_LOG(ERR, "failed alloc memory for queue_info");
- return -1;
- }
- queue_info->vsi_id = vf->vsi_res->vsi_id;
- queue_info->num_queue_pairs = nb_qpairs;
- queue_cfg = queue_info->qpair;
+ memset(buff, 0, sizeof(buff));
+ vc_vqci = (struct i40e_virtchnl_vsi_queue_config_info *)buff;
+ vc_vqci->vsi_id = vf->vsi_res->vsi_id;
+ vc_vqci->num_queue_pairs = nb_qp;
+ vc_qpi = vc_vqci->qpair;
- num_rxq = dev->data->nb_rx_queues;
- num_txq = dev->data->nb_tx_queues;
/*
* The PF host driver requires queues to be configured in pairs, which
* means rxq_num should equal txq_num. Actual usage won't always work
* that way, so fill the HW ring options with 0 for any queues that
* are not set up.
*/
- for (i = 0; i < nb_qpairs; i++) {
+ for (i = 0; i < nb_qp; i++) {
/* Fill TX info */
- queue_cfg->txq.vsi_id = queue_info->vsi_id;
- queue_cfg->txq.queue_id = i;
- if (i < num_txq) {
- queue_cfg->txq.ring_len = txq[i]->nb_tx_desc;
- queue_cfg->txq.dma_ring_addr = txq[i]->tx_ring_phys_addr;
+ vc_qpi->txq.vsi_id = vc_vqci->vsi_id;
+ vc_qpi->txq.queue_id = i;
+ if (i < dev->data->nb_tx_queues) {
+ vc_qpi->txq.ring_len = txq[i]->nb_tx_desc;
+ vc_qpi->txq.dma_ring_addr = txq[i]->tx_ring_phys_addr;
} else {
- queue_cfg->txq.ring_len = 0;
- queue_cfg->txq.dma_ring_addr = 0;
+ vc_qpi->txq.ring_len = 0;
+ vc_qpi->txq.dma_ring_addr = 0;
}
/* Fill RX info */
- queue_cfg->rxq.vsi_id = queue_info->vsi_id;
- queue_cfg->rxq.queue_id = i;
- queue_cfg->rxq.max_pkt_size = vf->max_pkt_len;
- if (i < num_rxq) {
- mbp_priv = rte_mempool_get_priv(rxq[i]->mp);
- queue_cfg->rxq.databuffer_size = mbp_priv->mbuf_data_room_size -
- RTE_PKTMBUF_HEADROOM;;
- queue_cfg->rxq.ring_len = rxq[i]->nb_rx_desc;
- queue_cfg->rxq.dma_ring_addr = rxq[i]->rx_ring_phys_addr;;
+ vc_qpi->rxq.vsi_id = vc_vqci->vsi_id;
+ vc_qpi->rxq.queue_id = i;
+ vc_qpi->rxq.max_pkt_size = vf->max_pkt_len;
+ if (i < dev->data->nb_rx_queues) {
+ struct rte_pktmbuf_pool_private *mbp_priv =
+ rte_mempool_get_priv(rxq[i]->mp);
+
+ vc_qpi->rxq.databuffer_size =
+ mbp_priv->mbuf_data_room_size -
+ RTE_PKTMBUF_HEADROOM;
+ vc_qpi->rxq.ring_len = rxq[i]->nb_rx_desc;
+ vc_qpi->rxq.dma_ring_addr = rxq[i]->rx_ring_phys_addr;
} else {
- queue_cfg->rxq.ring_len = 0;
- queue_cfg->rxq.dma_ring_addr = 0;
- queue_cfg->rxq.databuffer_size = 0;
+ vc_qpi->rxq.ring_len = 0;
+ vc_qpi->rxq.dma_ring_addr = 0;
+ vc_qpi->rxq.databuffer_size = 0;
}
- queue_cfg++;
+ vc_qpi++;
}
+ memset(&args, 0, sizeof(args));
args.ops = I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES;
- args.in_args = (u8 *)queue_info;
- args.in_args_size = len;
+ args.in_args = (uint8_t *)vc_vqci;
+ args.in_args_size = size;
args.out_buffer = cmd_result_buffer;
args.out_size = I40E_AQ_BUF_SZ;
- err = i40evf_execute_vf_cmd(dev, &args);
- if (err)
+ ret = i40evf_execute_vf_cmd(dev, &args);
+ if (ret)
PMD_DRV_LOG(ERR, "fail to execute command "
"OP_CONFIG_VSI_QUEUES");
- rte_free(queue_info);
- return err;
+ return ret;
}
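For context, the message assembled above is one contiguous, variable-length buffer: a i40e_virtchnl_vsi_queue_config_info header immediately followed in memory by num_queue_pairs trailing i40e_virtchnl_queue_pair_info entries (the qpair array). A sketch of the layout, added here for illustration and not part of the patch:

/*
 * buff: [ i40e_virtchnl_vsi_queue_config_info header ]
 *       [ qpair[0]        : txq info + rxq info      ]
 *       [ qpair[1]        : txq info + rxq info      ]
 *       [ ...                                        ]
 *       [ qpair[nb_qp - 1]: txq info + rxq info      ]
 *
 * vc_vqci stays pointed at the header while vc_qpi advances one
 * element per loop iteration through the trailing array.
 */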
static int
i40e_pf_host_process_cmd_config_vsi_queues(struct i40e_pf_vf *vf,
uint8_t *msg, uint16_t msglen)
{
struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
struct i40e_vsi *vsi = vf->vsi;
- int ret = I40E_SUCCESS;
- struct i40e_virtchnl_vsi_queue_config_info *qconfig =
- (struct i40e_virtchnl_vsi_queue_config_info *)msg;
- int i;
- struct i40e_virtchnl_queue_pair_info *qpair;
+ struct i40e_virtchnl_vsi_queue_config_info *vc_vqci =
+ (struct i40e_virtchnl_vsi_queue_config_info *)msg;
+ struct i40e_virtchnl_queue_pair_info *vc_qpi;
+ int i, ret = I40E_SUCCESS;
- if (msg == NULL || msglen <= sizeof(*qconfig) ||
- qconfig->num_queue_pairs > vsi->nb_qps) {
+ if (msg == NULL || msglen <= sizeof(*vc_vqci) ||
+ vc_vqci->num_queue_pairs > vsi->nb_qps) {
PMD_DRV_LOG(ERR, "vsi_queue_config_info argument wrong");
ret = I40E_ERR_PARAM;
goto send_msg;
}
- qpair = qconfig->qpair;
- for (i = 0; i < qconfig->num_queue_pairs; i++) {
- if (qpair[i].rxq.queue_id > vsi->nb_qps - 1 ||
- qpair[i].txq.queue_id > vsi->nb_qps - 1) {
+ vc_qpi = vc_vqci->qpair;
+ for (i = 0; i < vc_vqci->num_queue_pairs; i++) {
+ if (vc_qpi[i].rxq.queue_id > vsi->nb_qps - 1 ||
+ vc_qpi[i].txq.queue_id > vsi->nb_qps - 1) {
ret = I40E_ERR_PARAM;
goto send_msg;
}
/* Apply VF RX queue setting to HMC */
- if (i40e_pf_host_hmc_config_rxq(hw, vf, &qpair[i].rxq)
+ if (i40e_pf_host_hmc_config_rxq(hw, vf, &vc_qpi[i].rxq)
!= I40E_SUCCESS) {
PMD_DRV_LOG(ERR, "Configure RX queue HMC failed");
ret = I40E_ERR_PARAM;
}
/* Apply VF TX queue setting to HMC */
- if (i40e_pf_host_hmc_config_txq(hw, vf, &qpair[i].txq)
+ if (i40e_pf_host_hmc_config_txq(hw, vf, &vc_qpi[i].txq)
!= I40E_SUCCESS) {
PMD_DRV_LOG(ERR, "Configure TX queue HMC failed");
ret = I40E_ERR_PARAM;
}
}
send_msg:
i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
ret, NULL, 0);
+
return ret;
}
return ret;
}
-
static int
i40e_pf_host_process_cmd_add_vlan(struct i40e_pf_vf *vf,
uint8_t *msg, uint16_t msglen)
break;
case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
PMD_DRV_LOG(INFO, "OP_CONFIG_VSI_QUEUES received");
- i40e_pf_host_process_cmd_config_vsi_queues(vf,
- msg, msglen);
+ i40e_pf_host_process_cmd_config_vsi_queues(vf, msg, msglen);
break;
case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
PMD_DRV_LOG(INFO, "OP_CONFIG_IRQ_MAP received");
break;
case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
PMD_DRV_LOG(INFO, "OP_ENABLE_QUEUES received");
- i40e_pf_host_process_cmd_enable_queues(vf,
- msg, msglen);
+ i40e_pf_host_process_cmd_enable_queues(vf, msg, msglen);
break;
case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
PMD_DRV_LOG(INFO, "OP_DISABLE_QUEUE received");
- i40e_pf_host_process_cmd_disable_queues(vf,
- msg, msglen);
+ i40e_pf_host_process_cmd_disable_queues(vf, msg, msglen);
break;
case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
PMD_DRV_LOG(INFO, "OP_ADD_ETHER_ADDRESS received");
- i40e_pf_host_process_cmd_add_ether_address(vf,
- msg, msglen);
+ i40e_pf_host_process_cmd_add_ether_address(vf, msg, msglen);
break;
case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
PMD_DRV_LOG(INFO, "OP_DEL_ETHER_ADDRESS received");
- i40e_pf_host_process_cmd_del_ether_address(vf,
- msg, msglen);
+ i40e_pf_host_process_cmd_del_ether_address(vf, msg, msglen);
break;
case I40E_VIRTCHNL_OP_ADD_VLAN:
PMD_DRV_LOG(INFO, "OP_ADD_VLAN received");
PMD_DRV_LOG(ERR, "OP_FCOE received, not supported");
default:
PMD_DRV_LOG(ERR, "%u received, not supported", opcode);
- i40e_pf_host_send_msg_to_vf(vf, opcode,
- I40E_ERR_PARAM, NULL, 0);
+ i40e_pf_host_send_msg_to_vf(vf, opcode, I40E_ERR_PARAM,
+ NULL, 0);
break;
}
}
/* Default setting on number of VSIs that VF can contain */
#define I40E_DEFAULT_VF_VSI_NUM 1
+#define I40E_DPDK_OFFSET 0x100
+
enum i40e_pf_vfr_state {
I40E_PF_VFR_INPROGRESS = 0,
I40E_PF_VFR_COMPLETED = 1,
};
/* DPDK pf driver specific command to VF */
-enum i40e_virtchnl_ops_DPDK {
- /* Keep some gap between Linu PF commands and DPDK PF specific commands */
- I40E_VIRTCHNL_OP_GET_LINK_STAT = I40E_VIRTCHNL_OP_EVENT + 0x100,
+enum i40e_virtchnl_ops_dpdk {
+ /*
+ * Keep some gap between Linux PF commands and
+ * DPDK PF extended commands.
+ */
+ I40E_VIRTCHNL_OP_GET_LINK_STAT = I40E_VIRTCHNL_OP_VERSION +
+ I40E_DPDK_OFFSET,
I40E_VIRTCHNL_OP_CFG_VLAN_OFFLOAD,
I40E_VIRTCHNL_OP_CFG_VLAN_PVID,
};
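To make the offset concrete (illustrative only; assuming I40E_VIRTCHNL_OP_VERSION is 1, as defined in the i40e virtual channel header), the DPDK-extended opcodes resolve to:

/*
 * I40E_VIRTCHNL_OP_GET_LINK_STAT    = 1 + 0x100 = 0x101
 * I40E_VIRTCHNL_OP_CFG_VLAN_OFFLOAD = 0x102
 * I40E_VIRTCHNL_OP_CFG_VLAN_PVID    = 0x103
 */

This keeps the extended range well clear of the standard Linux PF opcodes, which occupy the low values of enum i40e_virtchnl_ops.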
+
struct i40e_virtchnl_vlan_offload_info {
uint16_t vsi_id;
uint8_t enable_vlan_strip;
uint8_t reserved;
};
-/* I40E_VIRTCHNL_OP_CFG_VLAN_PVID
- * VF sends this message to enable/disable pvid. If it's enable op, needs to specify the
- * pvid.
- * PF returns status code in retval.
+/*
+ * Macro to calculate the memory size for configuring VSI queues
+ * via virtual channel.
+ */
+#define I40E_VIRTCHNL_CONFIG_VSI_QUEUES_SIZE(x, n) \
+ (sizeof(*(x)) + sizeof((x)->qpair[0]) * (n))
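As a usage sketch (illustrative, not part of the patch): the first macro argument is a pointer used only as an operand of sizeof, so it is never dereferenced and need not point at valid storage. Sizing a message for 4 queue pairs would look like:

struct i40e_virtchnl_vsi_queue_config_info *p = NULL;
uint32_t len = I40E_VIRTCHNL_CONFIG_VSI_QUEUES_SIZE(p, 4);
/* len == sizeof(*p) + 4 * sizeof(p->qpair[0]) */

This mirrors how i40evf_configure_queues computes the size of its on-stack buff[] above.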
+
+/*
+ * I40E_VIRTCHNL_OP_CFG_VLAN_PVID
+ * VF sends this message to enable or disable pvid. For the
+ * enable op, the pvid to use must also be specified. PF
+ * returns the status code in retval.
*/
struct i40e_virtchnl_pvid_info {
uint16_t vsi_id;