-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2017 Intel Corporation
*/
#include <sys/queue.h>
#include <rte_string_fns.h>
#include <rte_pci.h>
#include <rte_ether.h>
-#include <rte_ethdev.h>
-#include <rte_memzone.h>
+#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
val |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
I40E_WRITE_REG(hw, I40E_VPGEN_VFRTRIG(vf_id), val);
I40E_WRITE_FLUSH(hw);
+ }
#define VFRESET_MAX_WAIT_CNT 100
- /* Wait until VF reset is done */
- for (i = 0; i < VFRESET_MAX_WAIT_CNT; i++) {
- rte_delay_us(10);
- val = I40E_READ_REG(hw, I40E_VPGEN_VFRSTAT(vf_id));
- if (val & I40E_VPGEN_VFRSTAT_VFRD_MASK)
- break;
- }
+ /* Wait until VF reset is done */
+ for (i = 0; i < VFRESET_MAX_WAIT_CNT; i++) {
+ rte_delay_us(10);
+ val = I40E_READ_REG(hw, I40E_VPGEN_VFRSTAT(vf_id));
+ if (val & I40E_VPGEN_VFRSTAT_VFRD_MASK)
+ break;
+ }
- if (i >= VFRESET_MAX_WAIT_CNT) {
- PMD_DRV_LOG(ERR, "VF reset timeout");
- return -ETIMEDOUT;
- }
- vf->state = I40E_VF_ACTIVE;
+ if (i >= VFRESET_MAX_WAIT_CNT) {
+ PMD_DRV_LOG(ERR, "VF reset timeout");
+ return -ETIMEDOUT;
}
/* This is not first time to do reset, do cleanup job first */
if (vf->vsi) {
{
struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
uint16_t abs_vf_id = hw->func_caps.vf_base_id + vf->vf_idx;
- int ret = I40E_ERR_ADMIN_QUEUE_ERROR;
-
- if (vf->state == I40E_VF_INACTIVE)
- return ret;
+ int ret;
ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, opcode, retval,
msg, msglen, NULL);
}
static void
-i40e_pf_host_process_cmd_version(struct i40e_pf_vf *vf, bool b_op)
+i40e_pf_host_process_cmd_version(struct i40e_pf_vf *vf, uint8_t *msg,
+ bool b_op)
{
struct virtchnl_version_info info;
- /* Respond like a Linux PF host in order to support both DPDK VF and
- * Linux VF driver. The expense is original DPDK host specific feature
+ /* VF and PF drivers need to follow the virtchnl definition, no matter
+ * whether it is a DPDK or a kernel driver.
+ * The original DPDK host-specific features
* like CFG_VLAN_PVID and CONFIG_VSI_QUEUES_EXT will not be available.
- *
- * DPDK VF also can't identify host driver by version number returned.
- * It always assume talking with Linux PF.
*/
+
info.major = VIRTCHNL_VERSION_MAJOR;
- info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
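+ /* store the VF's API version and reply with a compatible minor version */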
+ vf->version = *(struct virtchnl_version_info *)msg;
+ if (VF_IS_V10(&vf->version))
+ info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
+ else
+ info.minor = VIRTCHNL_VERSION_MINOR;
if (b_op)
i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
}
static int
-i40e_pf_host_process_cmd_get_vf_resource(struct i40e_pf_vf *vf, bool b_op)
+i40e_pf_host_process_cmd_get_vf_resource(struct i40e_pf_vf *vf, uint8_t *msg,
+ bool b_op)
{
struct virtchnl_vf_resource *vf_res = NULL;
struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
uint32_t len = 0;
+ uint64_t default_hena = I40E_RSS_HENA_ALL;
int ret = I40E_SUCCESS;
if (!b_op) {
goto send_msg;
}
- vf_res->vf_offload_flags = VIRTCHNL_VF_OFFLOAD_L2 |
- VIRTCHNL_VF_OFFLOAD_VLAN;
+ if (VF_IS_V10(&vf->version)) /* doesn't support offload negotiate */
+ vf->request_caps = VIRTCHNL_VF_OFFLOAD_L2 |
+ VIRTCHNL_VF_OFFLOAD_VLAN;
+ else
+ vf->request_caps = *(uint32_t *)msg;
+
+ /* Enable all RSS by default;
+ * setting HENA through virtchnl is not supported yet.
+ */
+ if (vf->request_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
+ I40E_WRITE_REG(hw, I40E_VFQF_HENA1(0, vf->vf_idx),
+ (uint32_t)default_hena);
+ I40E_WRITE_REG(hw, I40E_VFQF_HENA1(1, vf->vf_idx),
+ (uint32_t)(default_hena >> 32));
+ I40E_WRITE_FLUSH(hw);
+ }
+
+ vf_res->vf_cap_flags = vf->request_caps &
+ I40E_VIRTCHNL_OFFLOAD_CAPS;
+ /* X722 supports write-back on ITR without binding a queue
+ * to an interrupt vector.
+ */
+ if (hw->mac.type == I40E_MAC_X722)
+ vf_res->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
vf_res->max_vectors = hw->func_caps.num_msix_vectors_vf;
vf_res->num_queue_pairs = vf->vsi->nb_qps;
vf_res->num_vsis = I40E_DEFAULT_VF_VSI_NUM;
+ vf_res->rss_key_size = (I40E_PFQF_HKEY_MAX_INDEX + 1) * 4;
+ vf_res->rss_lut_size = (I40E_VFQF_HLUT1_MAX_INDEX + 1) * 4;
/* Change below setting if PF host can support more VSIs for VF */
vf_res->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
vf_res->vsi_res[0].vsi_id = vf->vsi->vsi_id;
vf_res->vsi_res[0].num_queue_pairs = vf->vsi->nb_qps;
- ether_addr_copy(&vf->mac_addr,
- (struct ether_addr *)vf_res->vsi_res[0].default_mac_addr);
+ rte_ether_addr_copy(&vf->mac_addr,
+ (struct rte_ether_addr *)vf_res->vsi_res[0].default_mac_addr);
send_msg:
i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES,
return ret;
}
-static int
-i40e_pf_host_process_cmd_config_vsi_queues_ext(struct i40e_pf_vf *vf,
- uint8_t *msg,
- uint16_t msglen,
- bool b_op)
-{
- struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
- struct i40e_vsi *vsi = vf->vsi;
- struct virtchnl_vsi_queue_config_ext_info *vc_vqcei =
- (struct virtchnl_vsi_queue_config_ext_info *)msg;
- struct virtchnl_queue_pair_ext_info *vc_qpei;
- int i, ret = I40E_SUCCESS;
-
- if (!b_op) {
- i40e_pf_host_send_msg_to_vf(
- vf,
- VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT,
- I40E_NOT_SUPPORTED, NULL, 0);
- return ret;
- }
-
- if (!msg || vc_vqcei->num_queue_pairs > vsi->nb_qps ||
- vc_vqcei->num_queue_pairs > I40E_MAX_VSI_QP ||
- msglen < I40E_VIRTCHNL_CONFIG_VSI_QUEUES_SIZE(vc_vqcei,
- vc_vqcei->num_queue_pairs)) {
- PMD_DRV_LOG(ERR, "vsi_queue_config_ext_info argument wrong");
- ret = I40E_ERR_PARAM;
- goto send_msg;
- }
-
- vc_qpei = vc_vqcei->qpair;
- for (i = 0; i < vc_vqcei->num_queue_pairs; i++) {
- if (vc_qpei[i].rxq.queue_id > vsi->nb_qps - 1 ||
- vc_qpei[i].txq.queue_id > vsi->nb_qps - 1) {
- ret = I40E_ERR_PARAM;
- goto send_msg;
- }
- /*
- * Apply VF RX queue setting to HMC.
- * If the opcode is VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT,
- * then the extra information of
- * 'struct virtchnl_queue_pair_ext_info' is needed,
- * otherwise set the last parameter to NULL.
- */
- if (i40e_pf_host_hmc_config_rxq(hw, vf, &vc_qpei[i].rxq,
- vc_qpei[i].rxq_ext.crcstrip) != I40E_SUCCESS) {
- PMD_DRV_LOG(ERR, "Configure RX queue HMC failed");
- ret = I40E_ERR_PARAM;
- goto send_msg;
- }
-
- /* Apply VF TX queue setting to HMC */
- if (i40e_pf_host_hmc_config_txq(hw, vf, &vc_qpei[i].txq) !=
- I40E_SUCCESS) {
- PMD_DRV_LOG(ERR, "Configure TX queue HMC failed");
- ret = I40E_ERR_PARAM;
- goto send_msg;
- }
- }
-
-send_msg:
- i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT,
- ret, NULL, 0);
-
- return ret;
-}
-
static void
i40e_pf_config_irq_link_list(struct i40e_pf_vf *vf,
struct virtchnl_vector_map *vvm)
(struct virtchnl_irq_map_info *)msg;
struct virtchnl_vector_map *map;
int i;
- uint16_t vector_id;
+ uint16_t vector_id, itr_idx;
unsigned long qbit_max;
if (!b_op) {
vf->vsi->msix_intr = irqmap->vecmap[0].vector_id;
vf->vsi->nb_msix = irqmap->num_vectors;
vf->vsi->nb_used_qps = vf->vsi->nb_qps;
+ itr_idx = irqmap->vecmap[0].rxitr_idx;
/* Don't care how the TX/RX queue mapping with this vector.
* Link all VF RX queues together. Only did mapping work.
* VF can disable/enable the intr by itself.
*/
- i40e_vsi_queues_bind_intr(vf->vsi);
+ i40e_vsi_queues_bind_intr(vf->vsi, itr_idx);
goto send_msg;
}
(struct virtchnl_ether_addr_list *)msg;
struct i40e_mac_filter_info filter;
int i;
- struct ether_addr *mac;
+ struct rte_ether_addr *mac;
if (!b_op) {
i40e_pf_host_send_msg_to_vf(
}
for (i = 0; i < addr_list->num_elements; i++) {
- mac = (struct ether_addr *)(addr_list->list[i].addr);
- (void)rte_memcpy(&filter.mac_addr, mac, ETHER_ADDR_LEN);
+ mac = (struct rte_ether_addr *)(addr_list->list[i].addr);
+ rte_memcpy(&filter.mac_addr, mac, RTE_ETHER_ADDR_LEN);
filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
- if (is_zero_ether_addr(mac) ||
+ if (rte_is_zero_ether_addr(mac) ||
i40e_vsi_add_mac(vf->vsi, &filter)) {
ret = I40E_ERR_INVALID_MAC_ADDR;
goto send_msg;
struct virtchnl_ether_addr_list *addr_list =
(struct virtchnl_ether_addr_list *)msg;
int i;
- struct ether_addr *mac;
+ struct rte_ether_addr *mac;
if (!b_op) {
i40e_pf_host_send_msg_to_vf(
}
for (i = 0; i < addr_list->num_elements; i++) {
- mac = (struct ether_addr *)(addr_list->list[i].addr);
- if(is_zero_ether_addr(mac) ||
+ mac = (struct rte_ether_addr *)(addr_list->list[i].addr);
+ if (rte_is_zero_ether_addr(mac) ||
i40e_vsi_delete_mac(vf->vsi, mac)) {
ret = I40E_ERR_INVALID_MAC_ADDR;
goto send_msg;
}
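+/**
+ * i40e_pf_host_process_cmd_set_rss_lut
+ * @vf: pointer to the VF structure
+ * @msg: virtchnl message carrying the RSS lookup table
+ * @msglen: length of the message buffer
+ * @b_op: false if the operation is not permitted for this VF
+ *
+ * validate the VIRTCHNL_OP_CONFIG_RSS_LUT request and program the
+ * RSS lookup table of the VF VSI
+ **/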
static int
-i40e_pf_host_process_cmd_cfg_pvid(struct i40e_pf_vf *vf,
- uint8_t *msg,
- uint16_t msglen,
- bool b_op)
+i40e_pf_host_process_cmd_set_rss_lut(struct i40e_pf_vf *vf,
+ uint8_t *msg,
+ uint16_t msglen,
+ bool b_op)
{
+ struct virtchnl_rss_lut *rss_lut = (struct virtchnl_rss_lut *)msg;
+ uint16_t valid_len;
int ret = I40E_SUCCESS;
- struct virtchnl_pvid_info *tpid_info =
- (struct virtchnl_pvid_info *)msg;
if (!b_op) {
i40e_pf_host_send_msg_to_vf(
vf,
- I40E_VIRTCHNL_OP_CFG_VLAN_PVID,
+ VIRTCHNL_OP_CONFIG_RSS_LUT,
I40E_NOT_SUPPORTED, NULL, 0);
return ret;
}
- if (msg == NULL || msglen != sizeof(*tpid_info)) {
+ if (!msg || msglen <= sizeof(struct virtchnl_rss_lut)) {
+ PMD_DRV_LOG(ERR, "set_rss_lut argument too short");
+ ret = I40E_ERR_PARAM;
+ goto send_msg;
+ }
+ valid_len = sizeof(struct virtchnl_rss_lut) + rss_lut->lut_entries - 1;
+ if (msglen < valid_len) {
+ PMD_DRV_LOG(ERR, "set_rss_lut length mismatch");
+ ret = I40E_ERR_PARAM;
+ goto send_msg;
+ }
+
+ ret = i40e_set_rss_lut(vf->vsi, rss_lut->lut, rss_lut->lut_entries);
+
+send_msg:
+ i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
+ ret, NULL, 0);
+
+ return ret;
+}
+
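+/**
+ * i40e_pf_host_process_cmd_set_rss_key
+ * @vf: pointer to the VF structure
+ * @msg: virtchnl message carrying the RSS hash key
+ * @msglen: length of the message buffer
+ * @b_op: false if the operation is not permitted for this VF
+ *
+ * validate the VIRTCHNL_OP_CONFIG_RSS_KEY request and program the
+ * RSS hash key of the VF VSI
+ **/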
+static int
+i40e_pf_host_process_cmd_set_rss_key(struct i40e_pf_vf *vf,
+ uint8_t *msg,
+ uint16_t msglen,
+ bool b_op)
+{
+ struct virtchnl_rss_key *rss_key = (struct virtchnl_rss_key *)msg;
+ uint16_t valid_len;
+ int ret = I40E_SUCCESS;
+
+ if (!b_op) {
+ i40e_pf_host_send_msg_to_vf(
+ vf,
+ VIRTCHNL_OP_CONFIG_RSS_KEY,
+ I40E_NOT_SUPPORTED, NULL, 0);
+ return ret;
+ }
+
+ if (!msg || msglen <= sizeof(struct virtchnl_rss_key)) {
+ PMD_DRV_LOG(ERR, "set_rss_key argument too short");
+ ret = I40E_ERR_PARAM;
+ goto send_msg;
+ }
+ valid_len = sizeof(struct virtchnl_rss_key) + rss_key->key_len - 1;
+ if (msglen < valid_len) {
+ PMD_DRV_LOG(ERR, "set_rss_key length mismatch");
ret = I40E_ERR_PARAM;
goto send_msg;
}
- ret = i40e_vsi_vlan_pvid_set(vf->vsi, &tpid_info->info);
+ ret = i40e_set_rss_key(vf->vsi, rss_key->key, rss_key->key_len);
send_msg:
- i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_CFG_VLAN_PVID,
- ret, NULL, 0);
+ i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
+ ret, NULL, 0);
return ret;
}
void
i40e_notify_vf_link_status(struct rte_eth_dev *dev, struct i40e_pf_vf *vf)
{
+ struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
struct virtchnl_pf_event event;
+ uint16_t vf_id = vf->vf_idx;
+ uint32_t tval, rval;
event.event = VIRTCHNL_EVENT_LINK_CHANGE;
event.event_data.link_event.link_status =
break;
}
- i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_EVENT,
- I40E_SUCCESS, (uint8_t *)&event, sizeof(event));
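+ /* only notify the VF when its admin queues are set up,
+ * i.e. any ATQ/ARQ length or enable bit is non-zero
+ */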
+ tval = I40E_READ_REG(hw, I40E_VF_ATQLEN(vf_id));
+ rval = I40E_READ_REG(hw, I40E_VF_ARQLEN(vf_id));
+
+ if (tval & I40E_VF_ATQLEN_ATQLEN_MASK ||
+ tval & I40E_VF_ATQLEN_ATQENABLE_MASK ||
+ rval & I40E_VF_ARQLEN_ARQLEN_MASK ||
+ rval & I40E_VF_ARQLEN_ARQENABLE_MASK)
+ i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_EVENT,
+ I40E_SUCCESS, (uint8_t *)&event, sizeof(event));
+}
+
+/**
+ * i40e_vc_notify_vf_reset
+ * @vf: pointer to the VF structure
+ *
+ * indicate a pending reset to the given VF
+ **/
+static void
+i40e_vc_notify_vf_reset(struct i40e_pf_vf *vf)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
+ struct virtchnl_pf_event pfe;
+ int abs_vf_id;
+ uint16_t vf_id = vf->vf_idx;
+
+ abs_vf_id = vf_id + hw->func_caps.vf_base_id;
+ pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
+ pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
+ i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT, 0, (u8 *)&pfe,
+ sizeof(struct virtchnl_pf_event), NULL);
+}
+
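+/**
+ * i40e_pf_host_process_cmd_request_queues
+ * @vf: pointer to the VF structure
+ * @msg: virtchnl message carrying the requested queue pair count
+ *
+ * round the request up to a power of two, check it against the per-VF
+ * maximum and the free queue pool, then either reset the VF with the
+ * new queue count or reply with the number of queues that can be granted
+ **/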
+static int
+i40e_pf_host_process_cmd_request_queues(struct i40e_pf_vf *vf, uint8_t *msg)
+{
+ struct virtchnl_vf_res_request *vfres =
+ (struct virtchnl_vf_res_request *)msg;
+ struct i40e_pf *pf;
+ uint32_t req_pairs = vfres->num_queue_pairs;
+ uint32_t cur_pairs = vf->vsi->nb_used_qps;
+
+ pf = vf->pf;
+
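+ /* round a non-power-of-two request up to the next power of two */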
+ if (!rte_is_power_of_2(req_pairs))
+ req_pairs = i40e_align_floor(req_pairs) << 1;
+
+ if (req_pairs == 0) {
+ PMD_DRV_LOG(ERR, "VF %d tried to request 0 queues. Ignoring.\n",
+ vf->vf_idx);
+ } else if (req_pairs > I40E_MAX_QP_NUM_PER_VF) {
+ PMD_DRV_LOG(ERR,
+ "VF %d tried to request more than %d queues.\n",
+ vf->vf_idx,
+ I40E_MAX_QP_NUM_PER_VF);
+ vfres->num_queue_pairs = I40E_MAX_QP_NUM_PER_VF;
+ } else if (req_pairs > cur_pairs + pf->qp_pool.num_free) {
+ PMD_DRV_LOG(ERR, "VF %d requested %d queues (rounded to %d) "
+ "but only %d available\n",
+ vf->vf_idx,
+ vfres->num_queue_pairs,
+ req_pairs,
+ cur_pairs + pf->qp_pool.num_free);
+ vfres->num_queue_pairs = i40e_align_floor(pf->qp_pool.num_free +
+ cur_pairs);
+ } else {
+ i40e_vc_notify_vf_reset(vf);
+ vf->vsi->nb_qps = req_pairs;
+ pf->vf_nb_qps = req_pairs;
+ i40e_pf_host_process_cmd_reset_vf(vf);
+
+ return 0;
+ }
+
+ return i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES, 0,
+ (u8 *)vfres, sizeof(*vfres));
}
void
/* AdminQ will pass absolute VF id, transfer to internal vf id */
uint16_t vf_id = abs_vf_id - hw->func_caps.vf_base_id;
struct rte_pmd_i40e_mb_event_param ret_param;
+ uint64_t first_cycle, cur_cycle;
bool b_op = TRUE;
+ int ret;
if (vf_id > pf->vf_num - 1 || !pf->vfs) {
PMD_DRV_LOG(ERR, "invalid argument");
}
vf = &pf->vfs[vf_id];
+
+ cur_cycle = rte_get_timer_cycles();
+
+ /* if the VF is currently blocked, ignore the message and return */
+ if (cur_cycle < vf->ignore_end_cycle)
+ return;
+
if (!vf->vsi) {
PMD_DRV_LOG(ERR, "NO VSI associated with VF found");
i40e_pf_host_send_msg_to_vf(vf, opcode,
I40E_ERR_NO_AVAILABLE_VSI, NULL, 0);
- return;
+ goto check;
+ }
+
+ /* perform basic checks on the msg */
+ ret = virtchnl_vc_validate_vf_msg(&vf->version, opcode, msg, msglen);
+
+ /* perform additional checks specific to this driver */
+ if (opcode == VIRTCHNL_OP_CONFIG_RSS_KEY) {
+ struct virtchnl_rss_key *vrk = (struct virtchnl_rss_key *)msg;
+
+ if (vrk->key_len != ((I40E_PFQF_HKEY_MAX_INDEX + 1) * 4))
+ ret = VIRTCHNL_ERR_PARAM;
+ } else if (opcode == VIRTCHNL_OP_CONFIG_RSS_LUT) {
+ struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
+
+ if (vrl->lut_entries != ((I40E_VFQF_HLUT1_MAX_INDEX + 1) * 4))
+ ret = VIRTCHNL_ERR_PARAM;
+ }
+
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Invalid message from VF %u, opcode %u, len %u",
+ vf_id, opcode, msglen);
+ i40e_pf_host_send_msg_to_vf(vf, opcode,
+ I40E_ERR_PARAM, NULL, 0);
+ goto check;
}
/**
* do nothing and send not_supported to VF. As PF must send a response
* to VF and ACK/NACK is not defined.
*/
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_VF_MBOX,
- NULL, &ret_param);
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_VF_MBOX, &ret_param);
if (ret_param.retval != RTE_PMD_I40E_MB_EVENT_PROCEED) {
PMD_DRV_LOG(WARNING, "VF to PF message(%d) is not permitted!",
opcode);
switch (opcode) {
case VIRTCHNL_OP_VERSION:
PMD_DRV_LOG(INFO, "OP_VERSION received");
- i40e_pf_host_process_cmd_version(vf, b_op);
+ i40e_pf_host_process_cmd_version(vf, msg, b_op);
break;
case VIRTCHNL_OP_RESET_VF:
PMD_DRV_LOG(INFO, "OP_RESET_VF received");
break;
case VIRTCHNL_OP_GET_VF_RESOURCES:
PMD_DRV_LOG(INFO, "OP_GET_VF_RESOURCES received");
- i40e_pf_host_process_cmd_get_vf_resource(vf, b_op);
+ i40e_pf_host_process_cmd_get_vf_resource(vf, msg, b_op);
break;
case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
PMD_DRV_LOG(INFO, "OP_CONFIG_VSI_QUEUES received");
i40e_pf_host_process_cmd_config_vsi_queues(vf, msg,
msglen, b_op);
break;
- case VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT:
- PMD_DRV_LOG(INFO, "OP_CONFIG_VSI_QUEUES_EXT received");
- i40e_pf_host_process_cmd_config_vsi_queues_ext(vf, msg,
- msglen, b_op);
- break;
case VIRTCHNL_OP_CONFIG_IRQ_MAP:
PMD_DRV_LOG(INFO, "OP_CONFIG_IRQ_MAP received");
i40e_pf_host_process_cmd_config_irq_map(vf, msg, msglen, b_op);
PMD_DRV_LOG(INFO, "OP_DISABLE_VLAN_STRIPPING received");
i40e_pf_host_process_cmd_disable_vlan_strip(vf, b_op);
break;
- case I40E_VIRTCHNL_OP_CFG_VLAN_PVID:
- PMD_DRV_LOG(INFO, "OP_CFG_VLAN_PVID received");
- i40e_pf_host_process_cmd_cfg_pvid(vf, msg, msglen, b_op);
+ case VIRTCHNL_OP_CONFIG_RSS_LUT:
+ PMD_DRV_LOG(INFO, "OP_CONFIG_RSS_LUT received");
+ i40e_pf_host_process_cmd_set_rss_lut(vf, msg, msglen, b_op);
+ break;
+ case VIRTCHNL_OP_CONFIG_RSS_KEY:
+ PMD_DRV_LOG(INFO, "OP_CONFIG_RSS_KEY received");
+ i40e_pf_host_process_cmd_set_rss_key(vf, msg, msglen, b_op);
break;
+ case VIRTCHNL_OP_REQUEST_QUEUES:
+ PMD_DRV_LOG(INFO, "OP_REQUEST_QUEUES received");
+ i40e_pf_host_process_cmd_request_queues(vf, msg);
+ break;
+
/* Don't add command supported below, which will
* return an error code.
*/
NULL, 0);
break;
}
+
+check:
+ /* if message validation not enabled */
+ if (!pf->vf_msg_cfg.max_msg)
+ return;
+
+ /* store current cycle */
+ vf->msg_timestamps[vf->msg_index++] = cur_cycle;
+ vf->msg_index %= pf->vf_msg_cfg.max_msg;
+
+ /* read the timestamp of the earliest recorded message */
+ first_cycle = vf->msg_timestamps[vf->msg_index];
+
+ /*
+ * If the time span from the arrival of the first recorded message
+ * to the arrival of the current message is smaller than `period`,
+ * too many messages have arrived within this statistics period.
+ */
+ if (first_cycle && cur_cycle < first_cycle +
+ (uint64_t)pf->vf_msg_cfg.period * rte_get_timer_hz()) {
+ PMD_DRV_LOG(WARNING, "VF %u sent too many messages (%u in %u"
+ " seconds),\n\tany new message from it"
+ " will be ignored during the next %u seconds!",
+ vf_id, pf->vf_msg_cfg.max_msg,
+ (uint32_t)((cur_cycle - first_cycle +
+ rte_get_timer_hz() - 1) / rte_get_timer_hz()),
+ pf->vf_msg_cfg.ignore_second);
+ vf->ignore_end_cycle = rte_get_timer_cycles() +
+ pf->vf_msg_cfg.ignore_second *
+ rte_get_timer_hz();
+ }
}
int
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ size_t size;
int ret, i;
uint32_t val;
I40E_WRITE_REG(hw, I40E_PFGEN_PORTMDIO_NUM, val);
I40E_WRITE_FLUSH(hw);
+ /* calculate the memory size needed to store message timestamps */
+ size = pf->vf_msg_cfg.max_msg * sizeof(uint64_t);
+
for (i = 0; i < pf->vf_num; i++) {
pf->vfs[i].pf = pf;
pf->vfs[i].state = I40E_VF_INACTIVE;
pf->vfs[i].vf_idx = i;
+
+ if (size) {
+ /* allocate memory to store message timestamps */
+ pf->vfs[i].msg_timestamps =
+ rte_zmalloc("i40e_pf_vf", size, 0);
+ if (pf->vfs[i].msg_timestamps == NULL) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+ }
+
ret = i40e_pf_host_vf_reset(&pf->vfs[i], 0);
if (ret != I40E_SUCCESS)
goto fail;
return I40E_SUCCESS;
fail:
+ for (; i >= 0; i--)
+ rte_free(pf->vfs[i].msg_timestamps);
rte_free(pf->vfs);
i40e_pf_enable_irq0(hw);
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
uint32_t val;
+ int i;
PMD_INIT_FUNC_TRACE();
(pf->vf_nb_qps == 0))
return I40E_SUCCESS;
+ /* free the memory used to store message timestamps */
+ for (i = 0; i < pf->vf_num; i++)
+ rte_free(pf->vfs[i].msg_timestamps);
+
/* free memory to store VF structure */
rte_free(pf->vfs);
pf->vfs = NULL;