#include <rte_string_fns.h>
#include <rte_pci.h>
#include <rte_ether.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
vf_res->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
vf_res->vsi_res[0].vsi_id = vf->vsi->vsi_id;
vf_res->vsi_res[0].num_queue_pairs = vf->vsi->nb_qps;
- ether_addr_copy(&vf->mac_addr,
- (struct ether_addr *)vf_res->vsi_res[0].default_mac_addr);
+ rte_ether_addr_copy(&vf->mac_addr,
+ (struct rte_ether_addr *)vf_res->vsi_res[0].default_mac_addr);
send_msg:
i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES,
(struct virtchnl_ether_addr_list *)msg;
struct i40e_mac_filter_info filter;
int i;
- struct ether_addr *mac;
+ struct rte_ether_addr *mac;
if (!b_op) {
i40e_pf_host_send_msg_to_vf(
}
for (i = 0; i < addr_list->num_elements; i++) {
- mac = (struct ether_addr *)(addr_list->list[i].addr);
- rte_memcpy(&filter.mac_addr, mac, ETHER_ADDR_LEN);
+ mac = (struct rte_ether_addr *)(addr_list->list[i].addr);
+ rte_memcpy(&filter.mac_addr, mac, RTE_ETHER_ADDR_LEN);
filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
- if (is_zero_ether_addr(mac) ||
+ if (rte_is_zero_ether_addr(mac) ||
i40e_vsi_add_mac(vf->vsi, &filter)) {
ret = I40E_ERR_INVALID_MAC_ADDR;
goto send_msg;
struct virtchnl_ether_addr_list *addr_list =
(struct virtchnl_ether_addr_list *)msg;
int i;
- struct ether_addr *mac;
+ struct rte_ether_addr *mac;
if (!b_op) {
i40e_pf_host_send_msg_to_vf(
}
for (i = 0; i < addr_list->num_elements; i++) {
- mac = (struct ether_addr *)(addr_list->list[i].addr);
- if(is_zero_ether_addr(mac) ||
+ mac = (struct rte_ether_addr *)(addr_list->list[i].addr);
+ if (rte_is_zero_ether_addr(mac) ||
i40e_vsi_delete_mac(vf->vsi, mac)) {
ret = I40E_ERR_INVALID_MAC_ADDR;
goto send_msg;
I40E_SUCCESS, (uint8_t *)&event, sizeof(event));
}
+/**
+ * i40e_vc_notify_vf_reset
+ * @vf: pointer to the VF structure
+ *
+ * indicate a pending reset to the given VF
+ **/
+static void
+i40e_vc_notify_vf_reset(struct i40e_pf_vf *vf)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
+ struct virtchnl_pf_event pfe;
+ int abs_vf_id;
+ uint16_t vf_id = vf->vf_idx;
+
+ /* AdminQ addresses VFs by absolute id: local VF index + PF base id */
+ abs_vf_id = vf_id + hw->func_caps.vf_base_id;
+ pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
+ pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
+ /* NOTE(review): only event/severity are initialized; the remaining
+ * pfe fields go out uninitialized from the stack — confirm intended.
+ */
+ i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT, 0, (u8 *)&pfe,
+ sizeof(struct virtchnl_pf_event), NULL);
+}
+
+/**
+ * i40e_pf_host_process_cmd_request_queues
+ * @vf: pointer to the VF structure
+ * @msg: virtchnl buffer holding a struct virtchnl_vf_res_request
+ *
+ * Handle VIRTCHNL_OP_REQUEST_QUEUES from a VF.  The requested queue-pair
+ * count is rounded up to a power of two, then checked against the per-VF
+ * maximum and the PF's free queue-pair pool.  An acceptable request is
+ * granted by recording the new count and resetting the VF; otherwise a
+ * corrected hint is written into num_queue_pairs and sent back.
+ **/
+static int
+i40e_pf_host_process_cmd_request_queues(struct i40e_pf_vf *vf, uint8_t *msg)
+{
+ struct virtchnl_vf_res_request *vfres =
+ (struct virtchnl_vf_res_request *)msg;
+ struct i40e_pf *pf;
+ uint32_t req_pairs = vfres->num_queue_pairs;
+ uint32_t cur_pairs = vf->vsi->nb_used_qps;
+
+ pf = vf->pf;
+
+ /* round a non-power-of-2 request up to the next power of 2 */
+ if (!rte_is_power_of_2(req_pairs))
+ req_pairs = i40e_align_floor(req_pairs) << 1;
+
+ if (req_pairs == 0) {
+ PMD_DRV_LOG(ERR, "VF %d tried to request 0 queues. Ignoring.\n",
+ vf->vf_idx);
+ } else if (req_pairs > I40E_MAX_QP_NUM_PER_VF) {
+ PMD_DRV_LOG(ERR,
+ "VF %d tried to request more than %d queues.\n",
+ vf->vf_idx,
+ I40E_MAX_QP_NUM_PER_VF);
+ /* hint the VF at the largest count it may request */
+ vfres->num_queue_pairs = I40E_MAX_QP_NUM_PER_VF;
+ } else if (req_pairs > cur_pairs + pf->qp_pool.num_free) {
+ PMD_DRV_LOG(ERR, "VF %d requested %d queues (rounded to %d) "
+ "but only %d available\n",
+ vf->vf_idx,
+ vfres->num_queue_pairs,
+ req_pairs,
+ cur_pairs + pf->qp_pool.num_free);
+ /* hint at the largest satisfiable power-of-2 count */
+ vfres->num_queue_pairs = i40e_align_floor(pf->qp_pool.num_free +
+ cur_pairs);
+ } else {
+ /* grant: record the new count and reset the VF to apply it.
+ * NOTE(review): no virtchnl reply is sent on this path —
+ * presumably the VF learns the outcome through the reset
+ * sequence; confirm against the virtchnl spec.
+ */
+ i40e_vc_notify_vf_reset(vf);
+ vf->vsi->nb_qps = req_pairs;
+ pf->vf_nb_qps = req_pairs;
+ i40e_pf_host_process_cmd_reset_vf(vf);
+
+ return 0;
+ }
+
+ /* refusal: reply with the (possibly corrected) request as a hint */
+ return i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES, 0,
+ (u8 *)vfres, sizeof(*vfres));
+}
+
void
i40e_pf_host_handle_vf_msg(struct rte_eth_dev *dev,
uint16_t abs_vf_id, uint32_t opcode,
/* AdminQ will pass absolute VF id, transfer to internal vf id */
uint16_t vf_id = abs_vf_id - hw->func_caps.vf_base_id;
struct rte_pmd_i40e_mb_event_param ret_param;
+ uint64_t first_cycle, cur_cycle;
bool b_op = TRUE;
+ int ret;
if (vf_id > pf->vf_num - 1 || !pf->vfs) {
PMD_DRV_LOG(ERR, "invalid argument");
}
vf = &pf->vfs[vf_id];
+
+ cur_cycle = rte_get_timer_cycles();
+
+ /* if the VF is being blocked, ignore the message and return */
+ if (cur_cycle < vf->ignore_end_cycle)
+ return;
+
if (!vf->vsi) {
PMD_DRV_LOG(ERR, "NO VSI associated with VF found");
i40e_pf_host_send_msg_to_vf(vf, opcode,
I40E_ERR_NO_AVAILABLE_VSI, NULL, 0);
- return;
+ goto check;
+ }
+
+ /* perform basic checks on the msg */
+ ret = virtchnl_vc_validate_vf_msg(&vf->version, opcode, msg, msglen);
+
+ /* perform additional checks specific to this driver */
+ if (opcode == VIRTCHNL_OP_CONFIG_RSS_KEY) {
+ struct virtchnl_rss_key *vrk = (struct virtchnl_rss_key *)msg;
+
+ if (vrk->key_len != ((I40E_PFQF_HKEY_MAX_INDEX + 1) * 4))
+ ret = VIRTCHNL_ERR_PARAM;
+ } else if (opcode == VIRTCHNL_OP_CONFIG_RSS_LUT) {
+ struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
+
+ if (vrl->lut_entries != ((I40E_VFQF_HLUT1_MAX_INDEX + 1) * 4))
+ ret = VIRTCHNL_ERR_PARAM;
+ }
+
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Invalid message from VF %u, opcode %u, len %u",
+ vf_id, opcode, msglen);
+ i40e_pf_host_send_msg_to_vf(vf, opcode,
+ I40E_ERR_PARAM, NULL, 0);
+ goto check;
}
/**
* do nothing and send not_supported to VF. As PF must send a response
* to VF and ACK/NACK is not defined.
*/
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_VF_MBOX,
- NULL, &ret_param);
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_VF_MBOX, &ret_param);
if (ret_param.retval != RTE_PMD_I40E_MB_EVENT_PROCEED) {
PMD_DRV_LOG(WARNING, "VF to PF message(%d) is not permitted!",
opcode);
PMD_DRV_LOG(INFO, "OP_CONFIG_RSS_KEY received");
i40e_pf_host_process_cmd_set_rss_key(vf, msg, msglen, b_op);
break;
+ case VIRTCHNL_OP_REQUEST_QUEUES:
+ PMD_DRV_LOG(INFO, "OP_REQUEST_QUEUES received");
+ i40e_pf_host_process_cmd_request_queues(vf, msg);
+ break;
+
/* Don't add command supported below, which will
* return an error code.
*/
NULL, 0);
break;
}
+
+check:
+ /* if message validation not enabled */
+ if (!pf->vf_msg_cfg.max_msg)
+ return;
+
+ /* store current cycle */
+ vf->msg_timestamps[vf->msg_index++] = cur_cycle;
+ vf->msg_index %= pf->vf_msg_cfg.max_msg;
+
+ /* read the timestamp of the earliest recorded message */
+ first_cycle = vf->msg_timestamps[vf->msg_index];
+
+ /*
+ * If the time span from the arrival of the earliest recorded
+ * message to the arrival of the current message is smaller than
+ * `period`, too many messages were received within this
+ * statistics period.
+ */
+ if (first_cycle && cur_cycle < first_cycle +
+ (uint64_t)pf->vf_msg_cfg.period * rte_get_timer_hz()) {
+ PMD_DRV_LOG(WARNING, "VF %u too much messages(%u in %u"
+ " seconds),\n\tany new message from which"
+ " will be ignored during next %u seconds!",
+ vf_id, pf->vf_msg_cfg.max_msg,
+ (uint32_t)((cur_cycle - first_cycle +
+ rte_get_timer_hz() - 1) / rte_get_timer_hz()),
+ pf->vf_msg_cfg.ignore_second);
+ vf->ignore_end_cycle = rte_get_timer_cycles() +
+ pf->vf_msg_cfg.ignore_second *
+ rte_get_timer_hz();
+ }
}
int
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ size_t size;
int ret, i;
uint32_t val;
I40E_WRITE_REG(hw, I40E_PFGEN_PORTMDIO_NUM, val);
I40E_WRITE_FLUSH(hw);
+ /* calculate the memory size for storing timestamps of messages */
+ size = pf->vf_msg_cfg.max_msg * sizeof(uint64_t);
+
for (i = 0; i < pf->vf_num; i++) {
pf->vfs[i].pf = pf;
pf->vfs[i].state = I40E_VF_INACTIVE;
pf->vfs[i].vf_idx = i;
+
+ if (size) {
+ /* allocate memory to store timestamps of messages */
+ pf->vfs[i].msg_timestamps =
+ rte_zmalloc("i40e_pf_vf", size, 0);
+ if (pf->vfs[i].msg_timestamps == NULL) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+ }
+
ret = i40e_pf_host_vf_reset(&pf->vfs[i], 0);
if (ret != I40E_SUCCESS)
goto fail;
return I40E_SUCCESS;
fail:
+ for (; i >= 0; i--)
+ rte_free(pf->vfs[i].msg_timestamps);
rte_free(pf->vfs);
i40e_pf_enable_irq0(hw);
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
uint32_t val;
+ int i;
PMD_INIT_FUNC_TRACE();
(pf->vf_nb_qps == 0))
return I40E_SUCCESS;
+ /* free the memory used to store message timestamps */
+ for (i = 0; i < pf->vf_num; i++)
+ rte_free(pf->vfs[i].msg_timestamps);
+
/* free memory to store VF structure */
rte_free(pf->vfs);
pf->vfs = NULL;