for (i = 0; i < addr_list->num_elements; i++) {
mac = (struct rte_ether_addr *)(addr_list->list[i].addr);
- rte_memcpy(&filter.mac_addr, mac, ETHER_ADDR_LEN);
+ rte_memcpy(&filter.mac_addr, mac, RTE_ETHER_ADDR_LEN);
filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
if (rte_is_zero_ether_addr(mac) ||
i40e_vsi_add_mac(vf->vsi, &filter)) {
/* AdminQ passes the absolute VF id; translate it to the internal VF id */
uint16_t vf_id = abs_vf_id - hw->func_caps.vf_base_id;
struct rte_pmd_i40e_mb_event_param ret_param;
+ uint64_t first_cycle, cur_cycle;
bool b_op = TRUE;
int ret;
}
vf = &pf->vfs[vf_id];
+
+ cur_cycle = rte_get_timer_cycles();
+
+ /* if the VF is still blocked, ignore the message and return */
+ if (cur_cycle < vf->ignore_end_cycle)
+ return;
+
if (!vf->vsi) {
PMD_DRV_LOG(ERR, "NO VSI associated with VF found");
i40e_pf_host_send_msg_to_vf(vf, opcode,
I40E_ERR_NO_AVAILABLE_VSI, NULL, 0);
- return;
+ goto check;
}
/* perform basic checks on the msg */
vf_id, opcode, msglen);
i40e_pf_host_send_msg_to_vf(vf, opcode,
I40E_ERR_PARAM, NULL, 0);
- return;
+ goto check;
}
/**
* do nothing and send not_supported to VF, as the PF must send a
* response to the VF and ACK/NACK is not defined.
*/
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_VF_MBOX, &ret_param);
+ rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_VF_MBOX, &ret_param);
if (ret_param.retval != RTE_PMD_I40E_MB_EVENT_PROCEED) {
PMD_DRV_LOG(WARNING, "VF to PF message (%d) is not permitted!",
opcode);
NULL, 0);
break;
}
+
+check:
+ /* nothing to do if message validation is not enabled */
+ if (!pf->vf_msg_cfg.max_msg)
+ return;
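+
+ /*
+ * The error paths above jump to `check` instead of returning, so
+ * rejected messages also count toward the VF's quota.
+ */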
+
+ /* store the arrival cycle of this message in the ring buffer */
+ vf->msg_timestamps[vf->msg_index++] = cur_cycle;
+ vf->msg_index %= pf->vf_msg_cfg.max_msg;
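+ /*
+ * After the wrap, msg_index points at the oldest slot, i.e. the
+ * arrival time of the first message in the current window.
+ */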
+
+ /* read the timestamp of the earliest message */
+ first_cycle = vf->msg_timestamps[vf->msg_index];
+
+ /*
+ * If the time span from the arrival of the earliest message to the
+ * arrival of the current message is smaller than `period`, too many
+ * messages were received in this statistics period.
+ */
+ if (first_cycle && cur_cycle < first_cycle +
+ (uint64_t)pf->vf_msg_cfg.period * rte_get_timer_hz()) {
+ PMD_DRV_LOG(WARNING, "VF %u sent too many messages (%u in %u"
+ " seconds); any new message from it will be"
+ " ignored for the next %u seconds!",
+ vf_id, pf->vf_msg_cfg.max_msg,
+ (uint32_t)((cur_cycle - first_cycle +
+ rte_get_timer_hz() - 1) / rte_get_timer_hz()),
+ pf->vf_msg_cfg.ignore_second);
+ vf->ignore_end_cycle = rte_get_timer_cycles() +
+ pf->vf_msg_cfg.ignore_second *
+ rte_get_timer_hz();
+ }
}
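Taken on its own, the sliding-window check above is a small piece of
ring-buffer arithmetic. The following is a minimal standalone sketch with
hypothetical names (struct msg_window, msg_window_exceeded); only the
ring-buffer fields and the final comparison mirror the patch, the rest is
illustrative:

#include <stdbool.h>
#include <stdint.h>

/* hypothetical stand-in for the per-VF fields added by this patch */
struct msg_window {
	uint64_t *timestamps;   /* zeroed ring buffer of the last max_msg arrivals */
	uint32_t index;         /* next slot to overwrite */
	uint32_t max_msg;       /* window size in messages */
	uint64_t period_cycles; /* window length in timer cycles */
};

/*
 * Record one arrival and report whether max_msg messages landed within
 * period_cycles. After storing the newest timestamp and advancing the
 * index, the slot at the new index holds the oldest timestamp in the
 * window; a zero there means the window is not yet full.
 */
static bool
msg_window_exceeded(struct msg_window *w, uint64_t now)
{
	uint64_t oldest;

	w->timestamps[w->index++] = now;
	w->index %= w->max_msg;

	oldest = w->timestamps[w->index];
	return oldest != 0 && now < oldest + w->period_cycles;
}

The check relies on the buffer being zero-initialized (rte_zmalloc() in
the patch), so a zero slot can be read as "no message recorded yet".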
int
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ size_t size;
int ret, i;
uint32_t val;
I40E_WRITE_REG(hw, I40E_PFGEN_PORTMDIO_NUM, val);
I40E_WRITE_FLUSH(hw);
+ /* calculate the memory size needed to store the message timestamps */
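+ /* e.g. max_msg = 256 gives 256 * 8 bytes = 2 KiB per VF */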
+ size = pf->vf_msg_cfg.max_msg * sizeof(uint64_t);
+
for (i = 0; i < pf->vf_num; i++) {
pf->vfs[i].pf = pf;
pf->vfs[i].state = I40E_VF_INACTIVE;
pf->vfs[i].vf_idx = i;
+
+ if (size) {
+ /* allocate zeroed memory to store the message timestamps */
+ pf->vfs[i].msg_timestamps =
+ rte_zmalloc("i40e_pf_vf", size, 0);
+ if (pf->vfs[i].msg_timestamps == NULL) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+ }
+
ret = i40e_pf_host_vf_reset(&pf->vfs[i], 0);
if (ret != I40E_SUCCESS)
goto fail;
return I40E_SUCCESS;
fail:
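+ /*
+ * Free the timestamp buffers allocated so far; rte_free() accepts
+ * NULL, so VFs whose buffer was never allocated are safe here.
+ */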
+ for (; i >= 0; i--)
+ rte_free(pf->vfs[i].msg_timestamps);
rte_free(pf->vfs);
i40e_pf_enable_irq0(hw);
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
uint32_t val;
+ int i;
PMD_INIT_FUNC_TRACE();
(pf->vf_nb_qps == 0))
return I40E_SUCCESS;
+ /* free the memory used to store the message timestamps */
+ for (i = 0; i < pf->vf_num; i++)
+ rte_free(pf->vfs[i].msg_timestamps);
+
/* free memory to store VF structure */
rte_free(pf->vfs);
pf->vfs = NULL;