X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fi40e%2Fi40e_pf.c;h=7bf1e794106358e2c135fff37e6c75f1174cc54f;hb=e0e325065bee98dade424f2e68a8b473a83ac2f5;hp=93b484bbb563b9ed383bfb3a80c3ace28fe1d67d;hpb=2d9b2787e23d778dec87563c588e3e170206d593;p=dpdk.git

diff --git a/drivers/net/i40e/i40e_pf.c b/drivers/net/i40e/i40e_pf.c
index 93b484bbb5..7bf1e79410 100644
--- a/drivers/net/i40e/i40e_pf.c
+++ b/drivers/net/i40e/i40e_pf.c
@@ -14,7 +14,7 @@
 #include <rte_string_fns.h>
 #include <rte_pci.h>
 #include <rte_ether.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
 #include <rte_memzone.h>
 #include <rte_malloc.h>
 #include <rte_memcpy.h>
@@ -244,19 +244,23 @@ i40e_pf_host_send_msg_to_vf(struct i40e_pf_vf *vf,
 }
 
 static void
-i40e_pf_host_process_cmd_version(struct i40e_pf_vf *vf, bool b_op)
+i40e_pf_host_process_cmd_version(struct i40e_pf_vf *vf, uint8_t *msg,
+				 bool b_op)
 {
 	struct virtchnl_version_info info;
 
-	/* Respond like a Linux PF host in order to support both DPDK VF and
-	 * Linux VF driver. The expense is original DPDK host specific feature
+	/* VF and PF drivers need to follow the virtchnl definition, no matter
+	 * whether it's DPDK or another kernel driver.
+	 * The original DPDK host specific feature
 	 * like CFG_VLAN_PVID and CONFIG_VSI_QUEUES_EXT will not available.
-	 *
-	 * DPDK VF also can't identify host driver by version number returned.
-	 * It always assume talking with Linux PF.
 	 */
+
 	info.major = VIRTCHNL_VERSION_MAJOR;
-	info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
+	vf->version = *(struct virtchnl_version_info *)msg;
+	if (VF_IS_V10(&vf->version))
+		info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
+	else
+		info.minor = VIRTCHNL_VERSION_MINOR;
 
 	if (b_op)
 		i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
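
For context, the PF now answers VIRTCHNL_OP_VERSION based on what the VF advertises instead of always mimicking a v1.0 host. A minimal VF-side sketch of the payload this handler parses, assuming the stock virtchnl.h definitions (the VF_IS_V10() macro is quoted from memory and may differ in detail):

	struct virtchnl_version_info ver = {
		.major = VIRTCHNL_VERSION_MAJOR,	/* 1 */
		.minor = VIRTCHNL_VERSION_MINOR,	/* 1: capability negotiation supported */
	};
	/* A legacy VF sends minor 0 (VIRTCHNL_VERSION_MINOR_NO_VF_CAPS); the PF
	 * detects it with VF_IS_V10(), roughly
	 * ((_v)->major == 1 && (_v)->minor == 0), and then skips offload
	 * negotiation for that VF.
	 */
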
@@ -280,11 +284,13 @@ i40e_pf_host_process_cmd_reset_vf(struct i40e_pf_vf *vf)
 }
 
 static int
-i40e_pf_host_process_cmd_get_vf_resource(struct i40e_pf_vf *vf, bool b_op)
+i40e_pf_host_process_cmd_get_vf_resource(struct i40e_pf_vf *vf, uint8_t *msg,
+					 bool b_op)
 {
 	struct virtchnl_vf_resource *vf_res = NULL;
 	struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
 	uint32_t len = 0;
+	uint64_t default_hena = I40E_RSS_HENA_ALL;
 	int ret = I40E_SUCCESS;
 
 	if (!b_op) {
@@ -308,18 +314,42 @@ i40e_pf_host_process_cmd_get_vf_resource(struct i40e_pf_vf *vf, bool b_op)
 		goto send_msg;
 	}
 
-	vf_res->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2 |
-				VIRTCHNL_VF_OFFLOAD_VLAN;
+	if (VF_IS_V10(&vf->version)) /* doesn't support offload negotiation */
+		vf->request_caps = VIRTCHNL_VF_OFFLOAD_L2 |
+				   VIRTCHNL_VF_OFFLOAD_VLAN;
+	else
+		vf->request_caps = *(uint32_t *)msg;
+
+	/* enable all RSS by default,
+	 * hena setting is not supported by virtchnl yet.
+	 */
+	if (vf->request_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
+		I40E_WRITE_REG(hw, I40E_VFQF_HENA1(0, vf->vf_idx),
+			       (uint32_t)default_hena);
+		I40E_WRITE_REG(hw, I40E_VFQF_HENA1(1, vf->vf_idx),
+			       (uint32_t)(default_hena >> 32));
+		I40E_WRITE_FLUSH(hw);
+	}
+
+	vf_res->vf_cap_flags = vf->request_caps &
+			       I40E_VIRTCHNL_OFFLOAD_CAPS;
+	/* For X722, it supports write back on ITR
+	 * without binding queue to interrupt vector.
+	 */
+	if (hw->mac.type == I40E_MAC_X722)
+		vf_res->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
 	vf_res->max_vectors = hw->func_caps.num_msix_vectors_vf;
 	vf_res->num_queue_pairs = vf->vsi->nb_qps;
 	vf_res->num_vsis = I40E_DEFAULT_VF_VSI_NUM;
+	vf_res->rss_key_size = (I40E_PFQF_HKEY_MAX_INDEX + 1) * 4;
+	vf_res->rss_lut_size = (I40E_VFQF_HLUT1_MAX_INDEX + 1) * 4;
 
 	/* Change below setting if PF host can support more VSIs for VF */
 	vf_res->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
 	vf_res->vsi_res[0].vsi_id = vf->vsi->vsi_id;
 	vf_res->vsi_res[0].num_queue_pairs = vf->vsi->nb_qps;
-	ether_addr_copy(&vf->mac_addr,
-		(struct ether_addr *)vf_res->vsi_res[0].default_mac_addr);
+	rte_ether_addr_copy(&vf->mac_addr,
+		(struct rte_ether_addr *)vf_res->vsi_res[0].default_mac_addr);
 
 send_msg:
 	i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES,
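
The resource reply now masks the VF's requested capabilities with I40E_VIRTCHNL_OFFLOAD_CAPS and advertises the RSS geometry that the validation path added further below will enforce. A worked example of those two sizes, assuming the usual base-driver register counts (I40E_PFQF_HKEY_MAX_INDEX == 12, I40E_VFQF_HLUT1_MAX_INDEX == 15):

	uint16_t key_size = (I40E_PFQF_HKEY_MAX_INDEX + 1) * 4;	/* 13 regs * 4 B = 52 B */
	uint16_t lut_size = (I40E_VFQF_HLUT1_MAX_INDEX + 1) * 4;	/* 16 regs * 4 B = 64 B */

A VF that echoes these advertised sizes back in VIRTCHNL_OP_CONFIG_RSS_KEY/LUT therefore cannot fail the exact-size checks in i40e_pf_host_handle_vf_msg().
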
@@ -793,7 +823,7 @@ i40e_pf_host_process_cmd_add_ether_address(struct i40e_pf_vf *vf,
 		(struct virtchnl_ether_addr_list *)msg;
 	struct i40e_mac_filter_info filter;
 	int i;
-	struct ether_addr *mac;
+	struct rte_ether_addr *mac;
 
 	if (!b_op) {
 		i40e_pf_host_send_msg_to_vf(
@@ -812,10 +842,10 @@ i40e_pf_host_process_cmd_add_ether_address(struct i40e_pf_vf *vf,
 	}
 
 	for (i = 0; i < addr_list->num_elements; i++) {
-		mac = (struct ether_addr *)(addr_list->list[i].addr);
-		rte_memcpy(&filter.mac_addr, mac, ETHER_ADDR_LEN);
+		mac = (struct rte_ether_addr *)(addr_list->list[i].addr);
+		rte_memcpy(&filter.mac_addr, mac, RTE_ETHER_ADDR_LEN);
 		filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
-		if (is_zero_ether_addr(mac) ||
+		if (rte_is_zero_ether_addr(mac) ||
 		    i40e_vsi_add_mac(vf->vsi, &filter)) {
 			ret = I40E_ERR_INVALID_MAC_ADDR;
 			goto send_msg;
@@ -839,7 +869,7 @@ i40e_pf_host_process_cmd_del_ether_address(struct i40e_pf_vf *vf,
 	struct virtchnl_ether_addr_list *addr_list =
 		(struct virtchnl_ether_addr_list *)msg;
 	int i;
-	struct ether_addr *mac;
+	struct rte_ether_addr *mac;
 
 	if (!b_op) {
 		i40e_pf_host_send_msg_to_vf(
@@ -856,8 +886,8 @@ i40e_pf_host_process_cmd_del_ether_address(struct i40e_pf_vf *vf,
 	}
 
 	for (i = 0; i < addr_list->num_elements; i++) {
-		mac = (struct ether_addr *)(addr_list->list[i].addr);
-		if(is_zero_ether_addr(mac) ||
+		mac = (struct rte_ether_addr *)(addr_list->list[i].addr);
+		if (rte_is_zero_ether_addr(mac) ||
 		    i40e_vsi_delete_mac(vf->vsi, mac)) {
 			ret = I40E_ERR_INVALID_MAC_ADDR;
 			goto send_msg;
@@ -1061,6 +1091,84 @@ i40e_pf_host_process_cmd_disable_vlan_strip(struct i40e_pf_vf *vf, bool b_op)
 	return ret;
 }
 
+static int
+i40e_pf_host_process_cmd_set_rss_lut(struct i40e_pf_vf *vf,
+				     uint8_t *msg,
+				     uint16_t msglen,
+				     bool b_op)
+{
+	struct virtchnl_rss_lut *rss_lut = (struct virtchnl_rss_lut *)msg;
+	uint16_t valid_len;
+	int ret = I40E_SUCCESS;
+
+	if (!b_op) {
+		i40e_pf_host_send_msg_to_vf(
+			vf,
+			VIRTCHNL_OP_CONFIG_RSS_LUT,
+			I40E_NOT_SUPPORTED, NULL, 0);
+		return ret;
+	}
+
+	if (!msg || msglen <= sizeof(struct virtchnl_rss_lut)) {
+		PMD_DRV_LOG(ERR, "set_rss_lut argument too short");
+		ret = I40E_ERR_PARAM;
+		goto send_msg;
+	}
+	valid_len = sizeof(struct virtchnl_rss_lut) + rss_lut->lut_entries - 1;
+	if (msglen < valid_len) {
+		PMD_DRV_LOG(ERR, "set_rss_lut length mismatch");
+		ret = I40E_ERR_PARAM;
+		goto send_msg;
+	}
+
+	ret = i40e_set_rss_lut(vf->vsi, rss_lut->lut, rss_lut->lut_entries);
+
+send_msg:
+	i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
+				    ret, NULL, 0);
+
+	return ret;
+}
+
+static int
+i40e_pf_host_process_cmd_set_rss_key(struct i40e_pf_vf *vf,
+				     uint8_t *msg,
+				     uint16_t msglen,
+				     bool b_op)
+{
+	struct virtchnl_rss_key *rss_key = (struct virtchnl_rss_key *)msg;
+	uint16_t valid_len;
+	int ret = I40E_SUCCESS;
+
+	if (!b_op) {
+		i40e_pf_host_send_msg_to_vf(
+			vf,
+			VIRTCHNL_OP_CONFIG_RSS_KEY,
+			I40E_NOT_SUPPORTED, NULL, 0);
+		return ret;
+	}
+
+	if (!msg || msglen <= sizeof(struct virtchnl_rss_key)) {
+		PMD_DRV_LOG(ERR, "set_rss_key argument too short");
+		ret = I40E_ERR_PARAM;
+		goto send_msg;
+	}
+	valid_len = sizeof(struct virtchnl_rss_key) + rss_key->key_len - 1;
+	if (msglen < valid_len) {
+		PMD_DRV_LOG(ERR, "set_rss_key length mismatch");
+		ret = I40E_ERR_PARAM;
+		goto send_msg;
+	}
+
+	ret = i40e_set_rss_key(vf->vsi, rss_key->key, rss_key->key_len);
+
+send_msg:
+	i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
+				    ret, NULL, 0);
+
+	return ret;
+}
+
 void
 i40e_notify_vf_link_status(struct rte_eth_dev *dev, struct i40e_pf_vf *vf)
 {
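
The `valid_len = sizeof(...) + entries - 1` computations in both handlers come from virtchnl's flexible-tail convention: the variable-length array is declared with one element, so one byte is already counted in sizeof(). Sketch of the layout, paraphrased from virtchnl.h:

	struct virtchnl_rss_lut {
		u16 vsi_id;
		u16 lut_entries;
		u8 lut[1];	/* lut_entries bytes follow; the first is in sizeof() */
	};

A message carrying N LUT bytes must therefore be at least sizeof(struct virtchnl_rss_lut) + N - 1 bytes long; virtchnl_rss_key follows the same pattern with key_len.
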
@@ -1110,6 +1218,72 @@ i40e_notify_vf_link_status(struct rte_eth_dev *dev, struct i40e_pf_vf *vf)
 			       I40E_SUCCESS, (uint8_t *)&event, sizeof(event));
 }
 
+/**
+ * i40e_vc_notify_vf_reset
+ * @vf: pointer to the VF structure
+ *
+ * indicate a pending reset to the given VF
+ **/
+static void
+i40e_vc_notify_vf_reset(struct i40e_pf_vf *vf)
+{
+	struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
+	struct virtchnl_pf_event pfe;
+	int abs_vf_id;
+	uint16_t vf_id = vf->vf_idx;
+
+	abs_vf_id = vf_id + hw->func_caps.vf_base_id;
+	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
+	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
+	i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT, 0, (u8 *)&pfe,
+			       sizeof(struct virtchnl_pf_event), NULL);
+}
+
+static int
+i40e_pf_host_process_cmd_request_queues(struct i40e_pf_vf *vf, uint8_t *msg)
+{
+	struct virtchnl_vf_res_request *vfres =
+		(struct virtchnl_vf_res_request *)msg;
+	struct i40e_pf *pf;
+	uint32_t req_pairs = vfres->num_queue_pairs;
+	uint32_t cur_pairs = vf->vsi->nb_used_qps;
+
+	pf = vf->pf;
+
+	if (!rte_is_power_of_2(req_pairs))
+		req_pairs = i40e_align_floor(req_pairs) << 1;
+
+	if (req_pairs == 0) {
+		PMD_DRV_LOG(ERR, "VF %d tried to request 0 queues. Ignoring.\n",
+			    vf->vf_idx);
+	} else if (req_pairs > I40E_MAX_QP_NUM_PER_VF) {
+		PMD_DRV_LOG(ERR,
+			    "VF %d tried to request more than %d queues.\n",
+			    vf->vf_idx,
+			    I40E_MAX_QP_NUM_PER_VF);
+		vfres->num_queue_pairs = I40E_MAX_QP_NUM_PER_VF;
+	} else if (req_pairs > cur_pairs + pf->qp_pool.num_free) {
+		PMD_DRV_LOG(ERR, "VF %d requested %d queues (rounded to %d) "
+			    "but only %d available\n",
+			    vf->vf_idx,
+			    vfres->num_queue_pairs,
+			    req_pairs,
+			    cur_pairs + pf->qp_pool.num_free);
+		vfres->num_queue_pairs = i40e_align_floor(pf->qp_pool.num_free +
+							  cur_pairs);
+	} else {
+		i40e_vc_notify_vf_reset(vf);
+		vf->vsi->nb_qps = req_pairs;
+		pf->vf_nb_qps = req_pairs;
+		i40e_pf_host_process_cmd_reset_vf(vf);
+
+		return 0;
+	}
+
+	return i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES, 0,
+					   (u8 *)vfres, sizeof(*vfres));
+}
+
 void
 i40e_pf_host_handle_vf_msg(struct rte_eth_dev *dev,
 			   uint16_t abs_vf_id, uint32_t opcode,
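
i40e_pf_host_process_cmd_request_queues() rounds any non-power-of-two request up to the next power of two. A worked example, assuming i40e_align_floor() returns the largest power of two not greater than its argument (as the driver defines it):

	uint32_t req_pairs = 5;
	if (!rte_is_power_of_2(req_pairs))
		req_pairs = i40e_align_floor(req_pairs) << 1;	/* floor = 4, so 5 -> 8 */
	/* likewise 6 -> 8 and 9 -> 16; exact powers of two pass through unchanged */

Only the success branch resets the VF; the other branches reply with VIRTCHNL_OP_REQUEST_QUEUES carrying an adjusted (or unchanged) num_queue_pairs as a hint for the VF to retry.
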
@@ -1123,7 +1297,9 @@ i40e_pf_host_handle_vf_msg(struct rte_eth_dev *dev,
 	/* AdminQ will pass absolute VF id, transfer to internal vf id */
 	uint16_t vf_id = abs_vf_id - hw->func_caps.vf_base_id;
 	struct rte_pmd_i40e_mb_event_param ret_param;
+	uint64_t first_cycle, cur_cycle;
 	bool b_op = TRUE;
+	int ret;
 
 	if (vf_id > pf->vf_num - 1 || !pf->vfs) {
 		PMD_DRV_LOG(ERR, "invalid argument");
@@ -1131,11 +1307,42 @@
 	}
 
 	vf = &pf->vfs[vf_id];
+
+	cur_cycle = rte_get_timer_cycles();
+
+	/* if the VF is being blocked, ignore the message and return */
+	if (cur_cycle < vf->ignore_end_cycle)
+		return;
+
 	if (!vf->vsi) {
 		PMD_DRV_LOG(ERR, "NO VSI associated with VF found");
 		i40e_pf_host_send_msg_to_vf(vf, opcode,
 			I40E_ERR_NO_AVAILABLE_VSI, NULL, 0);
-		return;
+		goto check;
+	}
+
+	/* perform basic checks on the msg */
+	ret = virtchnl_vc_validate_vf_msg(&vf->version, opcode, msg, msglen);
+
+	/* perform additional checks specific to this driver */
+	if (opcode == VIRTCHNL_OP_CONFIG_RSS_KEY) {
+		struct virtchnl_rss_key *vrk = (struct virtchnl_rss_key *)msg;
+
+		if (vrk->key_len != ((I40E_PFQF_HKEY_MAX_INDEX + 1) * 4))
+			ret = VIRTCHNL_ERR_PARAM;
+	} else if (opcode == VIRTCHNL_OP_CONFIG_RSS_LUT) {
+		struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
+
+		if (vrl->lut_entries != ((I40E_VFQF_HLUT1_MAX_INDEX + 1) * 4))
+			ret = VIRTCHNL_ERR_PARAM;
+	}
+
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Invalid message from VF %u, opcode %u, len %u",
+			    vf_id, opcode, msglen);
+		i40e_pf_host_send_msg_to_vf(vf, opcode,
+					    I40E_ERR_PARAM, NULL, 0);
+		goto check;
 	}
 
 	/**
@@ -1156,8 +1363,7 @@
 	 * do nothing and send not_supported to VF. As PF must send a response
 	 * to VF and ACK/NACK is not defined.
 	 */
-	_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_VF_MBOX,
-				      NULL, &ret_param);
+	_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_VF_MBOX, &ret_param);
 	if (ret_param.retval != RTE_PMD_I40E_MB_EVENT_PROCEED) {
 		PMD_DRV_LOG(WARNING, "VF to PF message(%d) is not permitted!",
 			    opcode);
@@ -1167,7 +1373,7 @@
 	switch (opcode) {
 	case VIRTCHNL_OP_VERSION:
 		PMD_DRV_LOG(INFO, "OP_VERSION received");
-		i40e_pf_host_process_cmd_version(vf, b_op);
+		i40e_pf_host_process_cmd_version(vf, msg, b_op);
 		break;
 	case VIRTCHNL_OP_RESET_VF:
 		PMD_DRV_LOG(INFO, "OP_RESET_VF received");
@@ -1175,7 +1381,7 @@
 		break;
 	case VIRTCHNL_OP_GET_VF_RESOURCES:
 		PMD_DRV_LOG(INFO, "OP_GET_VF_RESOURCES received");
-		i40e_pf_host_process_cmd_get_vf_resource(vf, b_op);
+		i40e_pf_host_process_cmd_get_vf_resource(vf, msg, b_op);
 		break;
 	case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
 		PMD_DRV_LOG(INFO, "OP_CONFIG_VSI_QUEUES received");
@@ -1236,6 +1442,19 @@
 		PMD_DRV_LOG(INFO, "OP_DISABLE_VLAN_STRIPPING received");
 		i40e_pf_host_process_cmd_disable_vlan_strip(vf, b_op);
 		break;
+	case VIRTCHNL_OP_CONFIG_RSS_LUT:
+		PMD_DRV_LOG(INFO, "OP_CONFIG_RSS_LUT received");
+		i40e_pf_host_process_cmd_set_rss_lut(vf, msg, msglen, b_op);
+		break;
+	case VIRTCHNL_OP_CONFIG_RSS_KEY:
+		PMD_DRV_LOG(INFO, "OP_CONFIG_RSS_KEY received");
+		i40e_pf_host_process_cmd_set_rss_key(vf, msg, msglen, b_op);
+		break;
+	case VIRTCHNL_OP_REQUEST_QUEUES:
+		PMD_DRV_LOG(INFO, "OP_REQUEST_QUEUES received");
+		i40e_pf_host_process_cmd_request_queues(vf, msg);
+		break;
+
 	/* Don't add command supported below, which will
 	 * return an error code.
 	 */
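
Every message that passes validation is first offered to the application through the RTE_ETH_EVENT_VF_MBOX callback, which may veto it. A hedged sketch of an application-side handler, assuming rte_pmd_i40e.h and the callback ABI of this DPDK era:

	#include <rte_common.h>
	#include <rte_ethdev.h>
	#include <rte_pmd_i40e.h>

	static int
	vf_mbox_event_cb(uint16_t port_id, enum rte_eth_event_type event,
			 void *cb_arg, void *ret_param)
	{
		struct rte_pmd_i40e_mb_event_param *p = ret_param;

		RTE_SET_USED(port_id);
		RTE_SET_USED(event);
		RTE_SET_USED(cb_arg);

		/* allow the PF to process every VF mailbox message */
		p->retval = RTE_PMD_I40E_MB_EVENT_PROCEED;
		return 0;
	}

Registered with rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_VF_MBOX, vf_mbox_event_cb, NULL). Returning anything other than PROCEED makes the handlers above run with b_op == FALSE and answer the VF with I40E_NOT_SUPPORTED.
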
+ */ + if (first_cycle && cur_cycle < first_cycle + + (uint64_t)pf->vf_msg_cfg.period * rte_get_timer_hz()) { + PMD_DRV_LOG(WARNING, "VF %u too much messages(%u in %u" + " seconds),\n\tany new message from which" + " will be ignored during next %u seconds!", + vf_id, pf->vf_msg_cfg.max_msg, + (uint32_t)((cur_cycle - first_cycle + + rte_get_timer_hz() - 1) / rte_get_timer_hz()), + pf->vf_msg_cfg.ignore_second); + vf->ignore_end_cycle = rte_get_timer_cycles() + + pf->vf_msg_cfg.ignore_second * + rte_get_timer_hz(); + } } int @@ -1252,6 +1502,7 @@ i40e_pf_host_init(struct rte_eth_dev *dev) { struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); struct i40e_hw *hw = I40E_PF_TO_HW(pf); + size_t size; int ret, i; uint32_t val; @@ -1278,10 +1529,24 @@ i40e_pf_host_init(struct rte_eth_dev *dev) I40E_WRITE_REG(hw, I40E_PFGEN_PORTMDIO_NUM, val); I40E_WRITE_FLUSH(hw); + /* calculate the memory size for storing timestamp of messages */ + size = pf->vf_msg_cfg.max_msg * sizeof(uint64_t); + for (i = 0; i < pf->vf_num; i++) { pf->vfs[i].pf = pf; pf->vfs[i].state = I40E_VF_INACTIVE; pf->vfs[i].vf_idx = i; + + if (size) { + /* allocate memory for store timestamp of messages */ + pf->vfs[i].msg_timestamps = + rte_zmalloc("i40e_pf_vf", size, 0); + if (pf->vfs[i].msg_timestamps == NULL) { + ret = -ENOMEM; + goto fail; + } + } + ret = i40e_pf_host_vf_reset(&pf->vfs[i], 0); if (ret != I40E_SUCCESS) goto fail; @@ -1294,6 +1559,8 @@ i40e_pf_host_init(struct rte_eth_dev *dev) return I40E_SUCCESS; fail: + for (; i >= 0; i--) + rte_free(pf->vfs[i].msg_timestamps); rte_free(pf->vfs); i40e_pf_enable_irq0(hw); @@ -1306,6 +1573,7 @@ i40e_pf_host_uninit(struct rte_eth_dev *dev) struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); struct i40e_hw *hw = I40E_PF_TO_HW(pf); uint32_t val; + int i; PMD_INIT_FUNC_TRACE(); @@ -1318,6 +1586,10 @@ i40e_pf_host_uninit(struct rte_eth_dev *dev) (pf->vf_nb_qps == 0)) return I40E_SUCCESS; + /* free memory for store timestamp of messages */ + for (i = 0; i < pf->vf_num; i++) + rte_free(pf->vfs[i].msg_timestamps); + /* free memory to store VF structure */ rte_free(pf->vfs); pf->vfs = NULL;