qed_ops->common->update_pf_params(edev, &pf_params);
}
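+
+/* Build a locally administered MAC address: a fixed OUI prefix followed by three random bytes. */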
+static void qede_generate_random_mac_addr(struct rte_ether_addr *mac_addr)
+{
+ uint64_t random;
+
+ /* Set Organizationally Unique Identifier (OUI) prefix. */
+ mac_addr->addr_bytes[0] = 0x00;
+ mac_addr->addr_bytes[1] = 0x09;
+ mac_addr->addr_bytes[2] = 0xC0;
+
+ /* Force indication of locally assigned MAC address. */
+ mac_addr->addr_bytes[0] |= RTE_ETHER_LOCAL_ADMIN_ADDR;
+
+ /* Generate the last 3 bytes of the MAC address with a random number. */
+ random = rte_rand();
+
+ memcpy(&mac_addr->addr_bytes[3], &random, 3);
+}
+
static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
{
struct rte_pci_device *pci_dev;
uint8_t bulletin_change;
uint8_t vf_mac[RTE_ETHER_ADDR_LEN];
uint8_t is_mac_forced;
- bool is_mac_exist;
+ bool is_mac_exist = false;
/* Fix up ecore debug level */
uint32_t dp_module = ~0 & ~ECORE_MSG_HW;
uint8_t dp_level = ECORE_LEVEL_VERBOSE;
DP_ERR(edev, "No VF macaddr assigned\n");
}
}
+
+ /* If the PF did not provide a MAC address, generate a random one */
+ if (!is_mac_exist) {
+ struct rte_ether_addr *mac_addr;
+
+ mac_addr = (struct rte_ether_addr *)&vf_mac;
+ qede_generate_random_mac_addr(mac_addr);
+
+ rte_ether_addr_copy(mac_addr,
+ &eth_dev->data->mac_addrs[0]);
+
+ rte_ether_addr_copy(&eth_dev->data->mac_addrs[0],
+ &adapter->primary_mac);
+ }
}
eth_dev->dev_ops = (is_vf) ? &qede_eth_vf_dev_ops : &qede_eth_dev_ops;
ecore_ptt_release(hwfn, ptt);
}
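+
+/* Publish the PF-maintained bulletin boards to the VFs on this hwfn. */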
+static void qed_handle_bulletin_post(struct ecore_hwfn *hwfn)
+{
+ struct ecore_ptt *ptt;
+ int i;
+
+ ptt = ecore_ptt_acquire(hwfn);
+ if (!ptt) {
+ DP_NOTICE(hwfn, true, "PTT acquire failed\n");
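+ /* Retry later: re-arm the IOV task with the bulletin update flag set. */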
+ qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
+ return;
+ }
+
+ /* TODO - at the moment we update the bulletin boards of all VFs.
+ * If this proves too costly, we can mark only the VFs that need
+ * their bulletins updated.
+ */
+ ecore_for_each_vf(hwfn, i)
+ ecore_iov_post_vf_bulletin(hwfn, i, ptt);
+
+ ecore_ptt_release(hwfn, ptt);
+}
+
void qed_iov_pf_task(void *arg)
{
struct ecore_hwfn *p_hwfn = arg;
OSAL_CLEAR_BIT(QED_IOV_WQ_MSG_FLAG, &p_hwfn->iov_task_flags);
qed_handle_vf_msg(p_hwfn);
}
+
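+ /* A bulletin update was scheduled; post the boards to the VFs. */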
+ if (OSAL_GET_BIT(QED_IOV_WQ_BULLETIN_UPDATE_FLAG,
+ &p_hwfn->iov_task_flags)) {
+ OSAL_CLEAR_BIT(QED_IOV_WQ_BULLETIN_UPDATE_FLAG,
+ &p_hwfn->iov_task_flags);
+ qed_handle_bulletin_post(p_hwfn);
+ }
}
int qed_schedule_iov(struct ecore_hwfn *p_hwfn, enum qed_iov_wq_flag flag)
OSAL_SET_BIT(flag, &p_hwfn->iov_task_flags);
return rte_eal_alarm_set(1, qed_iov_pf_task, p_hwfn);
}
+
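+/* Copy the PF link configuration into every possible VF's bulletin and schedule a bulletin post. */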
+void qed_inform_vf_link_state(struct ecore_hwfn *hwfn)
+{
+ struct ecore_hwfn *lead_hwfn = ECORE_LEADING_HWFN(hwfn->p_dev);
+ struct ecore_mcp_link_capabilities caps;
+ struct ecore_mcp_link_params params;
+ struct ecore_mcp_link_state link;
+ int i;
+
+ if (!hwfn->pf_iov_info)
+ return;
+
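+ /* Link settings are always taken from the leading hwfn. */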
+ rte_memcpy(&params, ecore_mcp_get_link_params(lead_hwfn),
+ sizeof(params));
+ rte_memcpy(&link, ecore_mcp_get_link_state(lead_hwfn), sizeof(link));
+ rte_memcpy(&caps, ecore_mcp_get_link_capabilities(lead_hwfn),
+ sizeof(caps));
+
+ /* Update the bulletin of all possible future VFs with the link configuration */
+ for (i = 0; i < hwfn->p_dev->p_iov_info->total_vfs; i++) {
+ ecore_iov_set_link(hwfn, i,
+ &params, &link, &caps);
+ }
+
+ qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
+}