static char ver_str[QEDE_PMD_DRV_VER_STR_SIZE];
size_t size;
- if (fw_ver == NULL)
- return 0;
-
if (IS_PF(edev))
snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%s",
QEDE_PMD_FW_VERSION);
static void qede_interrupt_action(struct ecore_hwfn *p_hwfn)
{
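+ /* Guard the slow-path DPC: ecore_int_sp_dpc() may otherwise be entered
+  * concurrently (e.g. from the interrupt handler and the CMT slow-path
+  * poll), so serialize it with the SPQ lock.
+  */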
+ OSAL_SPIN_LOCK(&p_hwfn->spq_lock);
ecore_int_sp_dpc((osal_int_ptr_t)(p_hwfn));
+ OSAL_SPIN_UNLOCK(&p_hwfn->spq_lock);
}
static void
}
}
- if (mask & ETH_VLAN_EXTEND_MASK)
- DP_ERR(edev, "Extend VLAN not supported\n");
-
qdev->vlan_offload_mask = mask;
DP_INFO(edev, "VLAN offload mask %d\n", mask);
return 0;
}
-static void qede_dev_stop(struct rte_eth_dev *eth_dev)
+static int qede_dev_stop(struct rte_eth_dev *eth_dev)
{
struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
PMD_INIT_FUNC_TRACE(edev);
+ eth_dev->data->dev_started = 0;
/* Bring the link down */
qede_dev_set_link_state(eth_dev, false);
/* Disable vport */
if (qede_activate_vport(eth_dev, false))
- return;
+ return 0;
if (qdev->enable_lro)
qede_enable_tpa(eth_dev, false);
ecore_hw_stop_fastpath(edev); /* TBD - loop */
DP_INFO(edev, "Device is stopped\n");
+
+ return 0;
}
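qede_dev_stop() now reports status, so failures surface to the application through rte_eth_dev_stop(). A minimal caller sketch, assuming a configured port_id (hypothetical application code, not part of this driver):

	int rc = rte_eth_dev_stop(port_id);	/* hypothetical caller */
	if (rc < 0)
		printf("stop of port %u failed: %s\n", port_id, rte_strerror(-rc));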
static const char * const valid_args[] = {
}
}
-static void qede_dev_close(struct rte_eth_dev *eth_dev)
+static int qede_dev_close(struct rte_eth_dev *eth_dev)
{
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ int ret = 0;
PMD_INIT_FUNC_TRACE(edev);
+ /* only close in case of the primary process */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
/* dev_stop() shall cleanup fp resources in hw but without releasing
* dma memories and sw structures so that dev_start() can be called
* by the app without reconfiguration. However, in dev_close() we
* can release all the resources and device can be brought up newly
*/
if (eth_dev->data->dev_started)
- qede_dev_stop(eth_dev);
+ ret = qede_dev_stop(eth_dev);
if (qdev->vport_started)
qede_stop_vport(edev);
if (ECORE_IS_CMT(edev))
rte_eal_alarm_cancel(qede_poll_sp_sb_cb, (void *)eth_dev);
+
+ return ret;
}
static int
QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
enum _ecore_status_t ecore_status;
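+ /* If the port is already in promiscuous mode, keep full promiscuous
+  * rather than downgrading it to multicast-promisc only.
+  */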
+ if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
+ type = QED_FILTER_RX_MODE_TYPE_PROMISC;
ecore_status = qed_configure_filter_rx_mode(eth_dev, type);
return ecore_status >= ECORE_SUCCESS ? 0 : -EAGAIN;
/* RSS hash key */
if (key) {
if (len > (ECORE_RSS_KEY_SIZE * sizeof(uint32_t))) {
- DP_ERR(edev, "RSS key length exceeds limit\n");
- return -EINVAL;
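+ /* ECORE_RSS_KEY_SIZE is counted in 32-bit words, so the limit is
+  * ECORE_RSS_KEY_SIZE * sizeof(uint32_t) bytes; oversized keys are now
+  * trimmed to that limit instead of rejected.
+  */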
+ len = ECORE_RSS_KEY_SIZE * sizeof(uint32_t);
+ DP_NOTICE(edev, false,
+ "RSS key length too big, trimmed to %d\n",
+ len);
}
DP_INFO(edev, "Applying user supplied hash key\n");
rss_params.update_rss_key = 1;
}
if (dev->data->dev_started) {
dev->data->dev_started = 0;
- qede_dev_stop(dev);
+ rc = qede_dev_stop(dev);
+ if (rc != 0)
+ return rc;
restart = true;
}
rte_delay_ms(1000);
fp->rxq->rx_buf_size = rc;
}
}
- if (max_rx_pkt_len > RTE_ETHER_MAX_LEN)
+ if (frame_size > QEDE_ETH_MAX_LEN)
dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
else
dev->data->dev_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
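The jumbo-frame flag now keys off QEDE_ETH_MAX_LEN, the driver's actual maximum frame length, rather than the generic RTE_ETHER_MAX_LEN. Assuming it follows the common per-driver pattern for this fix, the macro would be:

	/* assumed definition, following the usual per-driver pattern */
	#define QEDE_ETH_MAX_LEN  (RTE_ETHER_MTU + QEDE_ETH_OVERHEAD)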
.dev_infos_get = qede_dev_info_get,
.rx_queue_setup = qede_rx_queue_setup,
.rx_queue_release = qede_rx_queue_release,
- .rx_descriptor_status = qede_rx_descriptor_status,
.tx_queue_setup = qede_tx_queue_setup,
.tx_queue_release = qede_tx_queue_release,
.dev_start = qede_dev_start,
.reta_update = qede_rss_reta_update,
.reta_query = qede_rss_reta_query,
.mtu_set = qede_set_mtu,
- .filter_ctrl = qede_dev_filter_ctrl,
+ .flow_ops_get = qede_dev_flow_ops_get,
.udp_tunnel_port_add = qede_udp_dst_port_add,
.udp_tunnel_port_del = qede_udp_dst_port_del,
.fw_version_get = qede_fw_version_get,
+ .get_reg = qede_get_regs,
};
static const struct eth_dev_ops qede_eth_vf_dev_ops = {
.dev_infos_get = qede_dev_info_get,
.rx_queue_setup = qede_rx_queue_setup,
.rx_queue_release = qede_rx_queue_release,
- .rx_descriptor_status = qede_rx_descriptor_status,
.tx_queue_setup = qede_tx_queue_setup,
.tx_queue_release = qede_tx_queue_release,
.dev_start = qede_dev_start,
qed_ops->common->update_pf_params(edev, &pf_params);
}
+static void qede_generate_random_mac_addr(struct rte_ether_addr *mac_addr)
+{
+ uint64_t random;
+
+ /* Set Organizationally Unique Identifier (OUI) prefix. */
+ mac_addr->addr_bytes[0] = 0x00;
+ mac_addr->addr_bytes[1] = 0x09;
+ mac_addr->addr_bytes[2] = 0xC0;
+
+ /* Force indication of locally assigned MAC address. */
+ mac_addr->addr_bytes[0] |= RTE_ETHER_LOCAL_ADMIN_ADDR;
+
+ /* Generate the last 3 bytes of the MAC address with a random number. */
+ random = rte_rand();
+
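+ /* memcpy() takes the three lowest-addressed bytes of 'random'; all 64
+  * bits from rte_rand() are uniform, so byte order is irrelevant here.
+  */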
+ memcpy(&mac_addr->addr_bytes[3], &random, 3);
+}
+
static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
{
struct rte_pci_device *pci_dev;
uint8_t bulletin_change;
uint8_t vf_mac[RTE_ETHER_ADDR_LEN];
uint8_t is_mac_forced;
- bool is_mac_exist;
+ bool is_mac_exist = false;
/* Fix up ecore debug level */
uint32_t dp_module = ~0 & ~ECORE_MSG_HW;
uint8_t dp_level = ECORE_LEVEL_VERBOSE;
}
rte_eth_copy_pci_info(eth_dev, pci_dev);
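+ /* Let the ethdev layer fill per-queue basic xstats for this port. */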
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
/* @DPDK */
edev->vendor_id = pci_dev->id.vendor_id;
DP_ERR(edev, "No VF macaddr assigned\n");
}
}
+
+ /* If MAC doesn't exist from PF, generate random one */
+ if (!is_mac_exist) {
+ struct rte_ether_addr *mac_addr;
+
+ mac_addr = (struct rte_ether_addr *)&vf_mac;
+ qede_generate_random_mac_addr(mac_addr);
+
+ rte_ether_addr_copy(mac_addr,
+ &eth_dev->data->mac_addrs[0]);
+
+ rte_ether_addr_copy(&eth_dev->data->mac_addrs[0],
+ &adapter->primary_mac);
+ }
}
eth_dev->dev_ops = (is_vf) ? &qede_eth_vf_dev_ops : &qede_eth_dev_ops;
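+ /* rx_descriptor_status is installed as a fast-path pointer on the
+  * device itself, which is why it was dropped from both ops tables above.
+  */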
+ eth_dev->rx_descriptor_status = qede_rx_descriptor_status;
adapter->num_tx_queues = 0;
adapter->num_rx_queues = 0;
adapter->vxlan.enable = false;
adapter->geneve.enable = false;
adapter->ipgre.enable = false;
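+ /* Enable SR-IOV at probe time with the VF count advertised for this
+  * PCI device (pci_dev->max_vfs).
+  */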
+ qed_ops->sriov_configure(edev, pci_dev->max_vfs);
}
DP_INFO(edev, "MAC address : %02x:%02x:%02x:%02x:%02x:%02x\n",
{
struct qede_dev *qdev = eth_dev->data->dev_private;
struct ecore_dev *edev = &qdev->edev;
-
PMD_INIT_FUNC_TRACE(edev);
-
- /* only uninitialize in the primary process */
- if (rte_eal_process_type() != RTE_PROC_PRIMARY)
- return 0;
-
- /* safe to close dev here */
qede_dev_close(eth_dev);
-
- eth_dev->dev_ops = NULL;
- eth_dev->rx_pkt_burst = NULL;
- eth_dev->tx_pkt_burst = NULL;
-
return 0;
}
RTE_PMD_REGISTER_PCI(net_qede_vf, rte_qedevf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_qede_vf, pci_id_qedevf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_qede_vf, "* igb_uio | vfio-pci");
-RTE_LOG_REGISTER(qede_logtype_init, pmd.net.qede.init, NOTICE);
-RTE_LOG_REGISTER(qede_logtype_driver, pmd.net.qede.driver, NOTICE);
+RTE_LOG_REGISTER_SUFFIX(qede_logtype_init, init, NOTICE);
+RTE_LOG_REGISTER_SUFFIX(qede_logtype_driver, driver, NOTICE);
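RTE_LOG_REGISTER_SUFFIX builds the logtype name from the driver's default prefix, so, assuming RTE_LOG_DEFAULT_LOGTYPE is pmd.net.qede for this driver, the registered names match the removed form exactly:

	RTE_LOG_REGISTER(qede_logtype_init, pmd.net.qede.init, NOTICE);
	RTE_LOG_REGISTER(qede_logtype_driver, pmd.net.qede.driver, NOTICE);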