#include <rte_dev.h>
#include <rte_pci.h>
+#include <rte_bus_pci.h>
#include <rte_ethdev.h>
#include <rte_ethdev_pci.h>
#include <rte_string_fns.h>
enum rte_filter_op filter_op,
void *arg)
{
- int ret = -EINVAL;
+ int ret = 0;
+
+ ENICPMD_FUNC_TRACE();
- if (RTE_ETH_FILTER_FDIR == filter_type)
+ switch (filter_type) {
+ case RTE_ETH_FILTER_GENERIC:
+ /* Generic (rte_flow) filters: the only supported op is GET,
+  * which hands back the driver's rte_flow ops table via *arg.
+  */
+ if (filter_op != RTE_ETH_FILTER_GET)
+ return -EINVAL;
+ *(const void **)arg = &enic_flow_ops;
+ break;
+ case RTE_ETH_FILTER_FDIR:
ret = enicpmd_fdir_ctrl_func(dev, filter_op, arg);
- else
+ break;
+ default:
+ /* Unknown filter type: warn and report -EINVAL to the caller. */
dev_warning(enic, "Filter type (%d) not supported",
filter_type);
+ ret = -EINVAL;
+ break;
+ }
return ret;
}
static void enicpmd_dev_tx_queue_release(void *txq)
{
ENICPMD_FUNC_TRACE();
+
+ /* Queue resources are owned by the primary process; a secondary
+  * process must never free them.
+  */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return;
+
enic_free_wq(txq);
}
int ret;
struct enic *enic = pmd_priv(eth_dev);
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return -E_RTE_SECONDARY;
+
ENICPMD_FUNC_TRACE();
if (queue_idx >= ENIC_WQ_MAX) {
dev_err(enic,
static void enicpmd_dev_rx_queue_release(void *rxq)
{
ENICPMD_FUNC_TRACE();
+
+ /* RQ memory belongs to the primary process; secondaries must not
+  * free it.
+  */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return;
+
enic_free_rq(rxq);
}
struct enic *enic = pmd_priv(eth_dev);
ENICPMD_FUNC_TRACE();
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return -E_RTE_SECONDARY;
+
/* With Rx scatter support, two RQs are now used on VIC per RQ used
* by the application.
*/
return err;
}
-static void enicpmd_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
+/* Return type changes to int to match the updated ethdev
+ * vlan_offload_set API; this implementation always reports success (0).
+ */
+static int enicpmd_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
{
struct enic *enic = pmd_priv(eth_dev);
ENICPMD_FUNC_TRACE();
if (mask & ETH_VLAN_STRIP_MASK) {
- if (eth_dev->data->dev_conf.rxmode.hw_vlan_strip)
+ /* VLAN strip is now requested via rxmode.offloads flags instead
+  * of the removed hw_vlan_strip bit-field.
+  */
+ if (eth_dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_STRIP)
enic->ig_vlan_strip_en = 1;
else
enic->ig_vlan_strip_en = 0;
dev_warning(enic,
"Configuration of extended VLAN is not supported\n");
}
+
+ return 0;
}
static int enicpmd_dev_configure(struct rte_eth_dev *eth_dev)
int ret;
struct enic *enic = pmd_priv(eth_dev);
+ /* Device configuration programs hardware resources; restrict it to
+  * the primary process.
+  */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return -E_RTE_SECONDARY;
+
ENICPMD_FUNC_TRACE();
ret = enic_set_vnic_res(enic);
if (ret) {
}
if (eth_dev->data->dev_conf.rxmode.split_hdr_size &&
- eth_dev->data->dev_conf.rxmode.header_split) {
+ /* header_split bit-field replaced by the HEADER_SPLIT offload flag. */
+ (eth_dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_HEADER_SPLIT)) {
/* Enable header-data-split */
enic_set_hdr_split_size(enic,
eth_dev->data->dev_conf.rxmode.split_hdr_size);
}
- enicpmd_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK);
- enic->hw_ip_checksum = eth_dev->data->dev_conf.rxmode.hw_ip_checksum;
- return 0;
+ /* hw_ip_checksum bit-field replaced by rxmode.offloads CHECKSUM flags;
+  * vlan_offload_set now returns a status we must propagate.
+  */
+ enic->hw_ip_checksum = !!(eth_dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_CHECKSUM);
+ ret = enicpmd_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK);
+
+ return ret;
}
/* Start the device.
{
struct enic *enic = pmd_priv(eth_dev);
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return -E_RTE_SECONDARY;
+
ENICPMD_FUNC_TRACE();
return enic_enable(enic);
}
struct rte_eth_link link;
struct enic *enic = pmd_priv(eth_dev);
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return;
+
ENICPMD_FUNC_TRACE();
enic_disable(enic);
memset(&link, 0, sizeof(link));
return enic_link_update(enic);
}
-static void enicpmd_dev_stats_get(struct rte_eth_dev *eth_dev,
+/* stats_get now returns a status code (0 on success, negative on
+ * failure) per the updated ethdev API; propagate the result of
+ * enic_dev_stats_get() instead of discarding it.
+ */
+static int enicpmd_dev_stats_get(struct rte_eth_dev *eth_dev,
struct rte_eth_stats *stats)
{
struct enic *enic = pmd_priv(eth_dev);
ENICPMD_FUNC_TRACE();
- enic_dev_stats_get(enic, stats);
+ return enic_dev_stats_get(enic, stats);
}
static void enicpmd_dev_stats_reset(struct rte_eth_dev *eth_dev)
struct enic *enic = pmd_priv(eth_dev);
ENICPMD_FUNC_TRACE();
- device_info->pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+ device_info->pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
/* Scattered Rx uses two receive queues per rx queue exposed to dpdk */
device_info->max_rx_queues = enic->conf_rq_count / 2;
device_info->max_tx_queues = enic->conf_wq_count;
{
struct enic *enic = pmd_priv(eth_dev);
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return;
+
ENICPMD_FUNC_TRACE();
+
enic->promisc = 1;
enic_add_packet_filter(enic);
}
{
struct enic *enic = pmd_priv(eth_dev);
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return;
+
ENICPMD_FUNC_TRACE();
enic->promisc = 0;
enic_add_packet_filter(enic);
{
struct enic *enic = pmd_priv(eth_dev);
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return;
+
ENICPMD_FUNC_TRACE();
enic->allmulti = 1;
enic_add_packet_filter(enic);
{
struct enic *enic = pmd_priv(eth_dev);
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return;
+
ENICPMD_FUNC_TRACE();
enic->allmulti = 0;
enic_add_packet_filter(enic);
}
-static void enicpmd_add_mac_addr(struct rte_eth_dev *eth_dev,
+/* mac_addr_add now returns int per the updated ethdev API; the result
+ * of programming the address into the NIC is propagated to the caller.
+ */
+static int enicpmd_add_mac_addr(struct rte_eth_dev *eth_dev,
struct ether_addr *mac_addr,
__rte_unused uint32_t index, __rte_unused uint32_t pool)
{
struct enic *enic = pmd_priv(eth_dev);
+ /* MAC filter programming touches hardware: primary process only. */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return -E_RTE_SECONDARY;
+
ENICPMD_FUNC_TRACE();
- enic_set_mac_address(enic, mac_addr->addr_bytes);
+ return enic_set_mac_address(enic, mac_addr->addr_bytes);
}
static void enicpmd_remove_mac_addr(struct rte_eth_dev *eth_dev, uint32_t index)
{
struct enic *enic = pmd_priv(eth_dev);
+ /* Hardware filter removal is restricted to the primary process. */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return;
+
ENICPMD_FUNC_TRACE();
enic_del_mac_address(enic, index);
}
eth_dev->rx_pkt_burst = &enic_recv_pkts;
eth_dev->tx_pkt_burst = &enic_xmit_pkts;
- pdev = RTE_DEV_TO_PCI(eth_dev->device);
+ pdev = RTE_ETH_DEV_TO_PCI(eth_dev);
rte_eth_copy_pci_info(eth_dev, pdev);
enic->pdev = pdev;
addr = &pdev->addr;
RTE_PMD_REGISTER_PCI(net_enic, rte_enic_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_enic, pci_id_enic_map);
-RTE_PMD_REGISTER_KMOD_DEP(net_enic, "* igb_uio | uio_pci_generic | vfio");
+/* "vfio" is not the PCI driver module name; the correct kernel module
+ * dependency is "vfio-pci".
+ */
+RTE_PMD_REGISTER_KMOD_DEP(net_enic, "* igb_uio | uio_pci_generic | vfio-pci");