dev_private->dev_ops = virtual_ethdev_default_dev_ops;
eth_dev->dev_ops = &dev_private->dev_ops;
- eth_dev->pci_dev = pci_dev;
- eth_dev->pci_dev->device.driver = &eth_drv->pci_drv.driver;
+ pci_dev->device.driver = &eth_drv->pci_drv.driver;
+ eth_dev->device = &pci_dev->device;
eth_dev->rx_pkt_burst = virtual_ethdev_rx_burst_success;
eth_dev->tx_pkt_burst = virtual_ethdev_tx_burst_success;
bnx2x_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
struct bnx2x_softc *sc = dev->data->dev_private;
- dev_info->pci_dev = dev->pci_dev;
+ dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);
dev_info->max_rx_queues = sc->max_rx_queues;
dev_info->max_tx_queues = sc->max_tx_queues;
dev_info->min_rx_bufsize = BNX2X_MIN_RX_BUF_SIZE;
PMD_INIT_FUNC_TRACE();
eth_dev->dev_ops = is_vf ? &bnx2xvf_eth_dev_ops : &bnx2x_eth_dev_ops;
- pci_dev = eth_dev->pci_dev;
+ pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
rte_eth_copy_pci_info(eth_dev, pci_dev);
struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
uint16_t max_vnics, i, j, vpool, vrxq;
- dev_info->pci_dev = eth_dev->pci_dev;
+ dev_info->pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
/* MAC Specifics */
dev_info->max_mac_addrs = MAX_NUM_MAC_ADDR;
static int bnxt_init_board(struct rte_eth_dev *eth_dev)
{
struct bnxt *bp = eth_dev->data->dev_private;
- struct rte_pci_device *pci_dev = eth_dev->pci_dev;
+ struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
int rc;
/* enable device (incl. PCI PM wakeup), and bus-mastering */
static int
bnxt_dev_init(struct rte_eth_dev *eth_dev)
{
- struct rte_pci_device *pci_dev = eth_dev->pci_dev;
+ struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
static int version_printed;
struct bnxt *bp;
int rc;
static inline int
find_port_id_by_pci_addr(const struct rte_pci_addr *pci_addr)
{
+ struct rte_pci_device *pci_dev;
struct rte_pci_addr *eth_pci_addr;
unsigned i;
for (i = 0; i < rte_eth_dev_count(); i++) {
- if (rte_eth_devices[i].pci_dev == NULL)
+ /* Currently populated by rte_eth_copy_pci_info().
+ *
+ * TODO: Once the PCI bus has arrived we should have a better
+ * way to test for being a PCI device or not.
+ */
+ if (rte_eth_devices[i].data->kdrv == RTE_KDRV_UNKNOWN ||
+ rte_eth_devices[i].data->kdrv == RTE_KDRV_NONE)
continue;
- eth_pci_addr = &(rte_eth_devices[i].pci_dev->addr);
+ pci_dev = RTE_DEV_TO_PCI(rte_eth_devices[i].device);
+ eth_pci_addr = &pci_dev->addr;
if (pci_addr->bus == eth_pci_addr->bus &&
pci_addr->devid == eth_pci_addr->devid &&
.nb_align = 1,
};
- device_info->pci_dev = eth_dev->pci_dev;
+ device_info->pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
device_info->min_rx_bufsize = CXGBE_MIN_RX_BUFSIZE;
device_info->max_rx_pktlen = CXGBE_MAX_RX_PKTLEN;
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
- pci_dev = eth_dev->pci_dev;
+ pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
snprintf(name, sizeof(name), "cxgbeadapter%d", eth_dev->data->port_id);
adapter = rte_zmalloc(name, sizeof(*adapter), 0);
pi->eth_dev->data = data;
allocate_mac:
- pi->eth_dev->pci_dev = adapter->pdev;
+ pi->eth_dev->device = &adapter->pdev->device;
pi->eth_dev->data->dev_private = pi;
pi->eth_dev->driver = adapter->eth_dev->driver;
pi->eth_dev->dev_ops = adapter->eth_dev->dev_ops;
pi->eth_dev->tx_pkt_burst = adapter->eth_dev->tx_pkt_burst;
pi->eth_dev->rx_pkt_burst = adapter->eth_dev->rx_pkt_burst;
- rte_eth_copy_pci_info(pi->eth_dev, pi->eth_dev->pci_dev);
+ rte_eth_copy_pci_info(pi->eth_dev, adapter->pdev);
pi->eth_dev->data->mac_addrs = rte_zmalloc(name,
ETHER_ADDR_LEN, 0);
(&((struct e1000_adapter *)adapter)->filter)
#define E1000_DEV_TO_PCI(eth_dev) \
- (eth_dev->pci_dev)
+ RTE_DEV_TO_PCI((eth_dev)->device)
/*
* RX/TX IGB function prototypes
*/
{
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- dev_info->pci_dev = dev->pci_dev;
+ dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);
dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
dev_info->max_rx_pktlen = em_get_max_pktlen(hw);
dev_info->max_mac_addrs = hw->mac.rar_entry_count;
{
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- dev_info->pci_dev = dev->pci_dev;
+ dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);
dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
dev_info->max_rx_pktlen = 0x3FFF; /* See RLPML register. */
dev_info->max_mac_addrs = hw->mac.rar_entry_count;
{
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- dev_info->pci_dev = dev->pci_dev;
+ dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);
dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
dev_info->max_rx_pktlen = 0x3FFF; /* See RLPML register. */
dev_info->max_mac_addrs = hw->mac.rar_entry_count;
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
- pci_dev = eth_dev->pci_dev;
+ pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
adapter->pdev = pci_dev;
PMD_INIT_LOG(INFO, "Initializing %x:%x:%x.%d\n",
ena_dev = &adapter->ena_dev;
ena_assert_msg(ena_dev != NULL, "Uninitialized device");
- dev_info->pci_dev = dev->pci_dev;
+ dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);
dev_info->speed_capa =
ETH_LINK_SPEED_1G |
struct enic *enic = pmd_priv(eth_dev);
ENICPMD_FUNC_TRACE();
- device_info->pci_dev = eth_dev->pci_dev;
+ device_info->pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
/* Scattered Rx uses two receive queues per rx queue exposed to dpdk */
device_info->max_rx_queues = enic->conf_rq_count / 2;
device_info->max_tx_queues = enic->conf_wq_count;
eth_dev->rx_pkt_burst = &enic_recv_pkts;
eth_dev->tx_pkt_burst = &enic_xmit_pkts;
- pdev = eth_dev->pci_dev;
+ pdev = RTE_DEV_TO_PCI(eth_dev->device);
rte_eth_copy_pci_info(eth_dev, pdev);
enic->pdev = pdev;
addr = &pdev->addr;
/* Enable use of FTAG bit in TX descriptor, PFVTCTL
* register is read-only for VF.
*/
- if (fm10k_check_ftag(dev->pci_dev->device.devargs)) {
+ if (fm10k_check_ftag(dev->device->devargs)) {
if (hw->mac.type == fm10k_mac_pf) {
FM10K_WRITE_REG(hw, FM10K_PFVTCTL(i),
FM10K_PFVTCTL_FTAG_DESC_ENABLE);
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct fm10k_macvlan_filter_info *macvlan;
- struct rte_pci_device *pdev = dev->pci_dev;
+ struct rte_pci_device *pdev = RTE_DEV_TO_PCI(dev->device);
struct rte_intr_handle *intr_handle = &pdev->intr_handle;
int i, ret;
struct fm10k_rx_queue *rxq;
fm10k_dev_stop(struct rte_eth_dev *dev)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_pci_device *pdev = dev->pci_dev;
+ struct rte_pci_device *pdev = RTE_DEV_TO_PCI(dev->device);
struct rte_intr_handle *intr_handle = &pdev->intr_handle;
int i;
struct rte_eth_dev_info *dev_info)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_pci_device *pdev = dev->pci_dev;
+ struct rte_pci_device *pdev = RTE_DEV_TO_PCI(dev->device);
PMD_INIT_FUNC_TRACE();
fm10k_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_pci_device *pdev = dev->pci_dev;
+ struct rte_pci_device *pdev = RTE_DEV_TO_PCI(dev->device);
/* Enable ITR */
if (hw->mac.type == fm10k_mac_pf)
fm10k_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_pci_device *pdev = dev->pci_dev;
+ struct rte_pci_device *pdev = RTE_DEV_TO_PCI(dev->device);
/* Disable ITR */
if (hw->mac.type == fm10k_mac_pf)
fm10k_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_pci_device *pdev = dev->pci_dev;
+ struct rte_pci_device *pdev = RTE_DEV_TO_PCI(dev->device);
struct rte_intr_handle *intr_handle = &pdev->intr_handle;
uint32_t intr_vector, vec;
uint16_t queue_id;
int use_sse = 1;
uint16_t tx_ftag_en = 0;
- if (fm10k_check_ftag(dev->pci_dev->device.devargs))
+ if (fm10k_check_ftag(dev->device->devargs))
tx_ftag_en = 1;
for (i = 0; i < dev->data->nb_tx_queues; i++) {
uint16_t i, rx_using_sse;
uint16_t rx_ftag_en = 0;
- if (fm10k_check_ftag(dev->pci_dev->device.devargs))
+ if (fm10k_check_ftag(dev->device->devargs))
rx_ftag_en = 1;
/* In order to allow Vector Rx there are a few configuration
eth_fm10k_dev_init(struct rte_eth_dev *dev)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_pci_device *pdev = dev->pci_dev;
+ struct rte_pci_device *pdev = RTE_DEV_TO_PCI(dev->device);
struct rte_intr_handle *intr_handle = &pdev->intr_handle;
int diag, i;
struct fm10k_macvlan_filter_info *macvlan;
eth_fm10k_dev_uninit(struct rte_eth_dev *dev)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_pci_device *pdev = dev->pci_dev;
+ struct rte_pci_device *pdev = RTE_DEV_TO_PCI(dev->device);
struct rte_intr_handle *intr_handle = &pdev->intr_handle;
PMD_INIT_FUNC_TRACE();
struct rte_eth_txq_info *qinfo);
#define I40E_DEV_TO_PCI(eth_dev) \
- (eth_dev->pci_dev)
+ RTE_DEV_TO_PCI((eth_dev)->device)
/* I40E_DEV_PRIVATE_TO */
#define I40E_DEV_PRIVATE_TO_PF(adapter) \
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
memset(dev_info, 0, sizeof(*dev_info));
- dev_info->pci_dev = dev->pci_dev;
+ dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);
dev_info->max_rx_queues = vf->vsi_res->num_queue_pairs;
dev_info->max_tx_queues = vf->vsi_res->num_queue_pairs;
dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
};
#define IXGBE_DEV_TO_PCI(eth_dev) \
- (eth_dev->pci_dev)
+ RTE_DEV_TO_PCI((eth_dev)->device)
#define IXGBE_DEV_PRIVATE_TO_HW(adapter)\
(&((struct ixgbe_adapter *)adapter)->hw)
unsigned int max;
char ifname[IF_NAMESIZE];
- info->pci_dev = dev->pci_dev;
+ info->pci_dev = RTE_DEV_TO_PCI(dev->device);
if (priv == NULL)
return;
eth_dev->data->dev_private = priv;
eth_dev->data->mac_addrs = priv->mac;
}
- eth_dev->pci_dev = pci_dev;
+ eth_dev->device = &pci_dev->device;
rte_eth_copy_pci_info(eth_dev, pci_dev);
eth_dev->data->mac_addrs = priv->mac;
}
- eth_dev->pci_dev = pci_dev;
+ eth_dev->device = &pci_dev->device;
rte_eth_copy_pci_info(eth_dev, pci_dev);
eth_dev->driver = &mlx5_driver;
priv->dev = eth_dev;
unsigned int max;
char ifname[IF_NAMESIZE];
- info->pci_dev = dev->pci_dev;
+ info->pci_dev = RTE_DEV_TO_PCI(dev->device);
priv_lock(priv);
/* FIXME: we should ask the device for these values. */
PMD_INIT_LOG(DEBUG, "Close\n");
hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- pci_dev = dev->pci_dev;
+ pci_dev = RTE_DEV_TO_PCI(dev->device);
/*
* We assume that the DPDK application is stopping all the
hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- dev_info->pci_dev = dev->pci_dev;
+ dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);
dev_info->driver_name = dev->driver->pci_drv.driver.name;
dev_info->max_rx_queues = (uint16_t)hw->max_rx_queues;
dev_info->max_tx_queues = (uint16_t)hw->max_tx_queues;
static void
nfp_net_dev_link_status_print(struct rte_eth_dev *dev)
{
- struct rte_pci_device *pci_dev = dev->pci_dev;
+ struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
struct rte_eth_link link;
memset(&link, 0, sizeof(link));
struct rte_pci_device *pci_dev;
hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- pci_dev = dev->pci_dev;
+ pci_dev = RTE_DEV_TO_PCI(dev->device);
if (hw->ctrl & NFP_NET_CFG_CTRL_MSIXAUTO) {
/* If MSI-X auto-masking is used, clear the entry */
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
- pci_dev = eth_dev->pci_dev;
+ pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
rte_eth_copy_pci_info(eth_dev, pci_dev);
hw->device_id = pci_dev->id.device_id;
PMD_INIT_FUNC_TRACE(edev);
- dev_info->pci_dev = eth_dev->pci_dev;
+ dev_info->pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
dev_info->min_rx_bufsize = (uint32_t)(ETHER_MIN_MTU +
QEDE_ETH_OVERHEAD);
dev_info->max_rx_pktlen = (uint32_t)ETH_TX_MAX_NON_LSO_PKT_LEN;
static void qede_dev_close(struct rte_eth_dev *eth_dev)
{
- struct rte_pci_device *pci_dev = eth_dev->pci_dev;
+ struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
int rc;
/* Extract key data structures */
adapter = eth_dev->data->dev_private;
edev = &adapter->edev;
- pci_dev = eth_dev->pci_dev;
+ pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
pci_addr = pci_dev->addr;
PMD_INIT_FUNC_TRACE(edev);
struct rte_eth_dev_info *dev_info)
{
struct pmd_internals *internals = dev->data->dev_private;
- dev_info->pci_dev = dev->pci_dev;
+ dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);
dev_info->if_index = 0;
dev_info->max_mac_addrs = 1;
dev_info->max_rx_pktlen = (uint32_t)-1;
struct szedata *szedata_temp;
int ret;
uint32_t szedata2_index;
- struct rte_pci_device *pci_dev = dev->pci_dev;
+ struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
struct rte_pci_addr *pci_addr = &pci_dev->addr;
struct rte_mem_resource *pci_rsc =
&pci_dev->mem_resource[PCI_RESOURCE_NUMBER];
static int
rte_szedata2_eth_dev_uninit(struct rte_eth_dev *dev)
{
- struct rte_pci_device *pci_dev = dev->pci_dev;
+ struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
struct rte_pci_addr *pci_addr = &pci_dev->addr;
rte_free(dev->data->mac_addrs);
nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
struct nicvf *nic = nicvf_pmd_priv(dev);
+ struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
PMD_INIT_FUNC_TRACE();
- dev_info->pci_dev = dev->pci_dev;
+ dev_info->pci_dev = pci_dev;
dev_info->min_rx_bufsize = ETHER_MIN_MTU;
dev_info->max_rx_pktlen = NIC_HW_MAX_FRS;
dev_info->max_tx_queues =
(uint16_t)MAX_SND_QUEUES_PER_QS * (MAX_SQS_PER_VF + 1);
dev_info->max_mac_addrs = 1;
- dev_info->max_vfs = dev->pci_dev->max_vfs;
+ dev_info->max_vfs = pci_dev->max_vfs;
dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
dev_info->tx_offload_capa =
}
}
- pci_dev = eth_dev->pci_dev;
+ pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
rte_eth_copy_pci_info(eth_dev, pci_dev);
nic->device_id = pci_dev->id.device_id;
* virtio_user_eth_dev_alloc() before eth_virtio_dev_init() is called.
*/
if (!hw->virtio_user_dev) {
- ret = vtpci_init(eth_dev->pci_dev, hw, &dev_flags);
+ ret = vtpci_init(RTE_DEV_TO_PCI(eth_dev->device), hw,
+ &dev_flags);
if (ret)
return ret;
}
data->numa_node = SOCKET_ID_ANY;
data->kdrv = RTE_KDRV_NONE;
data->dev_flags = RTE_ETH_DEV_DETACHABLE;
- eth_dev->pci_dev = NULL;
eth_dev->driver = NULL;
return eth_dev;
}
eth_dev->dev_ops = &vmxnet3_eth_dev_ops;
eth_dev->rx_pkt_burst = &vmxnet3_recv_pkts;
eth_dev->tx_pkt_burst = &vmxnet3_xmit_pkts;
- pci_dev = eth_dev->pci_dev;
+ pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
/*
* for secondary processes, we don't initialize any further as primary
vmxnet3_dev_info_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info)
{
- dev_info->pci_dev = dev->pci_dev;
+ dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);
dev_info->max_rx_queues = VMXNET3_MAX_RX_QUEUES;
dev_info->max_tx_queues = VMXNET3_MAX_TX_QUEUES;
enum rte_kernel_driver kdrv; /**< Kernel driver passthrough */
};
+/**
+ * @internal
+ * Helper macro for drivers that need to convert to struct rte_pci_device.
+ */
+#define RTE_DEV_TO_PCI(ptr) container_of(ptr, struct rte_pci_device, device)
+
/** Any PCI device identifier (vendor, device, ...) */
#define PCI_ANY_ID (0xffff)
#define RTE_CLASS_ANY_ID (0xffffff)
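For context: RTE_DEV_TO_PCI() is the classic container_of() pattern. Since
struct rte_device is embedded as the "device" member of struct
rte_pci_device, subtracting that member's offset from the generic pointer
recovers the enclosing PCI device. A minimal standalone sketch, using
simplified stand-in structures (not the real DPDK definitions):

    #include <assert.h>
    #include <stddef.h>

    /* Simplified stand-ins for the real DPDK structures. */
    struct rte_device { const char *name; };
    struct rte_pci_device {
            int vendor_id;
            struct rte_device device; /* embedded generic device */
    };

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))
    #define RTE_DEV_TO_PCI(ptr) \
            container_of(ptr, struct rte_pci_device, device)

    int main(void)
    {
            struct rte_pci_device pdev = { .vendor_id = 0x8086 };
            struct rte_device *generic = &pdev.device;

            /* The generic pointer maps back to the enclosing PCI device. */
            assert(RTE_DEV_TO_PCI(generic) == &pdev);
            return 0;
    }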
if (eth_dev->data->dev_private == NULL)
rte_panic("Cannot allocate memzone for private port data\n");
}
- eth_dev->pci_dev = pci_dev;
+ eth_dev->device = &pci_dev->device;
eth_dev->intr_handle = &pci_dev->intr_handle;
eth_dev->driver = eth_drv;
if (rte_eal_process_type() == RTE_PROC_PRIMARY)
rte_free(eth_dev->data->dev_private);
- eth_dev->pci_dev = NULL;
+ eth_dev->device = NULL;
eth_dev->driver = NULL;
eth_dev->data = NULL;
const struct rte_memzone *mz;
snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
- dev->driver->pci_drv.driver.name, ring_name,
+ dev->data->drv_name, ring_name,
dev->data->port_id, queue_id);
mz = rte_memzone_lookup(z_name);
struct rte_eth_dev_data *data; /**< Pointer to device data */
const struct eth_driver *driver;/**< Driver for this device */
const struct eth_dev_ops *dev_ops; /**< Functions exported by PMD */
- struct rte_pci_device *pci_dev; /**< PCI info. supplied by probing */
+ struct rte_device *device; /**< Backing device */
struct rte_intr_handle *intr_handle; /**< Device interrupt handle */
/** User application callbacks for NIC interrupts */
struct rte_eth_dev_cb_list link_intr_cbs;
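To summarize the driver-side migration in one place, a minimal sketch of a
hypothetical dev_infos_get callback under the new layout (the callback name
is illustrative; the conversion pattern is the one applied throughout the
hunks above):

    static void
    example_dev_infos_get(struct rte_eth_dev *dev,
                          struct rte_eth_dev_info *dev_info)
    {
            /* The ethdev now carries only a generic rte_device pointer;
             * PMDs convert it back to the PCI device on demand instead
             * of dereferencing the removed eth_dev->pci_dev field.
             */
            struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);

            dev_info->pci_dev = pci_dev;
            dev_info->max_vfs = pci_dev->max_vfs;
    }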