/*
 * Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
 * Copyright (c) 2015 QLogic Corporation.
 *
 * See LICENSE.bnx2x_pmd for copyright and licensing details.
 */
12 #include "bnx2x_rxtx.h"
15 #include <rte_ethdev_pci.h>
/*
 * Dynamic log type IDs for this PMD; registered with the DPDK log
 * framework in bnx2x_init_log() at the bottom of this file.
 */
17 int bnx2x_logtype_init;
18 int bnx2x_logtype_driver;
/* The set of PCI devices this driver supports. */
23 #define BROADCOM_PCI_VENDOR_ID 0x14E4
/*
 * PCI ID table for the physical-function (PF) driver; matched against
 * devices at probe time via .id_table in rte_bnx2x_pmd.
 * NOTE(review): the table terminator and the #endif for the MF block are
 * not visible in this excerpt -- confirm against the full source.
 */
24 static const struct rte_pci_id pci_id_bnx2x_map[] = {
25 { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57800) },
26 { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57711) },
27 { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57810) },
28 { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57811) },
29 { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57840_OBS) },
30 { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57840_4_10) },
31 { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57840_2_20) },
/* Multi-function (MF) device IDs, only when MF support is compiled in. */
32 #ifdef RTE_LIBRTE_BNX2X_MF_SUPPORT
33 { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57810_MF) },
34 { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57811_MF) },
35 { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57840_MF) },
/*
 * PCI ID table for the virtual-function (VF) driver; matched via
 * .id_table in rte_bnx2xvf_pmd.
 * NOTE(review): table terminator not visible in this excerpt.
 */
40 static const struct rte_pci_id pci_id_bnx2xvf_map[] = {
41 { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57800_VF) },
42 { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57810_VF) },
43 { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57811_VF) },
44 { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57840_VF) },
/*
 * Maps one xstat display name to its location(s) inside
 * struct bnx2x_eth_stats.
 * NOTE(review): the offset members (offset_hi/offset_lo, referenced by
 * bnx2x_dev_xstats_get) are missing from this excerpt.
 */
48 struct rte_bnx2x_xstats_name_off {
49 char name[RTE_ETH_XSTATS_NAME_SIZE];
/*
 * xstats descriptor table.  Each entry names a counter and gives its
 * hi/lo offsets into struct bnx2x_eth_stats.  When the two offsets
 * differ, the counter is a split 32/32-bit pair combined with HILO_U64;
 * when they are equal, it is read as a single field (see
 * bnx2x_dev_xstats_get()).
 * NOTE(review): the name line of the first entry ("rx_buffer_drops"?) is
 * missing from this excerpt -- confirm against the full source.
 */
54 static const struct rte_bnx2x_xstats_name_off bnx2x_xstats_strings[] = {
56 offsetof(struct bnx2x_eth_stats, brb_drop_hi),
57 offsetof(struct bnx2x_eth_stats, brb_drop_lo)},
58 {"rx_buffer_truncates",
59 offsetof(struct bnx2x_eth_stats, brb_truncate_hi),
60 offsetof(struct bnx2x_eth_stats, brb_truncate_lo)},
61 {"rx_buffer_truncate_discard",
62 offsetof(struct bnx2x_eth_stats, brb_truncate_discard),
63 offsetof(struct bnx2x_eth_stats, brb_truncate_discard)},
64 {"mac_filter_discard",
65 offsetof(struct bnx2x_eth_stats, mac_filter_discard),
66 offsetof(struct bnx2x_eth_stats, mac_filter_discard)},
67 {"no_match_vlan_tag_discard",
68 offsetof(struct bnx2x_eth_stats, mf_tag_discard),
69 offsetof(struct bnx2x_eth_stats, mf_tag_discard)},
70 offsetof(struct bnx2x_eth_stats, pause_frames_sent_hi),
71 offsetof(struct bnx2x_eth_stats, pause_frames_sent_lo)},
74 offsetof(struct bnx2x_eth_stats, pause_frames_received_hi),
75 offsetof(struct bnx2x_eth_stats, pause_frames_received_lo)},
76 {"tx_priority_flow_control",
77 offsetof(struct bnx2x_eth_stats, pfc_frames_sent_hi),
78 offsetof(struct bnx2x_eth_stats, pfc_frames_sent_lo)},
79 {"rx_priority_flow_control",
80 offsetof(struct bnx2x_eth_stats, pfc_frames_received_hi),
81 offsetof(struct bnx2x_eth_stats, pfc_frames_received_lo)}
/*
 * Refresh dev->data->dev_link (speed, duplex, autoneg, status) from the
 * adapter's link_vars after asking the driver core to update link state.
 * NOTE(review): braces, switch-case labels and the return (if any) are
 * missing from this excerpt.
 */
85 bnx2x_link_update(struct rte_eth_dev *dev)
87 struct bnx2x_softc *sc = dev->data->dev_private;
89 PMD_INIT_FUNC_TRACE();
90 bnx2x_link_status_update(sc);
92 dev->data->dev_link.link_speed = sc->link_vars.line_speed;
93 switch (sc->link_vars.duplex) {
95 dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
98 dev->data->dev_link.link_duplex = ETH_LINK_HALF_DUPLEX;
/* Autoneg is reported as enabled unless a fixed speed was configured. */
101 dev->data->dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
102 ETH_LINK_SPEED_FIXED);
103 dev->data->dev_link.link_status = sc->link_vars.link_up;
/*
 * Core interrupt work: run the legacy interrupt service routine, drive
 * the periodic callout if it is armed, then re-read the link status word
 * from shared memory and refresh the cached link state if it disagrees.
 */
107 bnx2x_interrupt_action(struct rte_eth_dev *dev)
109 struct bnx2x_softc *sc = dev->data->dev_private;
110 uint32_t link_status;
112 PMD_DEBUG_PERIODIC_LOG(INFO, "Interrupt handled");
114 bnx2x_intr_legacy(sc, 0);
116 if (sc->periodic_flags & PERIODIC_GO)
117 bnx2x_periodic_callout(sc);
/* Read this port's link status mailbox word out of device shared memory. */
118 link_status = REG_RD(sc, sc->link_params.shmem_base +
119 offsetof(struct shmem_region,
120 port_mb[sc->link_params.port].link_status));
121 if ((link_status & LINK_STATUS_LINK_UP) != dev->data->dev_link.link_status)
122 bnx2x_link_update(dev);
/*
 * Interrupt callback registered via rte_intr_callback_register() in
 * bnx2x_dev_start(); dispatches to bnx2x_interrupt_action() and then
 * re-enables the (auto-masked) interrupt.
 */
126 bnx2x_interrupt_handler(void *param)
128 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
129 struct bnx2x_softc *sc = dev->data->dev_private;
131 bnx2x_interrupt_action(dev);
132 rte_intr_enable(&sc->pci_dev->intr_handle);
136 * Devops - helper functions can be called from user application
/*
 * Devops .dev_configure: validate the requested queue layout, derive the
 * MTU for jumbo frames, and allocate ILT and HSI memory.
 * NOTE(review): the return statements and error-path returns for the
 * failed checks below are missing from this excerpt.
 */
140 bnx2x_dev_configure(struct rte_eth_dev *dev)
142 struct bnx2x_softc *sc = dev->data->dev_private;
143 int mp_ncpus = sysconf(_SC_NPROCESSORS_CONF);
145 PMD_INIT_FUNC_TRACE();
147 if (dev->data->dev_conf.rxmode.jumbo_frame)
148 sc->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len;
/* TX queue count must not exceed RX queue count. */
150 if (dev->data->nb_tx_queues > dev->data->nb_rx_queues) {
151 PMD_DRV_LOG(ERR, "The number of TX queues is greater than number of RX queues");
155 sc->num_queues = MAX(dev->data->nb_rx_queues, dev->data->nb_tx_queues);
/* Queue count is also bounded by the number of configured CPUs. */
156 if (sc->num_queues > mp_ncpus) {
157 PMD_DRV_LOG(ERR, "The number of queues is more than number of CPUs");
161 PMD_DRV_LOG(DEBUG, "num_queues=%d, mtu=%d",
162 sc->num_queues, sc->mtu);
165 if (bnx2x_alloc_ilt_mem(sc) != 0) {
166 PMD_DRV_LOG(ERR, "bnx2x_alloc_ilt_mem was failed");
170 /* allocate the host hardware/software hsi structures */
171 if (bnx2x_alloc_hsi_mem(sc) != 0) {
172 PMD_DRV_LOG(ERR, "bnx2x_alloc_hsi_mem was failed");
/* Roll back the ILT allocation on HSI allocation failure. */
173 bnx2x_free_ilt_mem(sc);
/*
 * Devops .dev_start: initialize the NIC, hook up and enable the
 * interrupt callback, initialize the RX path, and print adapter info.
 * NOTE(review): the 'ret' declaration, error returns and closing brace
 * are missing from this excerpt.
 */
181 bnx2x_dev_start(struct rte_eth_dev *dev)
183 struct bnx2x_softc *sc = dev->data->dev_private;
186 PMD_INIT_FUNC_TRACE();
188 ret = bnx2x_init(sc);
190 PMD_DRV_LOG(DEBUG, "bnx2x_init failed (%d)", ret);
195 rte_intr_callback_register(&sc->pci_dev->intr_handle,
196 bnx2x_interrupt_handler, (void *)dev);
198 if (rte_intr_enable(&sc->pci_dev->intr_handle))
199 PMD_DRV_LOG(ERR, "rte_intr_enable failed");
202 ret = bnx2x_dev_rx_init(dev);
204 PMD_DRV_LOG(DEBUG, "bnx2x_dev_rx_init returned error code");
208 /* Print important adapter info for the user. */
209 bnx2x_print_adapter_info(sc);
215 bnx2x_dev_stop(struct rte_eth_dev *dev)
217 struct bnx2x_softc *sc = dev->data->dev_private;
220 PMD_INIT_FUNC_TRACE();
223 rte_intr_disable(&sc->pci_dev->intr_handle);
224 rte_intr_callback_unregister(&sc->pci_dev->intr_handle,
225 bnx2x_interrupt_handler, (void *)dev);
228 ret = bnx2x_nic_unload(sc, UNLOAD_NORMAL, FALSE);
230 PMD_DRV_LOG(DEBUG, "bnx2x_nic_unload failed (%d)", ret);
/*
 * Devops .dev_close: tear down queues, clear the cached link state, and
 * release the HSI and ILT memory allocated in bnx2x_dev_configure().
 */
238 bnx2x_dev_close(struct rte_eth_dev *dev)
240 struct bnx2x_softc *sc = dev->data->dev_private;
242 PMD_INIT_FUNC_TRACE();
247 bnx2x_dev_clear_queues(dev);
248 memset(&(dev->data->dev_link), 0 , sizeof(struct rte_eth_link));
250 /* free the host hardware/software hsi structures */
251 bnx2x_free_hsi_mem(sc);
/* Free the ILT memory last, matching allocation order in configure. */
254 bnx2x_free_ilt_mem(sc);
/*
 * Devops .promiscuous_enable: select PROMISC (or ALLMULTI_PROMISC when
 * allmulticast is already on) and push the mode to the hardware.
 */
258 bnx2x_promisc_enable(struct rte_eth_dev *dev)
260 struct bnx2x_softc *sc = dev->data->dev_private;
262 PMD_INIT_FUNC_TRACE();
263 sc->rx_mode = BNX2X_RX_MODE_PROMISC;
264 if (rte_eth_allmulticast_get(dev->data->port_id) == 1)
265 sc->rx_mode = BNX2X_RX_MODE_ALLMULTI_PROMISC;
266 bnx2x_set_rx_mode(sc);
/*
 * Devops .promiscuous_disable: fall back to NORMAL (or ALLMULTI when
 * allmulticast is still on) and push the mode to the hardware.
 */
270 bnx2x_promisc_disable(struct rte_eth_dev *dev)
272 struct bnx2x_softc *sc = dev->data->dev_private;
274 PMD_INIT_FUNC_TRACE();
275 sc->rx_mode = BNX2X_RX_MODE_NORMAL;
276 if (rte_eth_allmulticast_get(dev->data->port_id) == 1)
277 sc->rx_mode = BNX2X_RX_MODE_ALLMULTI;
278 bnx2x_set_rx_mode(sc);
/*
 * Devops .allmulticast_enable: select ALLMULTI (or ALLMULTI_PROMISC when
 * promiscuous is already on) and push the mode to the hardware.
 */
282 bnx2x_dev_allmulticast_enable(struct rte_eth_dev *dev)
284 struct bnx2x_softc *sc = dev->data->dev_private;
286 PMD_INIT_FUNC_TRACE();
287 sc->rx_mode = BNX2X_RX_MODE_ALLMULTI;
288 if (rte_eth_promiscuous_get(dev->data->port_id) == 1)
289 sc->rx_mode = BNX2X_RX_MODE_ALLMULTI_PROMISC;
290 bnx2x_set_rx_mode(sc);
/*
 * Devops .allmulticast_disable: fall back to NORMAL (or PROMISC when
 * promiscuous is still on) and push the mode to the hardware.
 */
294 bnx2x_dev_allmulticast_disable(struct rte_eth_dev *dev)
296 struct bnx2x_softc *sc = dev->data->dev_private;
298 PMD_INIT_FUNC_TRACE();
299 sc->rx_mode = BNX2X_RX_MODE_NORMAL;
300 if (rte_eth_promiscuous_get(dev->data->port_id) == 1)
301 sc->rx_mode = BNX2X_RX_MODE_PROMISC;
302 bnx2x_set_rx_mode(sc);
/*
 * Devops .link_update (PF): refresh the cached link state.  Returns 0 if
 * the link status changed, -1 if it did not (ethdev convention for this
 * callback: non-zero means "link unchanged").
 */
306 bnx2x_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
308 PMD_INIT_FUNC_TRACE();
310 int old_link_status = dev->data->dev_link.link_status;
312 bnx2x_link_update(dev);
314 return old_link_status == dev->data->dev_link.link_status ? -1 : 0;
/*
 * Devops .link_update (VF): same as the PF variant, but additionally
 * consult the PF bulletin board -- if the PF reports the channel down,
 * force the link status to DOWN.  Returns 0 on change, -1 otherwise.
 */
318 bnx2xvf_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
320 int old_link_status = dev->data->dev_link.link_status;
321 struct bnx2x_softc *sc = dev->data->dev_private;
323 bnx2x_link_update(dev);
/* Poll the PF->VF bulletin for channel-down notification. */
325 bnx2x_check_bull(sc);
326 if (sc->old_bulletin.valid_bitmap & (1 << CHANNEL_DOWN)) {
327 PMD_DRV_LOG(ERR, "PF indicated channel is down."
328 "VF device is no longer operational");
329 dev->data->dev_link.link_status = ETH_LINK_DOWN;
332 return old_link_status == dev->data->dev_link.link_status ? -1 : 0;
/*
 * Devops .stats_get: trigger a hardware statistics refresh, then fold
 * the split 32/32-bit eth_stats counters into rte_eth_stats via
 * HILO_U64.
 * NOTE(review): the lines assigning into stats->ipackets/opackets/
 * ibytes/obytes/ierrors/rx_nombuf and the local 'brb_drops' are missing
 * from this excerpt; only the right-hand sides are visible.
 */
336 bnx2x_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
338 struct bnx2x_softc *sc = dev->data->dev_private;
339 uint32_t brb_truncate_discard;
341 uint64_t brb_truncates;
343 PMD_INIT_FUNC_TRACE();
345 bnx2x_stats_handle(sc, STATS_EVENT_UPDATE);
347 memset(stats, 0, sizeof (struct rte_eth_stats));
/* RX packets = unicast + multicast + broadcast received. */
350 HILO_U64(sc->eth_stats.total_unicast_packets_received_hi,
351 sc->eth_stats.total_unicast_packets_received_lo) +
352 HILO_U64(sc->eth_stats.total_multicast_packets_received_hi,
353 sc->eth_stats.total_multicast_packets_received_lo) +
354 HILO_U64(sc->eth_stats.total_broadcast_packets_received_hi,
355 sc->eth_stats.total_broadcast_packets_received_lo);
/* TX packets = unicast + multicast + broadcast transmitted. */
358 HILO_U64(sc->eth_stats.total_unicast_packets_transmitted_hi,
359 sc->eth_stats.total_unicast_packets_transmitted_lo) +
360 HILO_U64(sc->eth_stats.total_multicast_packets_transmitted_hi,
361 sc->eth_stats.total_multicast_packets_transmitted_lo) +
362 HILO_U64(sc->eth_stats.total_broadcast_packets_transmitted_hi,
363 sc->eth_stats.total_broadcast_packets_transmitted_lo);
366 HILO_U64(sc->eth_stats.total_bytes_received_hi,
367 sc->eth_stats.total_bytes_received_lo);
370 HILO_U64(sc->eth_stats.total_bytes_transmitted_hi,
371 sc->eth_stats.total_bytes_transmitted_lo);
374 HILO_U64(sc->eth_stats.error_bytes_received_hi,
375 sc->eth_stats.error_bytes_received_lo);
380 HILO_U64(sc->eth_stats.no_buff_discard_hi,
381 sc->eth_stats.no_buff_discard_lo);
384 HILO_U64(sc->eth_stats.brb_drop_hi,
385 sc->eth_stats.brb_drop_lo);
388 HILO_U64(sc->eth_stats.brb_truncate_hi,
389 sc->eth_stats.brb_truncate_lo);
391 brb_truncate_discard = sc->eth_stats.brb_truncate_discard;
/* imissed aggregates all buffer-related RX drop counters. */
393 stats->imissed = brb_drops + brb_truncates +
394 brb_truncate_discard + stats->rx_nombuf;
/*
 * Devops .xstats_get_names: copy the static xstat names from
 * bnx2x_xstats_strings into the caller's array (when non-NULL).
 * NOTE(review): the format-string line of snprintf and the return of
 * stat_cnt are missing from this excerpt.
 */
400 bnx2x_get_xstats_names(__rte_unused struct rte_eth_dev *dev,
401 struct rte_eth_xstat_name *xstats_names,
402 __rte_unused unsigned limit)
404 unsigned int i, stat_cnt = RTE_DIM(bnx2x_xstats_strings);
406 if (xstats_names != NULL)
407 for (i = 0; i < stat_cnt; i++)
408 snprintf(xstats_names[i].name,
409 sizeof(xstats_names[i].name),
411 bnx2x_xstats_strings[i].name);
/*
 * Devops .xstats_get: refresh hardware stats, then read each descriptor
 * in bnx2x_xstats_strings.  Entries whose hi/lo offsets differ are split
 * 32-bit pairs combined via HILO_U64; entries with equal offsets are
 * read as a single 64-bit field.
 * NOTE(review): the early size check against n, the else branch header
 * and the return are missing from this excerpt.
 */
417 bnx2x_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
420 struct bnx2x_softc *sc = dev->data->dev_private;
421 unsigned int num = RTE_DIM(bnx2x_xstats_strings);
426 bnx2x_stats_handle(sc, STATS_EVENT_UPDATE);
428 for (num = 0; num < n; num++) {
429 if (bnx2x_xstats_strings[num].offset_hi !=
430 bnx2x_xstats_strings[num].offset_lo)
431 xstats[num].value = HILO_U64(
432 *(uint32_t *)((char *)&sc->eth_stats +
433 bnx2x_xstats_strings[num].offset_hi),
434 *(uint32_t *)((char *)&sc->eth_stats +
435 bnx2x_xstats_strings[num].offset_lo));
438 *(uint64_t *)((char *)&sc->eth_stats +
439 bnx2x_xstats_strings[num].offset_lo);
440 xstats[num].id = num;
/*
 * Devops .dev_infos_get: report device capabilities (queue limits,
 * buffer sizes, MAC address slots, supported link speeds).
 */
447 bnx2x_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
449 struct bnx2x_softc *sc = dev->data->dev_private;
450 dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
451 dev_info->max_rx_queues = sc->max_rx_queues;
452 dev_info->max_tx_queues = sc->max_tx_queues;
453 dev_info->min_rx_bufsize = BNX2X_MIN_RX_BUF_SIZE;
454 dev_info->max_rx_pktlen = BNX2X_MAX_RX_PKT_LEN;
455 dev_info->max_mac_addrs = BNX2X_MAX_MAC_ADDRS;
456 dev_info->speed_capa = ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G;
/*
 * Devops .mac_addr_add: delegate to the softc's mac_ops hook when one is
 * installed (presumably populated on the VF path -- confirm against the
 * full source); otherwise a no-op.
 */
460 bnx2x_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
461 uint32_t index, uint32_t pool)
463 struct bnx2x_softc *sc = dev->data->dev_private;
465 if (sc->mac_ops.mac_addr_add) {
466 sc->mac_ops.mac_addr_add(dev, mac_addr, index, pool);
/*
 * Devops .mac_addr_remove: delegate to the softc's mac_ops hook when one
 * is installed; otherwise a no-op.
 */
473 bnx2x_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
475 struct bnx2x_softc *sc = dev->data->dev_private;
477 if (sc->mac_ops.mac_addr_remove)
478 sc->mac_ops.mac_addr_remove(dev, index);
/*
 * eth_dev_ops table for physical functions; installed by
 * bnx2x_common_dev_init() when is_vf == 0.
 */
481 static const struct eth_dev_ops bnx2x_eth_dev_ops = {
482 .dev_configure = bnx2x_dev_configure,
483 .dev_start = bnx2x_dev_start,
484 .dev_stop = bnx2x_dev_stop,
485 .dev_close = bnx2x_dev_close,
486 .promiscuous_enable = bnx2x_promisc_enable,
487 .promiscuous_disable = bnx2x_promisc_disable,
488 .allmulticast_enable = bnx2x_dev_allmulticast_enable,
489 .allmulticast_disable = bnx2x_dev_allmulticast_disable,
490 .link_update = bnx2x_dev_link_update,
491 .stats_get = bnx2x_dev_stats_get,
492 .xstats_get = bnx2x_dev_xstats_get,
493 .xstats_get_names = bnx2x_get_xstats_names,
494 .dev_infos_get = bnx2x_dev_infos_get,
495 .rx_queue_setup = bnx2x_dev_rx_queue_setup,
496 .rx_queue_release = bnx2x_dev_rx_queue_release,
497 .tx_queue_setup = bnx2x_dev_tx_queue_setup,
498 .tx_queue_release = bnx2x_dev_tx_queue_release,
499 .mac_addr_add = bnx2x_mac_addr_add,
500 .mac_addr_remove = bnx2x_mac_addr_remove,
504 * dev_ops for virtual function
/*
 * eth_dev_ops table for virtual functions; identical to the PF table
 * except .link_update, which uses the bulletin-aware VF variant.
 * Installed by bnx2x_common_dev_init() when is_vf != 0.
 */
506 static const struct eth_dev_ops bnx2xvf_eth_dev_ops = {
507 .dev_configure = bnx2x_dev_configure,
508 .dev_start = bnx2x_dev_start,
509 .dev_stop = bnx2x_dev_stop,
510 .dev_close = bnx2x_dev_close,
511 .promiscuous_enable = bnx2x_promisc_enable,
512 .promiscuous_disable = bnx2x_promisc_disable,
513 .allmulticast_enable = bnx2x_dev_allmulticast_enable,
514 .allmulticast_disable = bnx2x_dev_allmulticast_disable,
515 .link_update = bnx2xvf_dev_link_update,
516 .stats_get = bnx2x_dev_stats_get,
517 .xstats_get = bnx2x_dev_xstats_get,
518 .xstats_get_names = bnx2x_get_xstats_names,
519 .dev_infos_get = bnx2x_dev_infos_get,
520 .rx_queue_setup = bnx2x_dev_rx_queue_setup,
521 .rx_queue_release = bnx2x_dev_rx_queue_release,
522 .tx_queue_setup = bnx2x_dev_tx_queue_setup,
523 .tx_queue_release = bnx2x_dev_tx_queue_release,
524 .mac_addr_add = bnx2x_mac_addr_add,
525 .mac_addr_remove = bnx2x_mac_addr_remove,
/*
 * Shared PF/VF device initialization: install the appropriate dev_ops
 * table, record PCI identity, map BARs, load firmware, set driver
 * defaults, attach to the adapter, and (on the VF path) set up the
 * VF<->PF mailbox and bulletin DMA areas and request resources.
 * NOTE(review): many guard lines (e.g. the if (is_vf) conditionals
 * around the VF-only sections, error returns, and the closing brace) are
 * missing from this excerpt.
 */
530 bnx2x_common_dev_init(struct rte_eth_dev *eth_dev, int is_vf)
533 struct rte_pci_device *pci_dev;
534 struct bnx2x_softc *sc;
536 PMD_INIT_FUNC_TRACE();
538 eth_dev->dev_ops = is_vf ? &bnx2xvf_eth_dev_ops : &bnx2x_eth_dev_ops;
539 pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
541 rte_eth_copy_pci_info(eth_dev, pci_dev);
543 sc = eth_dev->data->dev_private;
544 sc->pcie_bus = pci_dev->addr.bus;
545 sc->pcie_device = pci_dev->addr.devid;
/* Presumably guarded by is_vf in the full source -- TODO confirm. */
548 sc->flags = BNX2X_IS_VF_FLAG;
550 sc->devinfo.vendor_id = pci_dev->id.vendor_id;
551 sc->devinfo.device_id = pci_dev->id.device_id;
552 sc->devinfo.subvendor_id = pci_dev->id.subsystem_vendor_id;
553 sc->devinfo.subdevice_id = pci_dev->id.subsystem_device_id;
555 sc->pcie_func = pci_dev->addr.function;
556 sc->bar[BAR0].base_addr = (void *)pci_dev->mem_resource[0].addr;
/* VF doorbells live at an offset within BAR0; PF uses mem_resource[2]. */
558 sc->bar[BAR1].base_addr = (void *)
559 ((uintptr_t)pci_dev->mem_resource[0].addr + PXP_VF_ADDR_DB_START);
561 sc->bar[BAR1].base_addr = pci_dev->mem_resource[2].addr;
563 assert(sc->bar[BAR0].base_addr);
564 assert(sc->bar[BAR1].base_addr);
566 bnx2x_load_firmware(sc);
567 assert(sc->firmware);
569 if (eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
/* Driver tuning defaults. */
572 sc->rx_budget = BNX2X_RX_BUDGET;
573 sc->hc_rx_ticks = BNX2X_RX_TICKS;
574 sc->hc_tx_ticks = BNX2X_TX_TICKS;
576 sc->interrupt_mode = INTR_MODE_SINGLE_MSIX;
577 sc->rx_mode = BNX2X_RX_MODE_NORMAL;
579 sc->pci_dev = pci_dev;
580 ret = bnx2x_attach(sc);
582 PMD_DRV_LOG(ERR, "bnx2x_attach failed (%d)", ret);
/* Expose the adapter's MAC address array directly to the ethdev layer. */
586 eth_dev->data->mac_addrs = (struct ether_addr *)sc->link_params.mac_addr;
588 PMD_DRV_LOG(INFO, "pcie_bus=%d, pcie_device=%d",
589 sc->pcie_bus, sc->pcie_device);
590 PMD_DRV_LOG(INFO, "bar0.addr=%p, bar1.addr=%p",
591 sc->bar[BAR0].base_addr, sc->bar[BAR1].base_addr);
592 PMD_DRV_LOG(INFO, "port=%d, path=%d, vnic=%d, func=%d",
593 PORT_ID(sc), PATH_ID(sc), VNIC_ID(sc), FUNC_ID(sc));
594 PMD_DRV_LOG(INFO, "portID=%d vendorID=0x%x deviceID=0x%x",
595 eth_dev->data->port_id, pci_dev->id.vendor_id, pci_dev->id.device_id);
/* VF-only: DMA areas for the VF->PF mailbox and PF->VF bulletin board. */
598 rte_spinlock_init(&sc->vf2pf_lock);
600 if (bnx2x_dma_alloc(sc, sizeof(struct bnx2x_vf_mbx_msg),
601 &sc->vf2pf_mbox_mapping, "vf2pf_mbox",
602 RTE_CACHE_LINE_SIZE) != 0)
605 sc->vf2pf_mbox = (struct bnx2x_vf_mbx_msg *)
606 sc->vf2pf_mbox_mapping.vaddr;
608 if (bnx2x_dma_alloc(sc, sizeof(struct bnx2x_vf_bulletin),
609 &sc->pf2vf_bulletin_mapping, "vf2pf_bull",
610 RTE_CACHE_LINE_SIZE) != 0)
613 sc->pf2vf_bulletin = (struct bnx2x_vf_bulletin *)
614 sc->pf2vf_bulletin_mapping.vaddr;
/* Negotiate queue/resource counts with the PF over the mailbox. */
616 ret = bnx2x_vf_get_resources(sc, sc->max_tx_queues,
/* PF init entry point: thin wrapper over the common init with is_vf=0. */
626 eth_bnx2x_dev_init(struct rte_eth_dev *eth_dev)
628 PMD_INIT_FUNC_TRACE();
629 return bnx2x_common_dev_init(eth_dev, 0);
/* VF init entry point: thin wrapper over the common init with is_vf=1. */
633 eth_bnx2xvf_dev_init(struct rte_eth_dev *eth_dev)
635 PMD_INIT_FUNC_TRACE();
636 return bnx2x_common_dev_init(eth_dev, 1);
/*
 * Forward declarations: eth_bnx2x_pci_probe() compares its pci_drv
 * argument against these to pick the PF or VF init path; the structs are
 * defined below.
 */
639 static struct rte_pci_driver rte_bnx2x_pmd;
640 static struct rte_pci_driver rte_bnx2xvf_pmd;
/*
 * Shared PCI probe for both drivers: allocate the ethdev with a
 * bnx2x_softc private area, dispatch to the PF or VF init depending on
 * which driver matched, and release the ethdev on failure.
 * NOTE(review): the NULL check after allocation and the return
 * statements are missing from this excerpt.
 */
642 static int eth_bnx2x_pci_probe(struct rte_pci_driver *pci_drv,
643 struct rte_pci_device *pci_dev)
645 struct rte_eth_dev *eth_dev;
648 eth_dev = rte_eth_dev_pci_allocate(pci_dev, sizeof(struct bnx2x_softc));
652 if (pci_drv == &rte_bnx2x_pmd)
653 ret = eth_bnx2x_dev_init(eth_dev);
654 else if (pci_drv == &rte_bnx2xvf_pmd)
655 ret = eth_bnx2xvf_dev_init(eth_dev);
660 rte_eth_dev_pci_release(eth_dev);
/* PCI remove: no driver-specific teardown; use the generic helper. */
665 static int eth_bnx2x_pci_remove(struct rte_pci_device *pci_dev)
667 return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
/* PF driver struct: needs BAR mapping and supports link-state interrupts. */
670 static struct rte_pci_driver rte_bnx2x_pmd = {
671 .id_table = pci_id_bnx2x_map,
672 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
673 .probe = eth_bnx2x_pci_probe,
674 .remove = eth_bnx2x_pci_remove,
678 * virtual function driver struct
/* VF driver struct: BAR mapping only (no LSC interrupt flag). */
680 static struct rte_pci_driver rte_bnx2xvf_pmd = {
681 .id_table = pci_id_bnx2xvf_map,
682 .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
683 .probe = eth_bnx2x_pci_probe,
684 .remove = eth_bnx2x_pci_remove,
/* Register both drivers, their PCI ID tables, and kernel-module deps. */
687 RTE_PMD_REGISTER_PCI(net_bnx2x, rte_bnx2x_pmd);
688 RTE_PMD_REGISTER_PCI_TABLE(net_bnx2x, pci_id_bnx2x_map);
689 RTE_PMD_REGISTER_KMOD_DEP(net_bnx2x, "* igb_uio | uio_pci_generic | vfio-pci");
690 RTE_PMD_REGISTER_PCI(net_bnx2xvf, rte_bnx2xvf_pmd);
691 RTE_PMD_REGISTER_PCI_TABLE(net_bnx2xvf, pci_id_bnx2xvf_map);
692 RTE_PMD_REGISTER_KMOD_DEP(net_bnx2xvf, "* igb_uio | vfio-pci");
/*
 * Constructor: register the two dynamic log types used throughout this
 * PMD and default both to NOTICE level.
 * NOTE(review): the function header line between RTE_INIT and the body
 * is missing from this excerpt.
 */
694 RTE_INIT(bnx2x_init_log);
698 bnx2x_logtype_init = rte_log_register("pmd.bnx2x.init");
699 if (bnx2x_logtype_init >= 0)
700 rte_log_set_level(bnx2x_logtype_init, RTE_LOG_NOTICE);
701 bnx2x_logtype_driver = rte_log_register("pmd.bnx2x.driver");
702 if (bnx2x_logtype_driver >= 0)
703 rte_log_set_level(bnx2x_logtype_driver, RTE_LOG_NOTICE);