1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
3 * Copyright (c) 2015-2018 Cavium Inc.
9 #include "bnx2x_rxtx.h"
11 #include <rte_string_fns.h>
13 #include <ethdev_pci.h>
14 #include <rte_alarm.h>
17 * The set of PCI devices this driver supports
/* PCI vendor IDs: Broadcom (0x14E4) for most parts; QLogic (0x1077) for
 * rebranded 57840 variants, which appear twice in the tables below.
 */
19 #define BROADCOM_PCI_VENDOR_ID 0x14E4
20 #define QLOGIC_PCI_VENDOR_ID 0x1077
/* PF device-ID match table for rte_bnx2x_pmd.
 * NOTE(review): this view of the file is truncated; the table's sentinel
 * entry and closing brace are not visible here.
 */
21 static const struct rte_pci_id pci_id_bnx2x_map[] = {
22 { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57800) },
23 { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57711) },
24 { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57810) },
25 { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57811) },
26 { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57840_OBS) },
27 { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57840_4_10) },
28 { RTE_PCI_DEVICE(QLOGIC_PCI_VENDOR_ID, CHIP_NUM_57840_4_10) },
29 { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57840_2_20) },
/* Multi-function (MF) personalities are only matched when MF support is
 * compiled in.
 */
30 #ifdef RTE_LIBRTE_BNX2X_MF_SUPPORT
31 { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57810_MF) },
32 { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57811_MF) },
33 { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57840_MF) },
34 { RTE_PCI_DEVICE(QLOGIC_PCI_VENDOR_ID, CHIP_NUM_57840_MF) },
/* VF device-ID match table for rte_bnx2xvf_pmd (SR-IOV virtual functions).
 * NOTE(review): truncated view; sentinel entry / closing brace not visible.
 */
39 static const struct rte_pci_id pci_id_bnx2xvf_map[] = {
40 { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57800_VF) },
41 { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57810_VF) },
42 { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57811_VF) },
43 { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57840_VF) },
44 { RTE_PCI_DEVICE(QLOGIC_PCI_VENDOR_ID, CHIP_NUM_57840_VF) },
/* Maps an xstat display name to offsets within struct bnx2x_eth_stats.
 * NOTE(review): the offset members (presumably offset_hi/offset_lo, as used
 * by bnx2x_dev_xstats_get below) are not visible in this truncated view.
 */
48 struct rte_bnx2x_xstats_name_off {
49 char name[RTE_ETH_XSTATS_NAME_SIZE];
/* Extended-statistics table. Entries with distinct hi/lo offsets are split
 * 64-bit counters combined via HILO_U64; entries where both offsets are the
 * same field (e.g. mac_filter_discard) are plain 32/64-bit counters -- see
 * the offset_hi != offset_lo test in bnx2x_dev_xstats_get.
 */
54 static const struct rte_bnx2x_xstats_name_off bnx2x_xstats_strings[] = {
56 offsetof(struct bnx2x_eth_stats, brb_drop_hi),
57 offsetof(struct bnx2x_eth_stats, brb_drop_lo)},
58 {"rx_buffer_truncates",
59 offsetof(struct bnx2x_eth_stats, brb_truncate_hi),
60 offsetof(struct bnx2x_eth_stats, brb_truncate_lo)},
61 {"rx_buffer_truncate_discard",
62 offsetof(struct bnx2x_eth_stats, brb_truncate_discard),
63 offsetof(struct bnx2x_eth_stats, brb_truncate_discard)},
64 {"mac_filter_discard",
65 offsetof(struct bnx2x_eth_stats, mac_filter_discard),
66 offsetof(struct bnx2x_eth_stats, mac_filter_discard)},
67 {"no_match_vlan_tag_discard",
68 offsetof(struct bnx2x_eth_stats, mf_tag_discard),
69 offsetof(struct bnx2x_eth_stats, mf_tag_discard)},
71 offsetof(struct bnx2x_eth_stats, pause_frames_sent_hi),
72 offsetof(struct bnx2x_eth_stats, pause_frames_sent_lo)},
74 offsetof(struct bnx2x_eth_stats, pause_frames_received_hi),
75 offsetof(struct bnx2x_eth_stats, pause_frames_received_lo)},
76 {"tx_priority_flow_control",
77 offsetof(struct bnx2x_eth_stats, pfc_frames_sent_hi),
78 offsetof(struct bnx2x_eth_stats, pfc_frames_sent_lo)},
79 {"rx_priority_flow_control",
80 offsetof(struct bnx2x_eth_stats, pfc_frames_received_hi),
81 offsetof(struct bnx2x_eth_stats, pfc_frames_received_lo)}
/* Read link state from the softc's link_vars (speed, duplex, up/down) into
 * an rte_eth_link and publish it atomically via rte_eth_linkstatus_set.
 * Returns rte_eth_linkstatus_set()'s result (0 if link changed, -1 if not,
 * per the ethdev convention).
 * NOTE(review): truncated view -- the switch's case labels and default
 * branch are not visible here.
 */
85 bnx2x_link_update(struct rte_eth_dev *dev)
87 struct bnx2x_softc *sc = dev->data->dev_private;
88 struct rte_eth_link link;
90 PMD_INIT_FUNC_TRACE(sc);
92 memset(&link, 0, sizeof(link));
94 link.link_speed = sc->link_vars.line_speed;
95 switch (sc->link_vars.duplex) {
97 link.link_duplex = ETH_LINK_FULL_DUPLEX;
100 link.link_duplex = ETH_LINK_HALF_DUPLEX;
/* Autoneg is reported unless the application pinned a fixed speed. */
103 link.link_autoneg = !(dev->data->dev_conf.link_speeds &
104 ETH_LINK_SPEED_FIXED);
105 link.link_status = sc->link_vars.link_up;
107 return rte_eth_linkstatus_set(dev, &link);
/* Common slow-path work shared by the interrupt handler (intr_cxt=1) and the
 * periodic alarm (intr_cxt=0): service legacy interrupts, run the periodic
 * callout while PERIODIC_GO is set, then re-read the shmem link_status word
 * and refresh the cached link state if it changed.
 * NOTE(review): truncated view -- the condition guarded alongside
 * PERIODIC_GO (likely involving intr_cxt) is not visible here.
 */
111 bnx2x_interrupt_action(struct rte_eth_dev *dev, int intr_cxt)
113 struct bnx2x_softc *sc = dev->data->dev_private;
114 uint32_t link_status;
116 bnx2x_intr_legacy(sc);
118 if ((atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_GO) &&
120 bnx2x_periodic_callout(sc);
/* Read the per-port link status mailbox from shared memory. */
121 link_status = REG_RD(sc, sc->link_params.shmem_base +
122 offsetof(struct shmem_region,
123 port_mb[sc->link_params.port].link_status));
124 if ((link_status & LINK_STATUS_LINK_UP) != dev->data->dev_link.link_status)
125 bnx2x_link_update(dev);
/* rte_intr callback: param is the rte_eth_dev registered in bnx2x_dev_start.
 * Runs the shared interrupt action in interrupt context, then acks the
 * interrupt so the kernel re-enables it.
 */
129 bnx2x_interrupt_handler(void *param)
131 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
132 struct bnx2x_softc *sc = dev->data->dev_private;
134 PMD_DEBUG_PERIODIC_LOG(INFO, sc, "Interrupt handled");
136 bnx2x_interrupt_action(dev, 1);
137 rte_intr_ack(&sc->pci_dev->intr_handle);
/* Self-rescheduling EAL alarm: marks the periodic machinery as running
 * (PERIODIC_GO), performs one slow-path pass, then re-arms itself every
 * BNX2X_SP_TIMER_PERIOD. On alarm-set failure it only logs -- polling stops.
 * NOTE(review): truncated view -- declaration of `ret` and the surrounding
 * if/braces are not visible here.
 */
140 static void bnx2x_periodic_start(void *param)
142 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
143 struct bnx2x_softc *sc = dev->data->dev_private;
146 atomic_store_rel_long(&sc->periodic_flags, PERIODIC_GO);
147 bnx2x_interrupt_action(dev, 0);
149 ret = rte_eal_alarm_set(BNX2X_SP_TIMER_PERIOD,
150 bnx2x_periodic_start, (void *)dev);
152 PMD_DRV_LOG(ERR, sc, "Unable to start periodic"
153 " timer rc %d", ret);
/* Stop the slow-path poll: flag PERIODIC_STOP first (so an in-flight callout
 * does not re-arm meaningfully), then cancel the pending EAL alarm.
 */
158 void bnx2x_periodic_stop(void *param)
160 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
161 struct bnx2x_softc *sc = dev->data->dev_private;
163 atomic_store_rel_long(&sc->periodic_flags, PERIODIC_STOP);
165 rte_eal_alarm_cancel(bnx2x_periodic_start, (void *)dev);
167 PMD_DRV_LOG(DEBUG, sc, "Periodic poll stopped");
171 * Devops - helper functions can be called from user application
/* ethdev .dev_configure: capture MTU, validate queue counts (TX must not
 * exceed RX; total queues must not exceed online CPUs), then allocate the
 * ILT memory and install dummy rx/tx burst functions until dev_start.
 * NOTE(review): truncated view -- the error-return statements after each
 * failed check are not visible here.
 */
175 bnx2x_dev_configure(struct rte_eth_dev *dev)
177 struct bnx2x_softc *sc = dev->data->dev_private;
179 int mp_ncpus = sysconf(_SC_NPROCESSORS_CONF);
181 PMD_INIT_FUNC_TRACE(sc);
183 sc->mtu = dev->data->dev_conf.rxmode.mtu;
185 if (dev->data->nb_tx_queues > dev->data->nb_rx_queues) {
186 PMD_DRV_LOG(ERR, sc, "The number of TX queues is greater than number of RX queues");
190 sc->num_queues = MAX(dev->data->nb_rx_queues, dev->data->nb_tx_queues);
191 if (sc->num_queues > mp_ncpus) {
192 PMD_DRV_LOG(ERR, sc, "The number of queues is more than number of CPUs");
196 PMD_DRV_LOG(DEBUG, sc, "num_queues=%d, mtu=%d",
197 sc->num_queues, sc->mtu);
200 if (bnx2x_alloc_ilt_mem(sc) != 0) {
201 PMD_DRV_LOG(ERR, sc, "bnx2x_alloc_ilt_mem was failed");
/* Keep rx/tx burst callbacks safe (no-op) until bnx2x_dev_start. */
205 bnx2x_dev_rxtx_init_dummy(dev);
/* ethdev .dev_start: (re)arm the periodic slow-path poll if stopped, bring
 * the NIC up via bnx2x_init, hook the PCI interrupt, restore the stored
 * multicast list (VF path via vf2pf mailbox), and switch in the real rx/tx
 * burst functions.
 * NOTE(review): truncated view -- the guards around intr registration and
 * the VF-only mcast restore (likely IS_PF/IS_VF checks) are not visible.
 */
210 bnx2x_dev_start(struct rte_eth_dev *dev)
212 struct bnx2x_softc *sc = dev->data->dev_private;
215 PMD_INIT_FUNC_TRACE(sc);
217 /* start the periodic callout */
219 if (atomic_load_acq_long(&sc->periodic_flags) ==
221 bnx2x_periodic_start(dev);
222 PMD_DRV_LOG(DEBUG, sc, "Periodic poll re-started");
226 ret = bnx2x_init(sc);
228 PMD_DRV_LOG(DEBUG, sc, "bnx2x_init failed (%d)", ret);
233 rte_intr_callback_register(&sc->pci_dev->intr_handle,
234 bnx2x_interrupt_handler, (void *)dev);
236 if (rte_intr_enable(&sc->pci_dev->intr_handle))
237 PMD_DRV_LOG(ERR, sc, "rte_intr_enable failed");
240 /* Configure the previously stored Multicast address list */
242 bnx2x_vfpf_set_mcast(sc, sc->mc_addrs, sc->mc_addrs_num);
243 bnx2x_dev_rxtx_init(dev);
245 bnx2x_print_device_info(sc);
/* ethdev .dev_stop: mirror of dev_start -- swap in dummy burst functions
 * first so no traffic flows, tear down the interrupt, stop the periodic
 * poll, flush the multicast filter (NULL list / count 0 == DEL_CMD), then
 * unload the NIC with UNLOAD_NORMAL.
 */
253 bnx2x_dev_stop(struct rte_eth_dev *dev)
255 struct bnx2x_softc *sc = dev->data->dev_private;
256 PMD_INIT_FUNC_TRACE(sc);
258 bnx2x_dev_rxtx_init_dummy(dev);
261 rte_intr_disable(&sc->pci_dev->intr_handle);
262 rte_intr_callback_unregister(&sc->pci_dev->intr_handle,
263 bnx2x_interrupt_handler, (void *)dev);
265 /* stop the periodic callout */
266 bnx2x_periodic_stop(dev);
268 /* Remove the configured Multicast list
269 * Sending NULL for the list of address and the
270 * Number is set to 0 denoting DEL_CMD
273 bnx2x_vfpf_set_mcast(sc, NULL, 0);
274 ret = bnx2x_nic_unload(sc, UNLOAD_NORMAL, FALSE);
276 PMD_DRV_LOG(DEBUG, sc, "bnx2x_nic_unload failed (%d)", ret);
/* ethdev .dev_close: primary-process-only teardown -- clear queues, zero the
 * cached link state, free ILT memory. mac_addrs points into dev_private, so
 * it is NULLed rather than freed (ethdev would otherwise rte_free it).
 */
284 bnx2x_dev_close(struct rte_eth_dev *dev)
286 struct bnx2x_softc *sc = dev->data->dev_private;
288 PMD_INIT_FUNC_TRACE(sc);
290 /* only close in case of the primary process */
291 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
297 bnx2x_dev_clear_queues(dev);
298 memset(&(dev->data->dev_link), 0 , sizeof(struct rte_eth_link));
301 bnx2x_free_ilt_mem(sc);
303 /* mac_addrs must not be freed alone because part of dev_private */
304 dev->data->mac_addrs = NULL;
/* ethdev .promiscuous_enable: select PROMISC (or ALLMULTI_PROMISC if
 * allmulticast is also on) and push the mode to hardware.
 */
310 bnx2x_promisc_enable(struct rte_eth_dev *dev)
312 struct bnx2x_softc *sc = dev->data->dev_private;
314 PMD_INIT_FUNC_TRACE(sc);
315 sc->rx_mode = BNX2X_RX_MODE_PROMISC;
316 if (rte_eth_allmulticast_get(dev->data->port_id) == 1)
317 sc->rx_mode = BNX2X_RX_MODE_ALLMULTI_PROMISC;
318 bnx2x_set_rx_mode(sc);
/* ethdev .promiscuous_disable: fall back to NORMAL, or ALLMULTI if
 * allmulticast is still enabled, then push the mode to hardware.
 */
324 bnx2x_promisc_disable(struct rte_eth_dev *dev)
326 struct bnx2x_softc *sc = dev->data->dev_private;
328 PMD_INIT_FUNC_TRACE(sc);
329 sc->rx_mode = BNX2X_RX_MODE_NORMAL;
330 if (rte_eth_allmulticast_get(dev->data->port_id) == 1)
331 sc->rx_mode = BNX2X_RX_MODE_ALLMULTI;
332 bnx2x_set_rx_mode(sc);
/* ethdev .allmulticast_enable: select ALLMULTI (or ALLMULTI_PROMISC if
 * promiscuous is also on) and push the mode to hardware.
 */
338 bnx2x_dev_allmulticast_enable(struct rte_eth_dev *dev)
340 struct bnx2x_softc *sc = dev->data->dev_private;
342 PMD_INIT_FUNC_TRACE(sc);
343 sc->rx_mode = BNX2X_RX_MODE_ALLMULTI;
344 if (rte_eth_promiscuous_get(dev->data->port_id) == 1)
345 sc->rx_mode = BNX2X_RX_MODE_ALLMULTI_PROMISC;
346 bnx2x_set_rx_mode(sc);
/* ethdev .allmulticast_disable: fall back to NORMAL, or PROMISC if
 * promiscuous is still enabled, then push the mode to hardware.
 */
352 bnx2x_dev_allmulticast_disable(struct rte_eth_dev *dev)
354 struct bnx2x_softc *sc = dev->data->dev_private;
356 PMD_INIT_FUNC_TRACE(sc);
357 sc->rx_mode = BNX2X_RX_MODE_NORMAL;
358 if (rte_eth_promiscuous_get(dev->data->port_id) == 1)
359 sc->rx_mode = BNX2X_RX_MODE_PROMISC;
360 bnx2x_set_rx_mode(sc);
/* ethdev .set_mc_addr_list (VF path): flush the old multicast filter via the
 * vf2pf channel, program the new list, and cache it in sc->mc_addrs so
 * dev_start can re-apply it.
 * NOTE(review): truncated view -- `err` declaration, the error returns after
 * each bnx2x_vfpf_set_mcast call, and the final return are not visible.
 */
366 bnx2x_dev_set_mc_addr_list(struct rte_eth_dev *dev,
367 struct rte_ether_addr *mc_addrs, uint32_t mc_addrs_num)
369 struct bnx2x_softc *sc = dev->data->dev_private;
371 PMD_INIT_FUNC_TRACE(sc);
372 /* flush previous addresses */
373 err = bnx2x_vfpf_set_mcast(sc, NULL, 0);
376 sc->mc_addrs_num = 0;
379 err = bnx2x_vfpf_set_mcast(sc, mc_addrs, mc_addrs_num);
/* Remember the list for re-programming on the next dev_start. */
383 sc->mc_addrs_num = mc_addrs_num;
384 memcpy(sc->mc_addrs, mc_addrs, mc_addrs_num * sizeof(*mc_addrs));
/* PF ethdev .link_update: thin wrapper over bnx2x_link_update; the
 * wait_to_complete hint is ignored.
 */
390 bnx2x_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
392 struct bnx2x_softc *sc = dev->data->dev_private;
394 PMD_INIT_FUNC_TRACE(sc);
396 return bnx2x_link_update(dev);
/* VF ethdev .link_update: refresh link state, then check the PF bulletin
 * board; if the PF signalled CHANNEL_DOWN the VF is dead, so force the
 * reported link down.
 * NOTE(review): truncated view -- `ret` declaration and the final return
 * are not visible here.
 */
400 bnx2xvf_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
402 struct bnx2x_softc *sc = dev->data->dev_private;
405 ret = bnx2x_link_update(dev);
407 bnx2x_check_bull(sc);
408 if (sc->old_bulletin.valid_bitmap & (1 << CHANNEL_DOWN)) {
409 PMD_DRV_LOG(ERR, sc, "PF indicated channel is down."
410 "VF device is no longer operational");
411 dev->data->dev_link.link_status = ETH_LINK_DOWN;
/* ethdev .stats_get: trigger a firmware stats refresh, then fold the
 * driver's split hi/lo 64-bit counters (HILO_U64) into rte_eth_stats.
 * imissed aggregates BRB drops, truncates, truncate-discards and rx_nombuf.
 * NOTE(review): truncated view -- declarations of brb_drops, the
 * stats->ipackets/opackets/... lvalues on the assignment heads, and the
 * final return are not visible here.
 */
418 bnx2x_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
420 struct bnx2x_softc *sc = dev->data->dev_private;
421 uint32_t brb_truncate_discard;
423 uint64_t brb_truncates;
425 PMD_INIT_FUNC_TRACE(sc);
427 bnx2x_stats_handle(sc, STATS_EVENT_UPDATE);
429 memset(stats, 0, sizeof (struct rte_eth_stats));
/* rx packets = unicast + multicast + broadcast received */
432 HILO_U64(sc->eth_stats.total_unicast_packets_received_hi,
433 sc->eth_stats.total_unicast_packets_received_lo) +
434 HILO_U64(sc->eth_stats.total_multicast_packets_received_hi,
435 sc->eth_stats.total_multicast_packets_received_lo) +
436 HILO_U64(sc->eth_stats.total_broadcast_packets_received_hi,
437 sc->eth_stats.total_broadcast_packets_received_lo);
/* tx packets = unicast + multicast + broadcast transmitted */
440 HILO_U64(sc->eth_stats.total_unicast_packets_transmitted_hi,
441 sc->eth_stats.total_unicast_packets_transmitted_lo) +
442 HILO_U64(sc->eth_stats.total_multicast_packets_transmitted_hi,
443 sc->eth_stats.total_multicast_packets_transmitted_lo) +
444 HILO_U64(sc->eth_stats.total_broadcast_packets_transmitted_hi,
445 sc->eth_stats.total_broadcast_packets_transmitted_lo);
448 HILO_U64(sc->eth_stats.total_bytes_received_hi,
449 sc->eth_stats.total_bytes_received_lo);
452 HILO_U64(sc->eth_stats.total_bytes_transmitted_hi,
453 sc->eth_stats.total_bytes_transmitted_lo);
456 HILO_U64(sc->eth_stats.error_bytes_received_hi,
457 sc->eth_stats.error_bytes_received_lo);
462 HILO_U64(sc->eth_stats.no_buff_discard_hi,
463 sc->eth_stats.no_buff_discard_lo);
466 HILO_U64(sc->eth_stats.brb_drop_hi,
467 sc->eth_stats.brb_drop_lo);
470 HILO_U64(sc->eth_stats.brb_truncate_hi,
471 sc->eth_stats.brb_truncate_lo);
473 brb_truncate_discard = sc->eth_stats.brb_truncate_discard;
475 stats->imissed = brb_drops + brb_truncates +
476 brb_truncate_discard + stats->rx_nombuf;
/* ethdev .xstats_get_names: copy the static bnx2x_xstats_strings names into
 * the caller's array when one is supplied; per the ethdev contract the
 * function returns the stat count either way.
 * NOTE(review): truncated view -- the return statement is not visible.
 */
482 bnx2x_get_xstats_names(__rte_unused struct rte_eth_dev *dev,
483 struct rte_eth_xstat_name *xstats_names,
484 __rte_unused unsigned limit)
486 unsigned int i, stat_cnt = RTE_DIM(bnx2x_xstats_strings);
488 if (xstats_names != NULL)
489 for (i = 0; i < stat_cnt; i++)
490 strlcpy(xstats_names[i].name,
491 bnx2x_xstats_strings[i].name,
492 sizeof(xstats_names[i].name));
/* ethdev .xstats_get: refresh stats, then materialize each table entry.
 * Entries whose hi/lo offsets differ are two 32-bit halves combined via
 * HILO_U64; entries with equal offsets are read as a single 64-bit value.
 * NOTE(review): truncated view -- the n-vs-num capacity check, the `else`
 * before the single-value read, and the final return are not visible.
 */
498 bnx2x_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
501 struct bnx2x_softc *sc = dev->data->dev_private;
502 unsigned int num = RTE_DIM(bnx2x_xstats_strings);
507 bnx2x_stats_handle(sc, STATS_EVENT_UPDATE);
509 for (num = 0; num < n; num++) {
510 if (bnx2x_xstats_strings[num].offset_hi !=
511 bnx2x_xstats_strings[num].offset_lo)
512 xstats[num].value = HILO_U64(
513 *(uint32_t *)((char *)&sc->eth_stats +
514 bnx2x_xstats_strings[num].offset_hi),
515 *(uint32_t *)((char *)&sc->eth_stats +
516 bnx2x_xstats_strings[num].offset_lo));
519 *(uint64_t *)((char *)&sc->eth_stats +
520 bnx2x_xstats_strings[num].offset_lo);
521 xstats[num].id = num;
/* ethdev .dev_infos_get: advertise device limits (queues, buffer sizes,
 * MAC slots), 10G/20G speed capability, jumbo-frame RX offload, and
 * descriptor-count limits.
 * NOTE(review): truncated view -- tx_desc_lim.nb_min/other tail fields and
 * the return are not visible here.
 */
528 bnx2x_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
530 struct bnx2x_softc *sc = dev->data->dev_private;
532 dev_info->max_rx_queues = sc->max_rx_queues;
533 dev_info->max_tx_queues = sc->max_tx_queues;
534 dev_info->min_rx_bufsize = BNX2X_MIN_RX_BUF_SIZE;
535 dev_info->max_rx_pktlen = BNX2X_MAX_RX_PKT_LEN;
536 dev_info->max_mac_addrs = BNX2X_MAX_MAC_ADDRS;
537 dev_info->speed_capa = ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G;
538 dev_info->rx_offload_capa = DEV_RX_OFFLOAD_JUMBO_FRAME;
540 dev_info->rx_desc_lim.nb_max = MAX_RX_AVAIL;
541 dev_info->rx_desc_lim.nb_min = MIN_RX_SIZE_NONTPA;
542 dev_info->rx_desc_lim.nb_mtu_seg_max = 1;
543 dev_info->tx_desc_lim.nb_max = MAX_TX_AVAIL;
/* ethdev .mac_addr_add: delegate to the PF/VF-specific mac_ops hook when
 * one is installed; otherwise silently a no-op for the add itself.
 */
549 bnx2x_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
550 uint32_t index, uint32_t pool)
552 struct bnx2x_softc *sc = dev->data->dev_private;
554 if (sc->mac_ops.mac_addr_add) {
555 sc->mac_ops.mac_addr_add(dev, mac_addr, index, pool);
/* ethdev .mac_addr_remove: delegate to the mac_ops hook when installed. */
562 bnx2x_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
564 struct bnx2x_softc *sc = dev->data->dev_private;
566 if (sc->mac_ops.mac_addr_remove)
567 sc->mac_ops.mac_addr_remove(dev, index);
/* dev_ops for the physical function. Differs from the VF table below only
 * in link_update (PF variant) and the absence of set_mc_addr_list.
 */
570 static const struct eth_dev_ops bnx2x_eth_dev_ops = {
571 .dev_configure = bnx2x_dev_configure,
572 .dev_start = bnx2x_dev_start,
573 .dev_stop = bnx2x_dev_stop,
574 .dev_close = bnx2x_dev_close,
575 .promiscuous_enable = bnx2x_promisc_enable,
576 .promiscuous_disable = bnx2x_promisc_disable,
577 .allmulticast_enable = bnx2x_dev_allmulticast_enable,
578 .allmulticast_disable = bnx2x_dev_allmulticast_disable,
579 .link_update = bnx2x_dev_link_update,
580 .stats_get = bnx2x_dev_stats_get,
581 .xstats_get = bnx2x_dev_xstats_get,
582 .xstats_get_names = bnx2x_get_xstats_names,
583 .dev_infos_get = bnx2x_dev_infos_get,
584 .rx_queue_setup = bnx2x_dev_rx_queue_setup,
585 .rx_queue_release = bnx2x_dev_rx_queue_release,
586 .tx_queue_setup = bnx2x_dev_tx_queue_setup,
587 .tx_queue_release = bnx2x_dev_tx_queue_release,
588 .mac_addr_add = bnx2x_mac_addr_add,
589 .mac_addr_remove = bnx2x_mac_addr_remove,
593 * dev_ops for virtual function
/* Same as the PF table plus set_mc_addr_list (VFs program multicast via the
 * vf2pf mailbox) and the VF-specific link_update that watches the PF
 * bulletin board.
 */
595 static const struct eth_dev_ops bnx2xvf_eth_dev_ops = {
596 .dev_configure = bnx2x_dev_configure,
597 .dev_start = bnx2x_dev_start,
598 .dev_stop = bnx2x_dev_stop,
599 .dev_close = bnx2x_dev_close,
600 .promiscuous_enable = bnx2x_promisc_enable,
601 .promiscuous_disable = bnx2x_promisc_disable,
602 .allmulticast_enable = bnx2x_dev_allmulticast_enable,
603 .allmulticast_disable = bnx2x_dev_allmulticast_disable,
604 .set_mc_addr_list = bnx2x_dev_set_mc_addr_list,
605 .link_update = bnx2xvf_dev_link_update,
606 .stats_get = bnx2x_dev_stats_get,
607 .xstats_get = bnx2x_dev_xstats_get,
608 .xstats_get_names = bnx2x_get_xstats_names,
609 .dev_infos_get = bnx2x_dev_infos_get,
610 .rx_queue_setup = bnx2x_dev_rx_queue_setup,
611 .rx_queue_release = bnx2x_dev_rx_queue_release,
612 .tx_queue_setup = bnx2x_dev_tx_queue_setup,
613 .tx_queue_release = bnx2x_dev_tx_queue_release,
614 .mac_addr_add = bnx2x_mac_addr_add,
615 .mac_addr_remove = bnx2x_mac_addr_remove,
/* Shared PF/VF probe-time initialization (is_vf selects behavior):
 *  - wire up dev_ops, copy PCI identity into the softc, map BARs,
 *  - load firmware, attach the device, start the periodic slow-path alarm,
 *  - expose the link-params MAC as the port's mac_addrs,
 *  - for VFs: allocate vf2pf mailbox + pf2vf bulletin DMA and acquire
 *    resources from the PF.
 * NOTE(review): heavily truncated view -- `ret` declaration, the
 * secondary-process early return, IS_VF/IS_PF branch keywords, several
 * error-handling paths and the out/failure labels are not visible here.
 */
620 bnx2x_common_dev_init(struct rte_eth_dev *eth_dev, int is_vf)
623 struct rte_pci_device *pci_dev;
624 struct rte_pci_addr pci_addr;
625 struct bnx2x_softc *sc;
/* print adapter banner only once across all probed ports */
626 static bool adapter_info = true;
628 /* Extract key data structures */
629 sc = eth_dev->data->dev_private;
630 pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
631 pci_addr = pci_dev->addr;
633 snprintf(sc->devinfo.name, NAME_SIZE, PCI_SHORT_PRI_FMT ":dpdk-port-%u",
634 pci_addr.bus, pci_addr.devid, pci_addr.function,
635 eth_dev->data->port_id);
637 PMD_INIT_FUNC_TRACE(sc);
639 eth_dev->dev_ops = is_vf ? &bnx2xvf_eth_dev_ops : &bnx2x_eth_dev_ops;
641 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
642 PMD_DRV_LOG(ERR, sc, "Skipping device init from secondary process");
646 rte_eth_copy_pci_info(eth_dev, pci_dev);
648 sc->pcie_bus = pci_dev->addr.bus;
649 sc->pcie_device = pci_dev->addr.devid;
651 sc->devinfo.vendor_id = pci_dev->id.vendor_id;
652 sc->devinfo.device_id = pci_dev->id.device_id;
653 sc->devinfo.subvendor_id = pci_dev->id.subsystem_vendor_id;
654 sc->devinfo.subdevice_id = pci_dev->id.subsystem_device_id;
657 sc->flags = BNX2X_IS_VF_FLAG;
659 sc->pcie_func = pci_dev->addr.function;
660 sc->bar[BAR0].base_addr = (void *)pci_dev->mem_resource[0].addr;
/* VF doorbells live inside BAR0 at PXP_VF_ADDR_DB_START; PF uses BAR2. */
662 sc->bar[BAR1].base_addr = (void *)
663 ((uintptr_t)pci_dev->mem_resource[0].addr + PXP_VF_ADDR_DB_START);
665 sc->bar[BAR1].base_addr = pci_dev->mem_resource[2].addr;
667 assert(sc->bar[BAR0].base_addr);
668 assert(sc->bar[BAR1].base_addr);
670 bnx2x_load_firmware(sc);
671 assert(sc->firmware);
673 if (eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
676 sc->rx_budget = BNX2X_RX_BUDGET;
677 sc->hc_rx_ticks = BNX2X_RX_TICKS;
678 sc->hc_tx_ticks = BNX2X_TX_TICKS;
680 sc->interrupt_mode = INTR_MODE_SINGLE_MSIX;
681 sc->rx_mode = BNX2X_RX_MODE_NORMAL;
683 sc->pci_dev = pci_dev;
684 ret = bnx2x_attach(sc);
686 PMD_DRV_LOG(ERR, sc, "bnx2x_attach failed (%d)", ret);
690 /* Print important adapter info for the user. */
692 bnx2x_print_adapter_info(sc);
693 adapter_info = false;
696 /* schedule periodic poll for slowpath link events */
698 PMD_DRV_LOG(DEBUG, sc, "Scheduling periodic poll for slowpath link events");
699 ret = rte_eal_alarm_set(BNX2X_SP_TIMER_PERIOD,
700 bnx2x_periodic_start, (void *)eth_dev);
702 PMD_DRV_LOG(ERR, sc, "Unable to start periodic"
703 " timer rc %d", ret);
/* mac_addrs aliases memory inside dev_private; see bnx2x_dev_close. */
708 eth_dev->data->mac_addrs =
709 (struct rte_ether_addr *)sc->link_params.mac_addr;
712 rte_spinlock_init(&sc->vf2pf_lock);
714 ret = bnx2x_dma_alloc(sc, sizeof(struct bnx2x_vf_mbx_msg),
715 &sc->vf2pf_mbox_mapping, "vf2pf_mbox",
716 RTE_CACHE_LINE_SIZE);
720 sc->vf2pf_mbox = (struct bnx2x_vf_mbx_msg *)
721 sc->vf2pf_mbox_mapping.vaddr;
723 ret = bnx2x_dma_alloc(sc, sizeof(struct bnx2x_vf_bulletin),
724 &sc->pf2vf_bulletin_mapping, "vf2pf_bull",
725 RTE_CACHE_LINE_SIZE);
729 sc->pf2vf_bulletin = (struct bnx2x_vf_bulletin *)
730 sc->pf2vf_bulletin_mapping.vaddr;
732 ret = bnx2x_vf_get_resources(sc, sc->max_tx_queues,
/* failure path: undo the periodic alarm armed above */
742 bnx2x_periodic_stop(eth_dev);
/* PF probe entry point: common init with is_vf = 0. */
748 eth_bnx2x_dev_init(struct rte_eth_dev *eth_dev)
750 struct bnx2x_softc *sc = eth_dev->data->dev_private;
751 PMD_INIT_FUNC_TRACE(sc);
752 return bnx2x_common_dev_init(eth_dev, 0);
/* VF probe entry point: common init with is_vf = 1. */
756 eth_bnx2xvf_dev_init(struct rte_eth_dev *eth_dev)
758 struct bnx2x_softc *sc = eth_dev->data->dev_private;
759 PMD_INIT_FUNC_TRACE(sc);
760 return bnx2x_common_dev_init(eth_dev, 1);
/* Uninit hook shared by PF and VF remove paths: full close of the port. */
763 static int eth_bnx2x_dev_uninit(struct rte_eth_dev *eth_dev)
765 struct bnx2x_softc *sc = eth_dev->data->dev_private;
766 PMD_INIT_FUNC_TRACE(sc);
767 bnx2x_dev_close(eth_dev);
/* Forward declarations so the shared probe callback can tell which driver
 * (PF or VF) invoked it by comparing pci_drv pointers.
 */
771 static struct rte_pci_driver rte_bnx2x_pmd;
772 static struct rte_pci_driver rte_bnx2xvf_pmd;
/* Shared PCI probe: dispatch to the PF or VF dev_init based on the driver
 * that matched. NOTE(review): truncated view -- the fallthrough return for
 * an unknown driver is not visible here.
 */
774 static int eth_bnx2x_pci_probe(struct rte_pci_driver *pci_drv,
775 struct rte_pci_device *pci_dev)
777 if (pci_drv == &rte_bnx2x_pmd)
778 return rte_eth_dev_pci_generic_probe(pci_dev,
779 sizeof(struct bnx2x_softc), eth_bnx2x_dev_init);
780 else if (pci_drv == &rte_bnx2xvf_pmd)
781 return rte_eth_dev_pci_generic_probe(pci_dev,
782 sizeof(struct bnx2x_softc), eth_bnx2xvf_dev_init);
/* Shared PCI remove: generic ethdev teardown via eth_bnx2x_dev_uninit. */
787 static int eth_bnx2x_pci_remove(struct rte_pci_device *pci_dev)
789 return rte_eth_dev_pci_generic_remove(pci_dev, eth_bnx2x_dev_uninit);
/* PF driver definition: needs BAR mapping and supports link-state-change
 * interrupts (LSC).
 */
792 static struct rte_pci_driver rte_bnx2x_pmd = {
793 .id_table = pci_id_bnx2x_map,
794 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
795 .probe = eth_bnx2x_pci_probe,
796 .remove = eth_bnx2x_pci_remove,
800 * virtual function driver struct
/* VF driver definition: BAR mapping only; no LSC interrupt flag (link state
 * comes via the PF bulletin board instead).
 */
802 static struct rte_pci_driver rte_bnx2xvf_pmd = {
803 .id_table = pci_id_bnx2xvf_map,
804 .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
805 .probe = eth_bnx2x_pci_probe,
806 .remove = eth_bnx2x_pci_remove,
/* Register both PMDs with the EAL, publish their PCI id tables, declare the
 * kernel-module dependencies (note: PF also supports uio_pci_generic, VF
 * does not), and create the init/driver log types at NOTICE level.
 */
809 RTE_PMD_REGISTER_PCI(net_bnx2x, rte_bnx2x_pmd);
810 RTE_PMD_REGISTER_PCI_TABLE(net_bnx2x, pci_id_bnx2x_map);
811 RTE_PMD_REGISTER_KMOD_DEP(net_bnx2x, "* igb_uio | uio_pci_generic | vfio-pci");
812 RTE_PMD_REGISTER_PCI(net_bnx2xvf, rte_bnx2xvf_pmd);
813 RTE_PMD_REGISTER_PCI_TABLE(net_bnx2xvf, pci_id_bnx2xvf_map);
814 RTE_PMD_REGISTER_KMOD_DEP(net_bnx2xvf, "* igb_uio | vfio-pci");
815 RTE_LOG_REGISTER_SUFFIX(bnx2x_logtype_init, init, NOTICE);
816 RTE_LOG_REGISTER_SUFFIX(bnx2x_logtype_driver, driver, NOTICE);