2 * Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
4 * Copyright (c) 2015 QLogic Corporation.
8 * See LICENSE.bnx2x_pmd for copyright and licensing details.
12 #include "bnx2x_rxtx.h"
17 * The set of PCI devices this driver supports
/*
 * PF PCI ID table: entries are generated by expanding the
 * RTE_PCI_DEV_ID_DECL_BNX2X() macro for every matching device listed
 * in rte_pci_dev_ids.h.  NOTE(review): the array's closing "};" is not
 * visible in this view — confirm it follows the include.
 */
19 static struct rte_pci_id pci_id_bnx2x_map[] = {
20 #define RTE_PCI_DEV_ID_DECL_BNX2X(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
21 #include "rte_pci_dev_ids.h"
/*
 * VF PCI ID table: same macro-expansion technique as the PF table
 * above, keyed on RTE_PCI_DEV_ID_DECL_BNX2XVF() entries.
 */
25 static struct rte_pci_id pci_id_bnx2xvf_map[] = {
26 #define RTE_PCI_DEV_ID_DECL_BNX2XVF(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
27 #include "rte_pci_dev_ids.h"
/*
 * Refresh dev->data->dev_link from the driver's cached link variables.
 * Asks the core driver to re-read link status, then copies speed,
 * duplex, autoneg and up/down state into the ethdev link structure.
 * NOTE(review): several lines (braces, case labels) are not visible in
 * this view of the file.
 */
32 bnx2x_link_update(struct rte_eth_dev *dev)
34 struct bnx2x_softc *sc = dev->data->dev_private;
36 PMD_INIT_FUNC_TRACE();
/* Pull fresh link state from the adapter into sc->link_vars. */
37 bnx2x_link_status_update(sc);
39 dev->data->dev_link.link_speed = sc->link_vars.line_speed;
40 switch (sc->link_vars.duplex) {
42 dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
45 dev->data->dev_link.link_duplex = ETH_LINK_HALF_DUPLEX;
/* Autoneg is reported as the inverse of the FIXED-speed request bit. */
48 dev->data->dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
49 ETH_LINK_SPEED_FIXED);
50 dev->data->dev_link.link_status = sc->link_vars.link_up;
/*
 * Service one interrupt: run the legacy interrupt handler, drive the
 * periodic callout if enabled, then re-read the link status word from
 * the shared-memory port mailbox and refresh the ethdev link state if
 * it changed.
 */
54 bnx2x_interrupt_action(struct rte_eth_dev *dev)
56 struct bnx2x_softc *sc = dev->data->dev_private;
59 PMD_DEBUG_PERIODIC_LOG(INFO, "Interrupt handled");
61 if (bnx2x_intr_legacy(sc, 0))
63 if (sc->periodic_flags & PERIODIC_GO)
64 bnx2x_periodic_callout(sc);
/* Read the per-port link status directly from device shared memory. */
65 link_status = REG_RD(sc, sc->link_params.shmem_base +
66 offsetof(struct shmem_region,
67 port_mb[sc->link_params.port].link_status));
/* Only do the (relatively expensive) update when the state flipped. */
68 if ((link_status & LINK_STATUS_LINK_UP) != dev->data->dev_link.link_status)
69 bnx2x_link_update(dev);
/*
 * rte_intr callback: param is the rte_eth_dev registered in dev_start.
 * Dispatches to bnx2x_interrupt_action() and then re-enables the
 * (auto-masked) interrupt line.
 */
72 static __rte_unused void
73 bnx2x_interrupt_handler(__rte_unused struct rte_intr_handle *handle, void *param)
75 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
77 bnx2x_interrupt_action(dev);
78 rte_intr_enable(&(dev->pci_dev->intr_handle));
82 * Device operations (devops) — helper callbacks invoked on behalf of the user application
/*
 * Ethdev .dev_configure: validate the requested queue layout, derive
 * the MTU from the jumbo-frame configuration, and allocate the ILT and
 * host HSI memory needed before start.  On HSI allocation failure the
 * already-allocated ILT memory is released.
 * NOTE(review): error-return statements are on lines not visible here.
 */
86 bnx2x_dev_configure(struct rte_eth_dev *dev)
88 struct bnx2x_softc *sc = dev->data->dev_private;
89 int mp_ncpus = sysconf(_SC_NPROCESSORS_CONF);
91 PMD_INIT_FUNC_TRACE();
/* With jumbo frames enabled, the MTU follows the configured max packet length. */
93 if (dev->data->dev_conf.rxmode.jumbo_frame)
94 sc->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len;
96 if (dev->data->nb_tx_queues > dev->data->nb_rx_queues) {
97 PMD_DRV_LOG(ERR, "The number of TX queues is greater than number of RX queues");
/* Cap total queues at the number of configured CPUs. */
101 sc->num_queues = MAX(dev->data->nb_rx_queues, dev->data->nb_tx_queues);
102 if (sc->num_queues > mp_ncpus) {
103 PMD_DRV_LOG(ERR, "The number of queues is more than number of CPUs");
107 PMD_DRV_LOG(DEBUG, "num_queues=%d, mtu=%d",
108 sc->num_queues, sc->mtu);
/* Allocate the internal lookup table (ILT) memory. */
111 if (bnx2x_alloc_ilt_mem(sc) != 0) {
112 PMD_DRV_LOG(ERR, "bnx2x_alloc_ilt_mem was failed");
116 /* allocate the host hardware/software hsi structures */
117 if (bnx2x_alloc_hsi_mem(sc) != 0) {
118 PMD_DRV_LOG(ERR, "bnx2x_alloc_hsi_mem was failed");
/* Undo the ILT allocation so configure can be retried cleanly. */
119 bnx2x_free_ilt_mem(sc);
/*
 * Ethdev .dev_start: bring the NIC up via bnx2x_init(), register and
 * enable the interrupt callback, initialize the RX path, and print
 * adapter information for the user.
 */
127 bnx2x_dev_start(struct rte_eth_dev *dev)
129 struct bnx2x_softc *sc = dev->data->dev_private;
132 PMD_INIT_FUNC_TRACE();
134 ret = bnx2x_init(sc);
136 PMD_DRV_LOG(DEBUG, "bnx2x_init failed (%d)", ret);
/* Hook the LSC/periodic interrupt handler to this device. */
141 rte_intr_callback_register(&(dev->pci_dev->intr_handle),
142 bnx2x_interrupt_handler, (void *)dev);
/* Enable failure is logged but not treated as fatal. */
144 if(rte_intr_enable(&(dev->pci_dev->intr_handle)))
145 PMD_DRV_LOG(ERR, "rte_intr_enable failed");
148 ret = bnx2x_dev_rx_init(dev);
150 PMD_DRV_LOG(DEBUG, "bnx2x_dev_rx_init returned error code");
154 /* Print important adapter info for the user. */
155 bnx2x_print_adapter_info(sc);
/*
 * Ethdev .dev_stop: mirror of dev_start — disable and unregister the
 * interrupt callback, then unload the NIC with a normal (non-reset)
 * unload.  Failures from bnx2x_nic_unload() are logged.
 */
163 bnx2x_dev_stop(struct rte_eth_dev *dev)
165 struct bnx2x_softc *sc = dev->data->dev_private;
168 PMD_INIT_FUNC_TRACE();
171 rte_intr_disable(&(dev->pci_dev->intr_handle));
172 rte_intr_callback_unregister(&(dev->pci_dev->intr_handle),
173 bnx2x_interrupt_handler, (void *)dev);
176 ret = bnx2x_nic_unload(sc, UNLOAD_NORMAL, FALSE);
178 PMD_DRV_LOG(DEBUG, "bnx2x_nic_unload failed (%d)", ret);
/*
 * Ethdev .dev_close: release queue resources, clear the reported link
 * state, and free the HSI and ILT memory allocated in dev_configure.
 */
186 bnx2x_dev_close(struct rte_eth_dev *dev)
188 struct bnx2x_softc *sc = dev->data->dev_private;
190 PMD_INIT_FUNC_TRACE();
195 bnx2x_dev_clear_queues(dev);
/* Report link fully down after close. */
196 memset(&(dev->data->dev_link), 0 , sizeof(struct rte_eth_link));
198 /* free the host hardware/software hsi structures */
199 bnx2x_free_hsi_mem(sc);
/* Free the ILT memory last, mirroring the allocation order in configure. */
202 bnx2x_free_ilt_mem(sc);
/*
 * Ethdev .promiscuous_enable: record promiscuous RX mode in the softc
 * and push it to the hardware filter configuration.
 */
206 bnx2x_promisc_enable(struct rte_eth_dev *dev)
208 struct bnx2x_softc *sc = dev->data->dev_private;
210 PMD_INIT_FUNC_TRACE();
211 sc->rx_mode = BNX2X_RX_MODE_PROMISC;
212 bnx2x_set_rx_mode(sc);
/*
 * Ethdev .promiscuous_disable: restore normal RX filtering and apply
 * it to the hardware.  NOTE(review): an active allmulticast setting is
 * overwritten to NORMAL here — confirm that matches ethdev semantics.
 */
216 bnx2x_promisc_disable(struct rte_eth_dev *dev)
218 struct bnx2x_softc *sc = dev->data->dev_private;
220 PMD_INIT_FUNC_TRACE();
221 sc->rx_mode = BNX2X_RX_MODE_NORMAL;
222 bnx2x_set_rx_mode(sc);
/*
 * Ethdev .allmulticast_enable: accept all multicast traffic by
 * switching the RX filter mode and applying it to the hardware.
 */
226 bnx2x_dev_allmulticast_enable(struct rte_eth_dev *dev)
228 struct bnx2x_softc *sc = dev->data->dev_private;
230 PMD_INIT_FUNC_TRACE();
231 sc->rx_mode = BNX2X_RX_MODE_ALLMULTI;
232 bnx2x_set_rx_mode(sc);
/*
 * Ethdev .allmulticast_disable: return to the normal RX filter mode
 * and apply it to the hardware.  NOTE(review): this also cancels an
 * active promiscuous setting — confirm that is intended.
 */
236 bnx2x_dev_allmulticast_disable(struct rte_eth_dev *dev)
238 struct bnx2x_softc *sc = dev->data->dev_private;
240 PMD_INIT_FUNC_TRACE();
241 sc->rx_mode = BNX2X_RX_MODE_NORMAL;
242 bnx2x_set_rx_mode(sc);
/*
 * Ethdev .link_update (PF): refresh the link and report whether it
 * changed.  Returns 0 when the link status changed, -1 when it is
 * unchanged (the ethdev link_update convention of this DPDK version).
 */
246 bnx2x_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
248 PMD_INIT_FUNC_TRACE();
250 int old_link_status = dev->data->dev_link.link_status;
252 bnx2x_link_update(dev);
254 return old_link_status == dev->data->dev_link.link_status ? -1 : 0;
/*
 * Ethdev .link_update (VF): same as the PF variant, plus a check of
 * the PF-to-VF bulletin board.  If the PF has signalled CHANNEL_DOWN,
 * the VF is no longer operational and the link is forced down.
 * Returns 0 when the link status changed, -1 when unchanged.
 */
258 bnx2xvf_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
260 int old_link_status = dev->data->dev_link.link_status;
261 struct bnx2x_softc *sc = dev->data->dev_private;
263 bnx2x_link_update(dev);
/* Poll the PF bulletin for a channel-down notification. */
265 bnx2x_check_bull(sc);
266 if (sc->old_bulletin.valid_bitmap & (1 << CHANNEL_DOWN)) {
267 PMD_DRV_LOG(ERR, "PF indicated channel is down."
268 "VF device is no longer operational");
269 dev->data->dev_link.link_status = ETH_LINK_DOWN;
272 return old_link_status == dev->data->dev_link.link_status ? -1 : 0;
/*
 * Ethdev .stats_get: trigger a statistics update, then aggregate the
 * split hi/lo 32-bit hardware counters (via HILO_U64) into the generic
 * rte_eth_stats fields.  imissed is computed as the sum of BRB drops,
 * BRB truncates, BRB truncate-discards and mbuf-allocation failures.
 */
276 bnx2x_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
278 struct bnx2x_softc *sc = dev->data->dev_private;
279 uint32_t brb_truncate_discard;
281 uint64_t brb_truncates;
283 PMD_INIT_FUNC_TRACE();
/* Ask the driver core to refresh sc->eth_stats from hardware. */
285 bnx2x_stats_handle(sc, STATS_EVENT_UPDATE);
287 memset(stats, 0, sizeof (struct rte_eth_stats));
/* RX packets: unicast + multicast + broadcast. */
290 HILO_U64(sc->eth_stats.total_unicast_packets_received_hi,
291 sc->eth_stats.total_unicast_packets_received_lo) +
292 HILO_U64(sc->eth_stats.total_multicast_packets_received_hi,
293 sc->eth_stats.total_multicast_packets_received_lo) +
294 HILO_U64(sc->eth_stats.total_broadcast_packets_received_hi,
295 sc->eth_stats.total_broadcast_packets_received_lo);
/* TX packets: unicast + multicast + broadcast. */
298 HILO_U64(sc->eth_stats.total_unicast_packets_transmitted_hi,
299 sc->eth_stats.total_unicast_packets_transmitted_lo) +
300 HILO_U64(sc->eth_stats.total_multicast_packets_transmitted_hi,
301 sc->eth_stats.total_multicast_packets_transmitted_lo) +
302 HILO_U64(sc->eth_stats.total_broadcast_packets_transmitted_hi,
303 sc->eth_stats.total_broadcast_packets_transmitted_lo);
/* RX byte count. */
306 HILO_U64(sc->eth_stats.total_bytes_received_hi,
307 sc->eth_stats.total_bytes_received_lo);
/* TX byte count. */
310 HILO_U64(sc->eth_stats.total_bytes_transmitted_hi,
311 sc->eth_stats.total_bytes_transmitted_lo);
/* RX error byte count. */
314 HILO_U64(sc->eth_stats.error_bytes_received_hi,
315 sc->eth_stats.error_bytes_received_lo);
/* RX mbuf allocation failures. */
320 HILO_U64(sc->eth_stats.no_buff_discard_hi,
321 sc->eth_stats.no_buff_discard_lo);
/* Buffer-block (BRB) drop counters feeding imissed below. */
324 HILO_U64(sc->eth_stats.brb_drop_hi,
325 sc->eth_stats.brb_drop_lo);
328 HILO_U64(sc->eth_stats.brb_truncate_hi,
329 sc->eth_stats.brb_truncate_lo);
331 brb_truncate_discard = sc->eth_stats.brb_truncate_discard;
333 stats->imissed = brb_drops + brb_truncates +
334 brb_truncate_discard + stats->rx_nombuf;
/*
 * Ethdev .dev_infos_get: report device limits (queue counts, RX buffer
 * and packet sizes, MAC address slots) and advertise 10G/20G speeds.
 * NOTE(review): dev_info is marked __rte_unused yet is dereferenced —
 * the attribute looks stale.
 */
338 bnx2x_dev_infos_get(struct rte_eth_dev *dev, __rte_unused struct rte_eth_dev_info *dev_info)
340 struct bnx2x_softc *sc = dev->data->dev_private;
341 dev_info->max_rx_queues = sc->max_rx_queues;
342 dev_info->max_tx_queues = sc->max_tx_queues;
343 dev_info->min_rx_bufsize = BNX2X_MIN_RX_BUF_SIZE;
344 dev_info->max_rx_pktlen = BNX2X_MAX_RX_PKT_LEN;
345 dev_info->max_mac_addrs = BNX2X_MAX_MAC_ADDRS;
346 dev_info->speed_capa = ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G;
/*
 * Ethdev .mac_addr_add: delegate to the PF/VF-specific handler if one
 * was installed in sc->mac_ops; silently a no-op otherwise.
 */
350 bnx2x_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
351 uint32_t index, uint32_t pool)
353 struct bnx2x_softc *sc = dev->data->dev_private;
355 if (sc->mac_ops.mac_addr_add)
356 sc->mac_ops.mac_addr_add(dev, mac_addr, index, pool);
/*
 * Ethdev .mac_addr_remove: delegate to the PF/VF-specific handler if
 * one was installed in sc->mac_ops; silently a no-op otherwise.
 */
360 bnx2x_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
362 struct bnx2x_softc *sc = dev->data->dev_private;
364 if (sc->mac_ops.mac_addr_remove)
365 sc->mac_ops.mac_addr_remove(dev, index);
/*
 * Ethdev callback table for the physical function (PF) driver.
 */
368 static const struct eth_dev_ops bnx2x_eth_dev_ops = {
369 .dev_configure = bnx2x_dev_configure,
370 .dev_start = bnx2x_dev_start,
371 .dev_stop = bnx2x_dev_stop,
372 .dev_close = bnx2x_dev_close,
373 .promiscuous_enable = bnx2x_promisc_enable,
374 .promiscuous_disable = bnx2x_promisc_disable,
375 .allmulticast_enable = bnx2x_dev_allmulticast_enable,
376 .allmulticast_disable = bnx2x_dev_allmulticast_disable,
377 .link_update = bnx2x_dev_link_update,
378 .stats_get = bnx2x_dev_stats_get,
379 .dev_infos_get = bnx2x_dev_infos_get,
380 .rx_queue_setup = bnx2x_dev_rx_queue_setup,
381 .rx_queue_release = bnx2x_dev_rx_queue_release,
382 .tx_queue_setup = bnx2x_dev_tx_queue_setup,
383 .tx_queue_release = bnx2x_dev_tx_queue_release,
384 .mac_addr_add = bnx2x_mac_addr_add,
385 .mac_addr_remove = bnx2x_mac_addr_remove,
389 * dev_ops for virtual function
/*
 * Ethdev callback table for the virtual function (VF) driver.  It is
 * identical to the PF table except link_update, which additionally
 * watches the PF bulletin board (bnx2xvf_dev_link_update).
 */
391 static const struct eth_dev_ops bnx2xvf_eth_dev_ops = {
392 .dev_configure = bnx2x_dev_configure,
393 .dev_start = bnx2x_dev_start,
394 .dev_stop = bnx2x_dev_stop,
395 .dev_close = bnx2x_dev_close,
396 .promiscuous_enable = bnx2x_promisc_enable,
397 .promiscuous_disable = bnx2x_promisc_disable,
398 .allmulticast_enable = bnx2x_dev_allmulticast_enable,
399 .allmulticast_disable = bnx2x_dev_allmulticast_disable,
400 .link_update = bnx2xvf_dev_link_update,
401 .stats_get = bnx2x_dev_stats_get,
402 .dev_infos_get = bnx2x_dev_infos_get,
403 .rx_queue_setup = bnx2x_dev_rx_queue_setup,
404 .rx_queue_release = bnx2x_dev_rx_queue_release,
405 .tx_queue_setup = bnx2x_dev_tx_queue_setup,
406 .tx_queue_release = bnx2x_dev_tx_queue_release,
407 .mac_addr_add = bnx2x_mac_addr_add,
408 .mac_addr_remove = bnx2x_mac_addr_remove,
413 bnx2x_common_dev_init(struct rte_eth_dev *eth_dev, int is_vf)
416 struct rte_pci_device *pci_dev;
417 struct bnx2x_softc *sc;
419 PMD_INIT_FUNC_TRACE();
421 eth_dev->dev_ops = is_vf ? &bnx2xvf_eth_dev_ops : &bnx2x_eth_dev_ops;
422 pci_dev = eth_dev->pci_dev;
424 rte_eth_copy_pci_info(eth_dev, pci_dev);
426 sc = eth_dev->data->dev_private;
427 sc->pcie_bus = pci_dev->addr.bus;
428 sc->pcie_device = pci_dev->addr.devid;
431 sc->flags = BNX2X_IS_VF_FLAG;
433 sc->devinfo.vendor_id = pci_dev->id.vendor_id;
434 sc->devinfo.device_id = pci_dev->id.device_id;
435 sc->devinfo.subvendor_id = pci_dev->id.subsystem_vendor_id;
436 sc->devinfo.subdevice_id = pci_dev->id.subsystem_device_id;
438 sc->pcie_func = pci_dev->addr.function;
439 sc->bar[BAR0].base_addr = (void *)pci_dev->mem_resource[0].addr;
441 sc->bar[BAR1].base_addr = (void *)
442 ((uintptr_t)pci_dev->mem_resource[0].addr + PXP_VF_ADDR_DB_START);
444 sc->bar[BAR1].base_addr = pci_dev->mem_resource[2].addr;
446 assert(sc->bar[BAR0].base_addr);
447 assert(sc->bar[BAR1].base_addr);
449 bnx2x_load_firmware(sc);
450 assert(sc->firmware);
452 if (eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
455 sc->rx_budget = BNX2X_RX_BUDGET;
456 sc->hc_rx_ticks = BNX2X_RX_TICKS;
457 sc->hc_tx_ticks = BNX2X_TX_TICKS;
459 sc->interrupt_mode = INTR_MODE_SINGLE_MSIX;
460 sc->rx_mode = BNX2X_RX_MODE_NORMAL;
462 sc->pci_dev = pci_dev;
463 ret = bnx2x_attach(sc);
465 PMD_DRV_LOG(ERR, "bnx2x_attach failed (%d)", ret);
469 eth_dev->data->mac_addrs = (struct ether_addr *)sc->link_params.mac_addr;
471 PMD_DRV_LOG(INFO, "pcie_bus=%d, pcie_device=%d",
472 sc->pcie_bus, sc->pcie_device);
473 PMD_DRV_LOG(INFO, "bar0.addr=%p, bar1.addr=%p",
474 sc->bar[BAR0].base_addr, sc->bar[BAR1].base_addr);
475 PMD_DRV_LOG(INFO, "port=%d, path=%d, vnic=%d, func=%d",
476 PORT_ID(sc), PATH_ID(sc), VNIC_ID(sc), FUNC_ID(sc));
477 PMD_DRV_LOG(INFO, "portID=%d vendorID=0x%x deviceID=0x%x",
478 eth_dev->data->port_id, pci_dev->id.vendor_id, pci_dev->id.device_id);
481 if (bnx2x_dma_alloc(sc, sizeof(struct bnx2x_vf_mbx_msg),
482 &sc->vf2pf_mbox_mapping, "vf2pf_mbox",
483 RTE_CACHE_LINE_SIZE) != 0)
486 sc->vf2pf_mbox = (struct bnx2x_vf_mbx_msg *)
487 sc->vf2pf_mbox_mapping.vaddr;
489 if (bnx2x_dma_alloc(sc, sizeof(struct bnx2x_vf_bulletin),
490 &sc->pf2vf_bulletin_mapping, "vf2pf_bull",
491 RTE_CACHE_LINE_SIZE) != 0)
494 sc->pf2vf_bulletin = (struct bnx2x_vf_bulletin *)
495 sc->pf2vf_bulletin_mapping.vaddr;
497 ret = bnx2x_vf_get_resources(sc, sc->max_tx_queues,
/* PF probe entry point: thin wrapper over the common init (is_vf=0). */
507 eth_bnx2x_dev_init(struct rte_eth_dev *eth_dev)
509 PMD_INIT_FUNC_TRACE();
510 return bnx2x_common_dev_init(eth_dev, 0);
/* VF probe entry point: thin wrapper over the common init (is_vf=1). */
514 eth_bnx2xvf_dev_init(struct rte_eth_dev *eth_dev)
516 PMD_INIT_FUNC_TRACE();
517 return bnx2x_common_dev_init(eth_dev, 1);
/*
 * PF ethdev driver descriptor: needs BAR mapping and supports
 * link-state-change (LSC) interrupts.
 */
520 static struct eth_driver rte_bnx2x_pmd = {
522 .name = "rte_bnx2x_pmd",
523 .id_table = pci_id_bnx2x_map,
524 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
526 .eth_dev_init = eth_bnx2x_dev_init,
527 .dev_private_size = sizeof(struct bnx2x_softc),
531 * virtual function driver struct
/*
 * VF ethdev driver descriptor: BAR mapping only — no LSC interrupt
 * flag, unlike the PF descriptor.
 */
533 static struct eth_driver rte_bnx2xvf_pmd = {
535 .name = "rte_bnx2xvf_pmd",
536 .id_table = pci_id_bnx2xvf_map,
537 .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
539 .eth_dev_init = eth_bnx2xvf_dev_init,
540 .dev_private_size = sizeof(struct bnx2x_softc),
/* EAL init hook: register the PF PMD with the ethdev layer. */
543 static int rte_bnx2x_pmd_init(const char *name __rte_unused, const char *params __rte_unused)
545 PMD_INIT_FUNC_TRACE();
546 rte_eth_driver_register(&rte_bnx2x_pmd);
/* EAL init hook: register the VF PMD with the ethdev layer. */
551 static int rte_bnx2xvf_pmd_init(const char *name __rte_unused, const char *params __rte_unused)
553 PMD_INIT_FUNC_TRACE();
554 rte_eth_driver_register(&rte_bnx2xvf_pmd);
/* EAL driver descriptor for the PF PMD (init runs at EAL startup). */
559 static struct rte_driver rte_bnx2x_driver = {
561 .init = rte_bnx2x_pmd_init,
/* EAL driver descriptor for the VF PMD (init runs at EAL startup). */
564 static struct rte_driver rte_bnx2xvf_driver = {
566 .init = rte_bnx2xvf_pmd_init,
/* Register both EAL driver descriptors at load time. */
569 PMD_REGISTER_DRIVER(rte_bnx2x_driver);
570 PMD_REGISTER_DRIVER(rte_bnx2xvf_driver);