/*
 * Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
 *
 * Copyright (c) 2015 QLogic Corporation.
 *
 * See LICENSE.bnx2x_pmd for copyright and licensing details.
 */
#include "bnx2x.h"
#include "bnx2x_rxtx.h"
17 * The set of PCI devices this driver supports
19 static struct rte_pci_id pci_id_bnx2x_map[] = {
20 #define RTE_PCI_DEV_ID_DECL_BNX2X(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
21 #include "rte_pci_dev_ids.h"
25 static struct rte_pci_id pci_id_bnx2xvf_map[] = {
26 #define RTE_PCI_DEV_ID_DECL_BNX2XVF(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
27 #include "rte_pci_dev_ids.h"
32 bnx2x_link_update(struct rte_eth_dev *dev)
34 struct bnx2x_softc *sc = dev->data->dev_private;
36 PMD_INIT_FUNC_TRACE();
37 bnx2x_link_status_update(sc);
39 dev->data->dev_link.link_speed = sc->link_vars.line_speed;
40 switch (sc->link_vars.duplex) {
42 dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
45 dev->data->dev_link.link_duplex = ETH_LINK_HALF_DUPLEX;
48 dev->data->dev_link.link_duplex = ETH_LINK_AUTONEG_DUPLEX;
50 dev->data->dev_link.link_status = sc->link_vars.link_up;
54 bnx2x_interrupt_action(struct rte_eth_dev *dev)
56 struct bnx2x_softc *sc = dev->data->dev_private;
59 PMD_DEBUG_PERIODIC_LOG(INFO, "Interrupt handled");
61 if (bnx2x_intr_legacy(sc, 0))
63 if (sc->periodic_flags & PERIODIC_GO)
64 bnx2x_periodic_callout(sc);
65 link_status = REG_RD(sc, sc->link_params.shmem_base +
66 offsetof(struct shmem_region,
67 port_mb[sc->link_params.port].link_status));
68 if ((link_status & LINK_STATUS_LINK_UP) != dev->data->dev_link.link_status)
69 bnx2x_link_update(dev);
72 static __rte_unused void
73 bnx2x_interrupt_handler(__rte_unused struct rte_intr_handle *handle, void *param)
75 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
77 bnx2x_interrupt_action(dev);
78 rte_intr_enable(&(dev->pci_dev->intr_handle));
/*
 * Devops - helper functions can be called from user application
 */
86 bnx2x_dev_configure(struct rte_eth_dev *dev)
88 struct bnx2x_softc *sc = dev->data->dev_private;
89 int mp_ncpus = sysconf(_SC_NPROCESSORS_CONF);
91 PMD_INIT_FUNC_TRACE();
93 if (dev->data->dev_conf.rxmode.jumbo_frame)
94 sc->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len;
96 if (dev->data->nb_tx_queues > dev->data->nb_rx_queues) {
97 PMD_DRV_LOG(ERR, "The number of TX queues is greater than number of RX queues");
101 sc->num_queues = MAX(dev->data->nb_rx_queues, dev->data->nb_tx_queues);
102 if (sc->num_queues > mp_ncpus) {
103 PMD_DRV_LOG(ERR, "The number of queues is more than number of CPUs");
107 PMD_DRV_LOG(DEBUG, "num_queues=%d, mtu=%d",
108 sc->num_queues, sc->mtu);
111 if (bnx2x_alloc_ilt_mem(sc) != 0) {
112 PMD_DRV_LOG(ERR, "bnx2x_alloc_ilt_mem was failed");
116 /* allocate the host hardware/software hsi structures */
117 if (bnx2x_alloc_hsi_mem(sc) != 0) {
118 PMD_DRV_LOG(ERR, "bnx2x_alloc_hsi_mem was failed");
119 bnx2x_free_ilt_mem(sc);
127 bnx2x_dev_start(struct rte_eth_dev *dev)
129 struct bnx2x_softc *sc = dev->data->dev_private;
132 PMD_INIT_FUNC_TRACE();
134 ret = bnx2x_init(sc);
136 PMD_DRV_LOG(DEBUG, "bnx2x_init failed (%d)", ret);
141 rte_intr_callback_register(&(dev->pci_dev->intr_handle),
142 bnx2x_interrupt_handler, (void *)dev);
144 if(rte_intr_enable(&(dev->pci_dev->intr_handle)))
145 PMD_DRV_LOG(ERR, "rte_intr_enable failed");
148 ret = bnx2x_dev_rx_init(dev);
150 PMD_DRV_LOG(DEBUG, "bnx2x_dev_rx_init returned error code");
154 /* Print important adapter info for the user. */
155 bnx2x_print_adapter_info(sc);
163 bnx2x_dev_stop(struct rte_eth_dev *dev)
165 struct bnx2x_softc *sc = dev->data->dev_private;
168 PMD_INIT_FUNC_TRACE();
171 rte_intr_disable(&(dev->pci_dev->intr_handle));
172 rte_intr_callback_unregister(&(dev->pci_dev->intr_handle),
173 bnx2x_interrupt_handler, (void *)dev);
176 ret = bnx2x_nic_unload(sc, UNLOAD_NORMAL, FALSE);
178 PMD_DRV_LOG(DEBUG, "bnx2x_nic_unload failed (%d)", ret);
186 bnx2x_dev_close(struct rte_eth_dev *dev)
188 struct bnx2x_softc *sc = dev->data->dev_private;
190 PMD_INIT_FUNC_TRACE();
195 bnx2x_dev_clear_queues(dev);
196 memset(&(dev->data->dev_link), 0 , sizeof(struct rte_eth_link));
198 /* free the host hardware/software hsi structures */
199 bnx2x_free_hsi_mem(sc);
202 bnx2x_free_ilt_mem(sc);
206 bnx2x_promisc_enable(struct rte_eth_dev *dev)
208 struct bnx2x_softc *sc = dev->data->dev_private;
210 PMD_INIT_FUNC_TRACE();
211 sc->rx_mode = BNX2X_RX_MODE_PROMISC;
212 bnx2x_set_rx_mode(sc);
216 bnx2x_promisc_disable(struct rte_eth_dev *dev)
218 struct bnx2x_softc *sc = dev->data->dev_private;
220 PMD_INIT_FUNC_TRACE();
221 sc->rx_mode = BNX2X_RX_MODE_NORMAL;
222 bnx2x_set_rx_mode(sc);
226 bnx2x_dev_allmulticast_enable(struct rte_eth_dev *dev)
228 struct bnx2x_softc *sc = dev->data->dev_private;
230 PMD_INIT_FUNC_TRACE();
231 sc->rx_mode = BNX2X_RX_MODE_ALLMULTI;
232 bnx2x_set_rx_mode(sc);
236 bnx2x_dev_allmulticast_disable(struct rte_eth_dev *dev)
238 struct bnx2x_softc *sc = dev->data->dev_private;
240 PMD_INIT_FUNC_TRACE();
241 sc->rx_mode = BNX2X_RX_MODE_NORMAL;
242 bnx2x_set_rx_mode(sc);
246 bnx2x_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
248 PMD_INIT_FUNC_TRACE();
250 int old_link_status = dev->data->dev_link.link_status;
252 bnx2x_link_update(dev);
254 return old_link_status == dev->data->dev_link.link_status ? -1 : 0;
258 bnx2xvf_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
260 int old_link_status = dev->data->dev_link.link_status;
261 struct bnx2x_softc *sc = dev->data->dev_private;
263 bnx2x_link_update(dev);
265 bnx2x_check_bull(sc);
266 if (sc->old_bulletin.valid_bitmap & (1 << CHANNEL_DOWN)) {
267 PMD_DRV_LOG(ERR, "PF indicated channel is down."
268 "VF device is no longer operational");
269 dev->data->dev_link.link_status = 0;
272 return old_link_status == dev->data->dev_link.link_status ? -1 : 0;
276 bnx2x_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
278 struct bnx2x_softc *sc = dev->data->dev_private;
280 PMD_INIT_FUNC_TRACE();
282 bnx2x_stats_handle(sc, STATS_EVENT_UPDATE);
284 memset(stats, 0, sizeof (struct rte_eth_stats));
287 HILO_U64(sc->eth_stats.total_unicast_packets_received_hi,
288 sc->eth_stats.total_unicast_packets_received_lo) +
289 HILO_U64(sc->eth_stats.total_multicast_packets_received_hi,
290 sc->eth_stats.total_multicast_packets_received_lo) +
291 HILO_U64(sc->eth_stats.total_broadcast_packets_received_hi,
292 sc->eth_stats.total_broadcast_packets_received_lo);
295 HILO_U64(sc->eth_stats.total_unicast_packets_transmitted_hi,
296 sc->eth_stats.total_unicast_packets_transmitted_lo) +
297 HILO_U64(sc->eth_stats.total_multicast_packets_transmitted_hi,
298 sc->eth_stats.total_multicast_packets_transmitted_lo) +
299 HILO_U64(sc->eth_stats.total_broadcast_packets_transmitted_hi,
300 sc->eth_stats.total_broadcast_packets_transmitted_lo);
303 HILO_U64(sc->eth_stats.total_bytes_received_hi,
304 sc->eth_stats.total_bytes_received_lo);
307 HILO_U64(sc->eth_stats.total_bytes_transmitted_hi,
308 sc->eth_stats.total_bytes_transmitted_lo);
311 HILO_U64(sc->eth_stats.error_bytes_received_hi,
312 sc->eth_stats.error_bytes_received_lo);
317 HILO_U64(sc->eth_stats.no_buff_discard_hi,
318 sc->eth_stats.no_buff_discard_lo);
322 bnx2x_dev_infos_get(struct rte_eth_dev *dev, __rte_unused struct rte_eth_dev_info *dev_info)
324 struct bnx2x_softc *sc = dev->data->dev_private;
325 dev_info->max_rx_queues = sc->max_rx_queues;
326 dev_info->max_tx_queues = sc->max_tx_queues;
327 dev_info->min_rx_bufsize = BNX2X_MIN_RX_BUF_SIZE;
328 dev_info->max_rx_pktlen = BNX2X_MAX_RX_PKT_LEN;
329 dev_info->max_mac_addrs = BNX2X_MAX_MAC_ADDRS;
333 bnx2x_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
334 uint32_t index, uint32_t pool)
336 struct bnx2x_softc *sc = dev->data->dev_private;
338 if (sc->mac_ops.mac_addr_add)
339 sc->mac_ops.mac_addr_add(dev, mac_addr, index, pool);
343 bnx2x_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
345 struct bnx2x_softc *sc = dev->data->dev_private;
347 if (sc->mac_ops.mac_addr_remove)
348 sc->mac_ops.mac_addr_remove(dev, index);
351 static const struct eth_dev_ops bnx2x_eth_dev_ops = {
352 .dev_configure = bnx2x_dev_configure,
353 .dev_start = bnx2x_dev_start,
354 .dev_stop = bnx2x_dev_stop,
355 .dev_close = bnx2x_dev_close,
356 .promiscuous_enable = bnx2x_promisc_enable,
357 .promiscuous_disable = bnx2x_promisc_disable,
358 .allmulticast_enable = bnx2x_dev_allmulticast_enable,
359 .allmulticast_disable = bnx2x_dev_allmulticast_disable,
360 .link_update = bnx2x_dev_link_update,
361 .stats_get = bnx2x_dev_stats_get,
362 .dev_infos_get = bnx2x_dev_infos_get,
363 .rx_queue_setup = bnx2x_dev_rx_queue_setup,
364 .rx_queue_release = bnx2x_dev_rx_queue_release,
365 .tx_queue_setup = bnx2x_dev_tx_queue_setup,
366 .tx_queue_release = bnx2x_dev_tx_queue_release,
367 .mac_addr_add = bnx2x_mac_addr_add,
368 .mac_addr_remove = bnx2x_mac_addr_remove,
372 * dev_ops for virtual function
374 static const struct eth_dev_ops bnx2xvf_eth_dev_ops = {
375 .dev_configure = bnx2x_dev_configure,
376 .dev_start = bnx2x_dev_start,
377 .dev_stop = bnx2x_dev_stop,
378 .dev_close = bnx2x_dev_close,
379 .promiscuous_enable = bnx2x_promisc_enable,
380 .promiscuous_disable = bnx2x_promisc_disable,
381 .allmulticast_enable = bnx2x_dev_allmulticast_enable,
382 .allmulticast_disable = bnx2x_dev_allmulticast_disable,
383 .link_update = bnx2xvf_dev_link_update,
384 .stats_get = bnx2x_dev_stats_get,
385 .dev_infos_get = bnx2x_dev_infos_get,
386 .rx_queue_setup = bnx2x_dev_rx_queue_setup,
387 .rx_queue_release = bnx2x_dev_rx_queue_release,
388 .tx_queue_setup = bnx2x_dev_tx_queue_setup,
389 .tx_queue_release = bnx2x_dev_tx_queue_release,
390 .mac_addr_add = bnx2x_mac_addr_add,
391 .mac_addr_remove = bnx2x_mac_addr_remove,
396 bnx2x_common_dev_init(struct rte_eth_dev *eth_dev, int is_vf)
399 struct rte_pci_device *pci_dev;
400 struct bnx2x_softc *sc;
402 PMD_INIT_FUNC_TRACE();
404 eth_dev->dev_ops = is_vf ? &bnx2xvf_eth_dev_ops : &bnx2x_eth_dev_ops;
405 pci_dev = eth_dev->pci_dev;
407 rte_eth_copy_pci_info(eth_dev, pci_dev);
409 sc = eth_dev->data->dev_private;
410 sc->pcie_bus = pci_dev->addr.bus;
411 sc->pcie_device = pci_dev->addr.devid;
414 sc->flags = BNX2X_IS_VF_FLAG;
416 sc->devinfo.vendor_id = pci_dev->id.vendor_id;
417 sc->devinfo.device_id = pci_dev->id.device_id;
418 sc->devinfo.subvendor_id = pci_dev->id.subsystem_vendor_id;
419 sc->devinfo.subdevice_id = pci_dev->id.subsystem_device_id;
421 sc->pcie_func = pci_dev->addr.function;
422 sc->bar[BAR0].base_addr = (void *)pci_dev->mem_resource[0].addr;
424 sc->bar[BAR1].base_addr = (void *)
425 ((uintptr_t)pci_dev->mem_resource[0].addr + PXP_VF_ADDR_DB_START);
427 sc->bar[BAR1].base_addr = pci_dev->mem_resource[2].addr;
429 assert(sc->bar[BAR0].base_addr);
430 assert(sc->bar[BAR1].base_addr);
432 bnx2x_load_firmware(sc);
433 assert(sc->firmware);
435 if (eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
438 sc->rx_budget = BNX2X_RX_BUDGET;
439 sc->hc_rx_ticks = BNX2X_RX_TICKS;
440 sc->hc_tx_ticks = BNX2X_TX_TICKS;
442 sc->interrupt_mode = INTR_MODE_SINGLE_MSIX;
443 sc->rx_mode = BNX2X_RX_MODE_NORMAL;
445 sc->pci_dev = pci_dev;
446 ret = bnx2x_attach(sc);
448 PMD_DRV_LOG(ERR, "bnx2x_attach failed (%d)", ret);
452 eth_dev->data->mac_addrs = (struct ether_addr *)sc->link_params.mac_addr;
454 PMD_DRV_LOG(INFO, "pcie_bus=%d, pcie_device=%d",
455 sc->pcie_bus, sc->pcie_device);
456 PMD_DRV_LOG(INFO, "bar0.addr=%p, bar1.addr=%p",
457 sc->bar[BAR0].base_addr, sc->bar[BAR1].base_addr);
458 PMD_DRV_LOG(INFO, "port=%d, path=%d, vnic=%d, func=%d",
459 PORT_ID(sc), PATH_ID(sc), VNIC_ID(sc), FUNC_ID(sc));
460 PMD_DRV_LOG(INFO, "portID=%d vendorID=0x%x deviceID=0x%x",
461 eth_dev->data->port_id, pci_dev->id.vendor_id, pci_dev->id.device_id);
464 if (bnx2x_dma_alloc(sc, sizeof(struct bnx2x_vf_mbx_msg),
465 &sc->vf2pf_mbox_mapping, "vf2pf_mbox",
466 RTE_CACHE_LINE_SIZE) != 0)
469 sc->vf2pf_mbox = (struct bnx2x_vf_mbx_msg *)
470 sc->vf2pf_mbox_mapping.vaddr;
472 if (bnx2x_dma_alloc(sc, sizeof(struct bnx2x_vf_bulletin),
473 &sc->pf2vf_bulletin_mapping, "vf2pf_bull",
474 RTE_CACHE_LINE_SIZE) != 0)
477 sc->pf2vf_bulletin = (struct bnx2x_vf_bulletin *)
478 sc->pf2vf_bulletin_mapping.vaddr;
480 ret = bnx2x_vf_get_resources(sc, sc->max_tx_queues,
/* Per-device init entry point for the physical function. */
static int
eth_bnx2x_dev_init(struct rte_eth_dev *eth_dev)
{
	PMD_INIT_FUNC_TRACE();
	return bnx2x_common_dev_init(eth_dev, 0);
}
/* Per-device init entry point for the virtual function. */
static int
eth_bnx2xvf_dev_init(struct rte_eth_dev *eth_dev)
{
	PMD_INIT_FUNC_TRACE();
	return bnx2x_common_dev_init(eth_dev, 1);
}
503 static struct eth_driver rte_bnx2x_pmd = {
505 .name = "rte_bnx2x_pmd",
506 .id_table = pci_id_bnx2x_map,
507 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
509 .eth_dev_init = eth_bnx2x_dev_init,
510 .dev_private_size = sizeof(struct bnx2x_softc),
514 * virtual function driver struct
516 static struct eth_driver rte_bnx2xvf_pmd = {
518 .name = "rte_bnx2xvf_pmd",
519 .id_table = pci_id_bnx2xvf_map,
520 .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
522 .eth_dev_init = eth_bnx2xvf_dev_init,
523 .dev_private_size = sizeof(struct bnx2x_softc),
526 static int rte_bnx2x_pmd_init(const char *name __rte_unused, const char *params __rte_unused)
528 PMD_INIT_FUNC_TRACE();
529 rte_eth_driver_register(&rte_bnx2x_pmd);
534 static int rte_bnx2xvf_pmd_init(const char *name __rte_unused, const char *params __rte_unused)
536 PMD_INIT_FUNC_TRACE();
537 rte_eth_driver_register(&rte_bnx2xvf_pmd);
542 static struct rte_driver rte_bnx2x_driver = {
544 .init = rte_bnx2x_pmd_init,
547 static struct rte_driver rte_bnx2xvf_driver = {
549 .init = rte_bnx2xvf_pmd_init,
/* Register the PF and VF drivers so the EAL probes them at startup. */
PMD_REGISTER_DRIVER(rte_bnx2x_driver);
PMD_REGISTER_DRIVER(rte_bnx2xvf_driver);