#include <sys/types.h>
#include <sys/stat.h>
+#include <arpa/inet.h>
#include <fcntl.h>
#include <zlib.h>
+#include <rte_bitops.h>
+#include <rte_string_fns.h>
+
+#include "eal_firmware.h"
+
#define BNX2X_PMD_VER_PREFIX "BNX2X PMD"
#define BNX2X_PMD_VERSION_MAJOR 1
-#define BNX2X_PMD_VERSION_MINOR 0
-#define BNX2X_PMD_VERSION_REVISION 6
+#define BNX2X_PMD_VERSION_MINOR 1
+#define BNX2X_PMD_VERSION_REVISION 0
#define BNX2X_PMD_VERSION_PATCH 1
static inline const char *
static void bnx2x_update_rx_prod(struct bnx2x_softc *sc,
struct bnx2x_fastpath *fp,
uint16_t rx_bd_prod, uint16_t rx_cq_prod);
+static void bnx2x_link_report_locked(struct bnx2x_softc *sc);
static void bnx2x_link_report(struct bnx2x_softc *sc);
void bnx2x_link_status_update(struct bnx2x_softc *sc);
static int bnx2x_alloc_mem(struct bnx2x_softc *sc);
int bnx2x_nic_load(struct bnx2x_softc *sc);
static int bnx2x_handle_sp_tq(struct bnx2x_softc *sc);
-static void bnx2x_handle_fp_tq(struct bnx2x_fastpath *fp, int scan_fp);
+static void bnx2x_handle_fp_tq(struct bnx2x_fastpath *fp);
static void bnx2x_ack_sb(struct bnx2x_softc *sc, uint8_t igu_sb_id,
uint8_t storm, uint16_t index, uint8_t op,
uint8_t update);
-int bnx2x_test_bit(int nr, volatile unsigned long *addr)
-{
- int res;
-
- mb();
- res = ((*addr) & (1UL << nr)) != 0;
- mb();
- return res;
-}
-
-void bnx2x_set_bit(unsigned int nr, volatile unsigned long *addr)
-{
- __sync_fetch_and_or(addr, (1UL << nr));
-}
-
-void bnx2x_clear_bit(int nr, volatile unsigned long *addr)
-{
- __sync_fetch_and_and(addr, ~(1UL << nr));
-}
-
-int bnx2x_test_and_clear_bit(int nr, volatile unsigned long *addr)
-{
- unsigned long mask = (1UL << nr);
- return __sync_fetch_and_and(addr, ~mask) & mask;
-}
-
/* Atomic compare-and-swap: if *addr equals 'old', store 'new' into it.
 * Returns the value that was in *addr before the operation (legacy GCC
 * __sync builtin; full barrier semantics).
 */
int bnx2x_cmpxchg(volatile int *addr, int old, int new)
{
	return __sync_val_compare_and_swap(addr, old, new);
}
dma->paddr = (uint64_t) z->iova;
dma->vaddr = z->addr;
+ dma->mzone = (const void *)z;
PMD_DRV_LOG(DEBUG, sc,
"%s: virt=%p phys=%" PRIx64, msg, dma->vaddr, dma->paddr);
return 0;
}
+/* Free a DMA region backed by an rte_memzone.
+ * No-op when dma->mzone is NULL (region never allocated or already freed).
+ * Clears every field afterwards so the descriptor is safe to reuse and a
+ * double free is prevented.
+ */
+void bnx2x_dma_free(struct bnx2x_dma *dma)
+{
+	if (dma->mzone == NULL)
+		return;
+
+	rte_memzone_free((const struct rte_memzone *)dma->mzone);
+	dma->sc = NULL;
+	dma->paddr = 0;
+	dma->vaddr = NULL;
+	dma->nseg = 0;
+	dma->mzone = NULL;
+}
+
static int bnx2x_acquire_hw_lock(struct bnx2x_softc *sc, uint32_t resource)
{
uint32_t lock_status;
uint32_t hw_lock_control_reg;
int cnt;
+#ifndef RTE_LIBRTE_BNX2X_DEBUG_PERIODIC
+ if (resource)
+ PMD_INIT_FUNC_TRACE(sc);
+#else
PMD_INIT_FUNC_TRACE(sc);
+#endif
/* validate the resource is within range */
if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
DELAY(5000);
}
- PMD_DRV_LOG(NOTICE, sc, "Resource lock timeout!");
+ PMD_DRV_LOG(NOTICE, sc, "Resource 0x%x resource_bit 0x%x lock timeout!",
+ resource, resource_bit);
return -1;
}
int func = SC_FUNC(sc);
uint32_t hw_lock_control_reg;
+#ifndef RTE_LIBRTE_BNX2X_DEBUG_PERIODIC
+ if (resource)
+ PMD_INIT_FUNC_TRACE(sc);
+#else
PMD_INIT_FUNC_TRACE(sc);
+#endif
/* validate the resource is within range */
if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
PMD_DRV_LOG(NOTICE, sc,
- "resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE",
- resource);
+ "(resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE)"
+ " resource_bit 0x%x", resource, resource_bit);
return -1;
}
return 0;
}
+/* Serialize PHY/MDIO access: take the software PHY mutex first, then the
+ * shared MDIO hardware resource lock.
+ */
+static void bnx2x_acquire_phy_lock(struct bnx2x_softc *sc)
+{
+	BNX2X_PHY_LOCK(sc);
+	bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_MDIO);
+}
+
+/* Release PHY/MDIO locks in the reverse order of acquisition. */
+static void bnx2x_release_phy_lock(struct bnx2x_softc *sc)
+{
+	bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_MDIO);
+	BNX2X_PHY_UNLOCK(sc);
+}
+
/* copy command into DMAE command memory and set DMAE command Go */
void bnx2x_post_dmae(struct bnx2x_softc *sc, struct dmae_command *dmae, int idx)
{
atomic_load_acq_long(&sc->cq_spq_left),
atomic_load_acq_long(&sc->eq_spq_left));
+ /* RAMROD completion is processed in bnx2x_intr_legacy()
+ * which can run from different contexts.
+	 * Ask bnx2x_intr_legacy() to process RAMROD
+ * completion whenever it gets scheduled.
+ */
+ rte_atomic32_set(&sc->scan_fp, 1);
bnx2x_sp_prod_update(sc);
return 0;
if (unlikely((rx_cq_cons_sb & MAX_RCQ_ENTRIES(rxq)) ==
MAX_RCQ_ENTRIES(rxq)))
rx_cq_cons_sb++;
+
+ PMD_RX_LOG(DEBUG, "hw CQ cons = %d, sw CQ cons = %d",
+ rx_cq_cons_sb, rxq->rx_cq_head);
+
return rxq->rx_cq_head != rx_cq_cons_sb;
}
uint16_t bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
uint16_t hw_cq_cons, sw_cq_cons, sw_cq_prod;
+ rte_spinlock_lock(&(fp)->rx_mtx);
+
rxq = sc->rx_queues[fp->index];
if (!rxq) {
PMD_RX_LOG(ERR, "RX queue %d is NULL", fp->index);
+ rte_spinlock_unlock(&(fp)->rx_mtx);
return 0;
}
rxq->rx_cq_head = sw_cq_cons;
rxq->rx_cq_tail = sw_cq_prod;
+ PMD_RX_LOG(DEBUG, "BD prod = %d, sw CQ prod = %d",
+ bd_prod_fw, sw_cq_prod);
+
/* Update producers */
bnx2x_update_rx_prod(sc, fp, bd_prod_fw, sw_cq_prod);
+ rte_spinlock_unlock(&(fp)->rx_mtx);
+
return sw_cq_cons != hw_cq_cons;
}
bnx2x_del_all_macs(struct bnx2x_softc *sc, struct ecore_vlan_mac_obj *mac_obj,
int mac_type, uint8_t wait_for_comp)
{
- unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
+ uint32_t ramrod_flags = 0, vlan_mac_flags = 0;
int rc;
/* wait for completion of requested */
if (wait_for_comp) {
- bnx2x_set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+ rte_bit_relaxed_set32(RAMROD_COMP_WAIT, &ramrod_flags);
}
/* Set the mac type of addresses we want to clear */
- bnx2x_set_bit(mac_type, &vlan_mac_flags);
+ rte_bit_relaxed_set32(mac_type, &vlan_mac_flags);
rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags, &ramrod_flags);
if (rc < 0)
static int
bnx2x_fill_accept_flags(struct bnx2x_softc *sc, uint32_t rx_mode,
- unsigned long *rx_accept_flags,
- unsigned long *tx_accept_flags)
+ uint32_t *rx_accept_flags, uint32_t *tx_accept_flags)
{
/* Clear the flags first */
*rx_accept_flags = 0;
break;
case BNX2X_RX_MODE_NORMAL:
- bnx2x_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags);
- bnx2x_set_bit(ECORE_ACCEPT_MULTICAST, rx_accept_flags);
- bnx2x_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags);
+ rte_bit_relaxed_set32(ECORE_ACCEPT_UNICAST, rx_accept_flags);
+ rte_bit_relaxed_set32(ECORE_ACCEPT_MULTICAST, rx_accept_flags);
+ rte_bit_relaxed_set32(ECORE_ACCEPT_BROADCAST, rx_accept_flags);
/* internal switching mode */
- bnx2x_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags);
- bnx2x_set_bit(ECORE_ACCEPT_MULTICAST, tx_accept_flags);
- bnx2x_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags);
+ rte_bit_relaxed_set32(ECORE_ACCEPT_UNICAST, tx_accept_flags);
+ rte_bit_relaxed_set32(ECORE_ACCEPT_MULTICAST, tx_accept_flags);
+ rte_bit_relaxed_set32(ECORE_ACCEPT_BROADCAST, tx_accept_flags);
break;
case BNX2X_RX_MODE_ALLMULTI:
- bnx2x_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags);
- bnx2x_set_bit(ECORE_ACCEPT_ALL_MULTICAST, rx_accept_flags);
- bnx2x_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags);
+ rte_bit_relaxed_set32(ECORE_ACCEPT_UNICAST, rx_accept_flags);
+ rte_bit_relaxed_set32(ECORE_ACCEPT_ALL_MULTICAST,
+ rx_accept_flags);
+ rte_bit_relaxed_set32(ECORE_ACCEPT_BROADCAST, rx_accept_flags);
/* internal switching mode */
- bnx2x_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags);
- bnx2x_set_bit(ECORE_ACCEPT_ALL_MULTICAST, tx_accept_flags);
- bnx2x_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags);
+ rte_bit_relaxed_set32(ECORE_ACCEPT_UNICAST, tx_accept_flags);
+ rte_bit_relaxed_set32(ECORE_ACCEPT_ALL_MULTICAST,
+ tx_accept_flags);
+ rte_bit_relaxed_set32(ECORE_ACCEPT_BROADCAST, tx_accept_flags);
break;
* should receive matched and unmatched (in resolution of port)
* unicast packets.
*/
- bnx2x_set_bit(ECORE_ACCEPT_UNMATCHED, rx_accept_flags);
- bnx2x_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags);
- bnx2x_set_bit(ECORE_ACCEPT_ALL_MULTICAST, rx_accept_flags);
- bnx2x_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags);
+ rte_bit_relaxed_set32(ECORE_ACCEPT_UNMATCHED, rx_accept_flags);
+ rte_bit_relaxed_set32(ECORE_ACCEPT_UNICAST, rx_accept_flags);
+ rte_bit_relaxed_set32(ECORE_ACCEPT_ALL_MULTICAST,
+ rx_accept_flags);
+ rte_bit_relaxed_set32(ECORE_ACCEPT_BROADCAST, rx_accept_flags);
/* internal switching mode */
- bnx2x_set_bit(ECORE_ACCEPT_ALL_MULTICAST, tx_accept_flags);
- bnx2x_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags);
+ rte_bit_relaxed_set32(ECORE_ACCEPT_ALL_MULTICAST,
+ tx_accept_flags);
+ rte_bit_relaxed_set32(ECORE_ACCEPT_BROADCAST, tx_accept_flags);
if (IS_MF_SI(sc)) {
- bnx2x_set_bit(ECORE_ACCEPT_ALL_UNICAST, tx_accept_flags);
+ rte_bit_relaxed_set32(ECORE_ACCEPT_ALL_UNICAST,
+ tx_accept_flags);
} else {
- bnx2x_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags);
+ rte_bit_relaxed_set32(ECORE_ACCEPT_UNICAST,
+ tx_accept_flags);
}
break;
/* Set ACCEPT_ANY_VLAN as we do not enable filtering by VLAN */
if (rx_mode != BNX2X_RX_MODE_NONE) {
- bnx2x_set_bit(ECORE_ACCEPT_ANY_VLAN, rx_accept_flags);
- bnx2x_set_bit(ECORE_ACCEPT_ANY_VLAN, tx_accept_flags);
+ rte_bit_relaxed_set32(ECORE_ACCEPT_ANY_VLAN, rx_accept_flags);
+ rte_bit_relaxed_set32(ECORE_ACCEPT_ANY_VLAN, tx_accept_flags);
}
return 0;
ramrod_param.rdata = BNX2X_SP(sc, rx_mode_rdata);
ramrod_param.rdata_mapping =
(rte_iova_t)BNX2X_SP_MAPPING(sc, rx_mode_rdata),
- bnx2x_set_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state);
+ rte_bit_relaxed_set32(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state);
ramrod_param.ramrod_flags = ramrod_flags;
ramrod_param.rx_mode_flags = rx_mode_flags;
int bnx2x_set_storm_rx_mode(struct bnx2x_softc *sc)
{
- unsigned long rx_mode_flags = 0, ramrod_flags = 0;
- unsigned long rx_accept_flags = 0, tx_accept_flags = 0;
+ uint32_t rx_mode_flags = 0, ramrod_flags = 0;
+ uint32_t rx_accept_flags = 0, tx_accept_flags = 0;
int rc;
rc = bnx2x_fill_accept_flags(sc, sc->rx_mode, &rx_accept_flags,
return rc;
}
- bnx2x_set_bit(RAMROD_RX, &ramrod_flags);
- bnx2x_set_bit(RAMROD_TX, &ramrod_flags);
- bnx2x_set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+ rte_bit_relaxed_set32(RAMROD_RX, &ramrod_flags);
+ rte_bit_relaxed_set32(RAMROD_TX, &ramrod_flags);
+ rte_bit_relaxed_set32(RAMROD_COMP_WAIT, &ramrod_flags);
return bnx2x_set_q_rx_mode(sc, sc->fp[0].cl_id, rx_mode_flags,
rx_accept_flags, tx_accept_flags,
"Forcing STARTED-->TX_STOPPED-->STARTED");
func_params.f_obj = &sc->func_obj;
- bnx2x_set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
+ rte_bit_relaxed_set32(RAMROD_DRV_CLR_ONLY,
+ &func_params.ramrod_flags);
/* STARTED-->TX_STOPPED */
func_params.cmd = ECORE_F_CMD_TX_STOP;
q_params.q_obj = &sc->sp_objs[fp->index].q_obj;
/* We want to wait for completion in this context */
- bnx2x_set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
+ rte_bit_relaxed_set32(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
/* Stop the primary connection: */
}
/* wait for the outstanding SP commands */
-static uint8_t bnx2x_wait_sp_comp(struct bnx2x_softc *sc, unsigned long mask)
+static uint8_t bnx2x_wait_sp_comp(struct bnx2x_softc *sc, uint32_t mask)
{
- unsigned long tmp;
+ uint32_t tmp;
int tout = 5000; /* wait for 5 secs tops */
while (tout--) {
mb();
- if (!(atomic_load_acq_long(&sc->sp_state) & mask)) {
+ if (!(atomic_load_acq_int(&sc->sp_state) & mask))
return TRUE;
- }
DELAY(1000);
}
mb();
- tmp = atomic_load_acq_long(&sc->sp_state);
+ tmp = atomic_load_acq_int(&sc->sp_state);
if (tmp & mask) {
PMD_DRV_LOG(INFO, sc, "Filtering completion timed out: "
- "sp_state 0x%lx, mask 0x%lx", tmp, mask);
+ "sp_state 0x%x, mask 0x%x", tmp, mask);
return FALSE;
}
int rc;
/* prepare parameters for function state transitions */
- bnx2x_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+ rte_bit_relaxed_set32(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
func_params.f_obj = &sc->func_obj;
func_params.cmd = ECORE_F_CMD_STOP;
if (rc) {
PMD_DRV_LOG(NOTICE, sc, "FUNC_STOP ramrod failed. "
"Running a dry transaction");
- bnx2x_set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
+ rte_bit_relaxed_set32(RAMROD_DRV_CLR_ONLY,
+ &func_params.ramrod_flags);
return ecore_func_state_change(sc, &func_params);
}
struct ecore_func_state_params func_params = { NULL };
/* Prepare parameters for function state transitions */
- bnx2x_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+ rte_bit_relaxed_set32(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
func_params.f_obj = &sc->func_obj;
func_params.cmd = ECORE_F_CMD_HW_RESET;
* a race between the completion code and this code.
*/
- if (bnx2x_test_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state)) {
- bnx2x_set_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state);
- } else {
+ if (rte_bit_relaxed_get32(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state))
+ rte_bit_relaxed_set32(ECORE_FILTER_RX_MODE_SCHED,
+ &sc->sp_state);
+ else
bnx2x_set_storm_rx_mode(sc);
- }
/* Clean up multicast configuration */
rparam.mcast_obj = &sc->mcast_obj;
* If SP settings didn't get completed so far - something
* very wrong has happen.
*/
- if (!bnx2x_wait_sp_comp(sc, ~0x0UL)) {
+ if (!bnx2x_wait_sp_comp(sc, ~0x0U))
PMD_DRV_LOG(NOTICE, sc, "Common slow path ramrods got stuck!");
- }
unload_error:
*/
static void bnx2x_squeeze_objects(struct bnx2x_softc *sc)
{
- unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
+ uint32_t ramrod_flags = 0, vlan_mac_flags = 0;
struct ecore_mcast_ramrod_params rparam = { NULL };
struct ecore_vlan_mac_obj *mac_obj = &sc->sp_objs->mac_obj;
int rc;
/* Cleanup MACs' object first... */
/* Wait for completion of requested */
- bnx2x_set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+ rte_bit_relaxed_set32(RAMROD_COMP_WAIT, &ramrod_flags);
/* Perform a dry cleanup */
- bnx2x_set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
+ rte_bit_relaxed_set32(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
/* Clean ETH primary MAC */
- bnx2x_set_bit(ECORE_ETH_MAC, &vlan_mac_flags);
+ rte_bit_relaxed_set32(ECORE_ETH_MAC, &vlan_mac_flags);
rc = mac_obj->delete_all(sc, &sc->sp_objs->mac_obj, &vlan_mac_flags,
&ramrod_flags);
if (rc != 0) {
/* Cleanup UC list */
vlan_mac_flags = 0;
- bnx2x_set_bit(ECORE_UC_LIST_MAC, &vlan_mac_flags);
+ rte_bit_relaxed_set32(ECORE_UC_LIST_MAC, &vlan_mac_flags);
rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags, &ramrod_flags);
if (rc != 0) {
PMD_DRV_LOG(NOTICE, sc,
/* Now clean mcast object... */
rparam.mcast_obj = &sc->mcast_obj;
- bnx2x_set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
+ rte_bit_relaxed_set32(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
/* Add a DEL command... */
rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL);
uint8_t global = FALSE;
uint32_t val;
+ PMD_INIT_FUNC_TRACE(sc);
+
PMD_DRV_LOG(DEBUG, sc, "Starting NIC unload...");
/* mark driver as unloaded in shmem2 */
bnx2x_free_mem(sc);
}
+ /* free the host hardware/software hsi structures */
+ bnx2x_free_hsi_mem(sc);
+
bnx2x_free_fw_stats_mem(sc);
sc->state = BNX2X_STATE_CLOSED;
tx_start_bd = &txq->tx_ring[TX_BD(bd_prod, txq)].start_bd;
- tx_start_bd->addr =
- rte_cpu_to_le_64(rte_mbuf_data_iova(m0));
+ tx_start_bd->addr_lo =
+ rte_cpu_to_le_32(U64_LO(rte_mbuf_data_iova(m0)));
+ tx_start_bd->addr_hi =
+ rte_cpu_to_le_32(U64_HI(rte_mbuf_data_iova(m0)));
tx_start_bd->nbytes = rte_cpu_to_le_16(m0->data_len);
tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
tx_start_bd->general_data =
tx_start_bd->vlan_or_ethertype =
rte_cpu_to_le_16(pkt_prod);
else {
- struct ether_hdr *eh =
- rte_pktmbuf_mtod(m0, struct ether_hdr *);
-
- tx_start_bd->vlan_or_ethertype =
- rte_cpu_to_le_16(rte_be_to_cpu_16(eh->ether_type));
+ /* when transmitting in a vf, start bd
+ * must hold the ethertype for fw to enforce it
+ */
+ struct rte_ether_hdr *eh =
+ rte_pktmbuf_mtod(m0, struct rte_ether_hdr *);
+
+ /* Still need to consider inband vlan for enforced */
+ if (eh->ether_type ==
+ rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN)) {
+ struct rte_vlan_hdr *vh =
+ (struct rte_vlan_hdr *)(eh + 1);
+ tx_start_bd->bd_flags.as_bitfield |=
+ (X_ETH_INBAND_VLAN <<
+ ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
+ tx_start_bd->vlan_or_ethertype =
+ rte_cpu_to_le_16(ntohs(vh->vlan_tci));
+ } else {
+ tx_start_bd->vlan_or_ethertype =
+ (rte_cpu_to_le_16
+ (rte_be_to_cpu_16(eh->ether_type)));
+ }
}
}
bd_prod = NEXT_TX_BD(bd_prod);
if (IS_VF(sc)) {
struct eth_tx_parse_bd_e2 *tx_parse_bd;
- const struct ether_hdr *eh =
- rte_pktmbuf_mtod(m0, struct ether_hdr *);
+ const struct rte_ether_hdr *eh =
+ rte_pktmbuf_mtod(m0, struct rte_ether_hdr *);
uint8_t mac_type = UNICAST_ADDRESS;
tx_parse_bd =
&txq->tx_ring[TX_BD(bd_prod, txq)].parse_bd_e2;
- if (is_multicast_ether_addr(&eh->d_addr)) {
- if (is_broadcast_ether_addr(&eh->d_addr))
+ if (rte_is_multicast_ether_addr(&eh->d_addr)) {
+ if (rte_is_broadcast_ether_addr(&eh->d_addr))
mac_type = BROADCAST_ADDRESS;
else
mac_type = MULTICAST_ADDRESS;
static void bnx2x_free_fw_stats_mem(struct bnx2x_softc *sc)
{
+ bnx2x_dma_free(&sc->fw_stats_dma);
sc->fw_stats_num = 0;
sc->fw_stats_req_size = 0;
}
}
- bnx2x_link_report(sc);
+ bnx2x_link_report_locked(sc);
if (IS_MF(sc)) {
bnx2x_link_sync_notify(sc);
if (asserted & ATTN_HARD_WIRED_MASK) {
if (asserted & ATTN_NIG_FOR_FUNC) {
+ bnx2x_acquire_phy_lock(sc);
/* save nig interrupt mask */
nig_mask = REG_RD(sc, nig_int_mask_addr);
REG_WR(sc, nig_int_mask_addr, nig_mask);
+ bnx2x_release_phy_lock(sc);
}
}
if (sc->link_vars.periodic_flags &
ELINK_PERIODIC_FLAGS_LINK_EVENT) {
/* sync with link */
+ bnx2x_acquire_phy_lock(sc);
sc->link_vars.periodic_flags &=
~ELINK_PERIODIC_FLAGS_LINK_EVENT;
+ bnx2x_release_phy_lock(sc);
if (IS_MF(sc)) {
bnx2x_link_sync_notify(sc);
}
}
if ((attn & sc->link_vars.aeu_int_mask) && sc->port.pmf) {
+ bnx2x_acquire_phy_lock(sc);
elink_handle_module_detect_int(&sc->link_params);
+ bnx2x_release_phy_lock(sc);
}
if (attn & HW_INTERRUT_ASSERT_SET_0) {
REG_WR(sc, reg_offset, val);
rte_panic("FATAL HW block attention set0 0x%lx",
- (attn & HW_INTERRUT_ASSERT_SET_0));
+ (attn & (unsigned long)HW_INTERRUT_ASSERT_SET_0));
}
}
struct host_sp_status_block *def_sb = sc->def_sb;
uint16_t rc = 0;
+ if (!def_sb)
+ return 0;
+
mb(); /* status block is written to by the chip */
if (sc->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
static void
bnx2x_handle_classification_eqe(struct bnx2x_softc *sc, union event_ring_elem *elem)
{
- unsigned long ramrod_flags = 0;
+ uint32_t ramrod_flags = 0;
int rc = 0;
uint32_t cid = elem->message.data.eth_event.echo & BNX2X_SWCID_MASK;
struct ecore_vlan_mac_obj *vlan_mac_obj;
/* always push next commands out, don't wait here */
- bnx2x_set_bit(RAMROD_CONT, &ramrod_flags);
+ rte_bit_relaxed_set32(RAMROD_CONT, &ramrod_flags);
switch (le32toh(elem->message.data.eth_event.echo) >> BNX2X_SWCID_SHIFT) {
case ECORE_FILTER_MAC_PENDING:
static void bnx2x_handle_rx_mode_eqe(struct bnx2x_softc *sc)
{
- bnx2x_clear_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state);
+ rte_bit_relaxed_clear32(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state);
/* send rx_mode command again if was requested */
- if (bnx2x_test_and_clear_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state)) {
+ if (rte_bit_relaxed_test_and_clear32(ECORE_FILTER_RX_MODE_SCHED,
+ &sc->sp_state))
bnx2x_set_storm_rx_mode(sc);
- }
}
static void bnx2x_update_eq_prod(struct bnx2x_softc *sc, uint16_t prod)
return rc;
}
-static void bnx2x_handle_fp_tq(struct bnx2x_fastpath *fp, int scan_fp)
+static void bnx2x_handle_fp_tq(struct bnx2x_fastpath *fp)
{
struct bnx2x_softc *sc = fp->sc;
uint8_t more_rx = FALSE;
+ /* Make sure FP is initialized */
+ if (!fp->sb_running_index)
+ return;
+
PMD_DEBUG_PERIODIC_LOG(DEBUG, sc,
"---> FP TASK QUEUE (%d) <--", fp->index);
/* update the fastpath index */
bnx2x_update_fp_sb_idx(fp);
- if (scan_fp) {
+ if (rte_atomic32_read(&sc->scan_fp) == 1) {
if (bnx2x_has_rx_work(fp)) {
more_rx = bnx2x_rxeof(sc, fp);
}
if (more_rx) {
/* still more work to do */
- bnx2x_handle_fp_tq(fp, scan_fp);
+ bnx2x_handle_fp_tq(fp);
return;
}
+ /* We have completed slow path completion, clear the flag */
+ rte_atomic32_set(&sc->scan_fp, 0);
}
bnx2x_ack_sb(sc, fp->igu_sb_id, USTORM_ID,
* then calls a separate routine to handle the various
* interrupt causes: link, RX, and TX.
*/
-int bnx2x_intr_legacy(struct bnx2x_softc *sc, int scan_fp)
+int bnx2x_intr_legacy(struct bnx2x_softc *sc)
{
struct bnx2x_fastpath *fp;
uint32_t status, mask;
fp = &sc->fp[i];
mask = (0x2 << (fp->index + CNIC_SUPPORT(sc)));
if (status & mask) {
- bnx2x_handle_fp_tq(fp, scan_fp);
+ /* acknowledge and disable further fastpath interrupts */
+ bnx2x_ack_sb(sc, fp->igu_sb_id, USTORM_ID,
+ 0, IGU_INT_DISABLE, 0);
+ bnx2x_handle_fp_tq(fp);
status &= ~mask;
}
}
if (unlikely(status & 0x1)) {
+ /* acknowledge and disable further slowpath interrupts */
+ bnx2x_ack_sb(sc, sc->igu_dsb_id, USTORM_ID,
+ 0, IGU_INT_DISABLE, 0);
rc = bnx2x_handle_sp_tq(sc);
status &= ~0x1;
}
PMD_INIT_FUNC_TRACE(sc);
/* prepare the parameters for function state transitions */
- bnx2x_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+ rte_bit_relaxed_set32(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
func_params.f_obj = &sc->func_obj;
func_params.cmd = ECORE_F_CMD_HW_INIT;
{
struct bnx2x_fastpath *fp = &sc->fp[idx];
uint32_t cids[ECORE_MULTI_TX_COS] = { 0 };
- unsigned long q_type = 0;
+ uint32_t q_type = 0;
int cos;
fp->sc = sc;
bnx2x_update_fp_sb_idx(fp);
/* Configure Queue State object */
- bnx2x_set_bit(ECORE_Q_TYPE_HAS_RX, &q_type);
- bnx2x_set_bit(ECORE_Q_TYPE_HAS_TX, &q_type);
+ rte_bit_relaxed_set32(ECORE_Q_TYPE_HAS_RX, &q_type);
+ rte_bit_relaxed_set32(ECORE_Q_TYPE_HAS_TX, &q_type);
ecore_init_queue_obj(sc,
&sc->sp_objs[idx].q_obj,
bnx2x_update_rx_prod(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp,
uint16_t rx_bd_prod, uint16_t rx_cq_prod)
{
- union ustorm_eth_rx_producers rx_prods;
+ struct ustorm_eth_rx_producers rx_prods;
uint32_t i;
+ memset(&rx_prods, 0, sizeof(rx_prods));
+
/* update producers */
- rx_prods.prod.bd_prod = rx_bd_prod;
- rx_prods.prod.cqe_prod = rx_cq_prod;
- rx_prods.prod.reserved = 0;
+ rx_prods.bd_prod = rx_bd_prod;
+ rx_prods.cqe_prod = rx_cq_prod;
/*
* Make sure that the BD and SGE data is updated before updating the
wmb();
for (i = 0; i < (sizeof(rx_prods) / 4); i++) {
- REG_WR(sc,
- (fp->ustorm_rx_prods_offset + (i * 4)),
- rx_prods.raw_data[i]);
+ REG_WR(sc, (fp->ustorm_rx_prods_offset + (i * 4)),
+ ((uint32_t *)&rx_prods)[i]);
}
wmb(); /* keep prod updates ordered */
{
int i;
- if (IS_MF_SI(sc)) {
-/*
- * In switch independent mode, the TSTORM needs to accept
- * packets that failed classification, since approximate match
- * mac addresses aren't written to NIG LLH.
- */
- REG_WR8(sc,
- (BAR_TSTRORM_INTMEM +
- TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET), 2);
- } else
- REG_WR8(sc,
- (BAR_TSTRORM_INTMEM +
- TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET), 0);
-
/*
* Zero this manually as its initialization is currently missing
* in the initTool.
VNICS_PER_PATH(sc));
/* RSS configuration object */
- ecore_init_rss_config_obj(&sc->rss_conf_obj,
- sc->fp[0].cl_id,
- sc->fp[0].index,
- SC_FUNC(sc),
- SC_FUNC(sc),
+ ecore_init_rss_config_obj(sc, &sc->rss_conf_obj, sc->fp->cl_id,
+ sc->fp->index, SC_FUNC(sc), SC_FUNC(sc),
BNX2X_SP(sc, rss_rdata),
(rte_iova_t)BNX2X_SP_MAPPING(sc, rss_rdata),
- ECORE_FILTER_RSS_CONF_PENDING,
- &sc->sp_state, ECORE_OBJ_TYPE_RX);
+ ECORE_FILTER_RSS_CONF_PENDING, &sc->sp_state,
+ ECORE_OBJ_TYPE_RX);
}
/*
&func_params.params.start;
/* Prepare parameters for function state transitions */
- bnx2x_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+ rte_bit_relaxed_set32(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
func_params.f_obj = &sc->func_obj;
func_params.cmd = ECORE_F_CMD_START;
start_params->network_cos_mode = FW_WRR;
}
- start_params->gre_tunnel_mode = 0;
- start_params->gre_tunnel_rss = 0;
-
return ecore_func_state_change(sc, &func_params);
}
/* If there is no power capability, silently succeed */
if (!(sc->devinfo.pcie_cap_flags & BNX2X_PM_CAPABLE_FLAG)) {
- PMD_DRV_LOG(WARNING, sc, "No power capability");
+ PMD_DRV_LOG(INFO, sc, "No power capability");
return 0;
}
uint8_t cos;
int cxt_index, cxt_offset;
- bnx2x_set_bit(ECORE_Q_FLG_HC, &init_params->rx.flags);
- bnx2x_set_bit(ECORE_Q_FLG_HC, &init_params->tx.flags);
+ rte_bit_relaxed_set32(ECORE_Q_FLG_HC, &init_params->rx.flags);
+ rte_bit_relaxed_set32(ECORE_Q_FLG_HC, &init_params->tx.flags);
- bnx2x_set_bit(ECORE_Q_FLG_HC_EN, &init_params->rx.flags);
- bnx2x_set_bit(ECORE_Q_FLG_HC_EN, &init_params->tx.flags);
+ rte_bit_relaxed_set32(ECORE_Q_FLG_HC_EN, &init_params->rx.flags);
+ rte_bit_relaxed_set32(ECORE_Q_FLG_HC_EN, &init_params->tx.flags);
/* HC rate */
init_params->rx.hc_rate =
static unsigned long
bnx2x_get_common_flags(struct bnx2x_softc *sc, uint8_t zero_stats)
{
- unsigned long flags = 0;
+ uint32_t flags = 0;
/* PF driver will always initialize the Queue to an ACTIVE state */
- bnx2x_set_bit(ECORE_Q_FLG_ACTIVE, &flags);
+ rte_bit_relaxed_set32(ECORE_Q_FLG_ACTIVE, &flags);
/*
* tx only connections collect statistics (on the same index as the
* connection is initialized.
*/
- bnx2x_set_bit(ECORE_Q_FLG_STATS, &flags);
+ rte_bit_relaxed_set32(ECORE_Q_FLG_STATS, &flags);
if (zero_stats) {
- bnx2x_set_bit(ECORE_Q_FLG_ZERO_STATS, &flags);
+ rte_bit_relaxed_set32(ECORE_Q_FLG_ZERO_STATS, &flags);
}
/*
* CoS-ness doesn't survive the loopback
*/
if (sc->flags & BNX2X_TX_SWITCHING) {
- bnx2x_set_bit(ECORE_Q_FLG_TX_SWITCH, &flags);
+ rte_bit_relaxed_set32(ECORE_Q_FLG_TX_SWITCH, &flags);
}
- bnx2x_set_bit(ECORE_Q_FLG_PCSUM_ON_PKT, &flags);
+ rte_bit_relaxed_set32(ECORE_Q_FLG_PCSUM_ON_PKT, &flags);
return flags;
}
static unsigned long bnx2x_get_q_flags(struct bnx2x_softc *sc, uint8_t leading)
{
- unsigned long flags = 0;
+ uint32_t flags = 0;
if (IS_MF_SD(sc)) {
- bnx2x_set_bit(ECORE_Q_FLG_OV, &flags);
+ rte_bit_relaxed_set32(ECORE_Q_FLG_OV, &flags);
}
if (leading) {
- bnx2x_set_bit(ECORE_Q_FLG_LEADING_RSS, &flags);
- bnx2x_set_bit(ECORE_Q_FLG_MCAST, &flags);
+ rte_bit_relaxed_set32(ECORE_Q_FLG_LEADING_RSS, &flags);
+ rte_bit_relaxed_set32(ECORE_Q_FLG_MCAST, &flags);
}
- bnx2x_set_bit(ECORE_Q_FLG_VLAN, &flags);
+ rte_bit_relaxed_set32(ECORE_Q_FLG_VLAN, &flags);
/* merge with common flags */
return flags | bnx2x_get_common_flags(sc, TRUE);
q_params.q_obj = &BNX2X_SP_OBJ(sc, fp).q_obj;
/* we want to wait for completion in this context */
- bnx2x_set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
+ rte_bit_relaxed_set32(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
/* prepare the INIT parameters */
bnx2x_pf_q_prep_init(sc, fp, &q_params.params.init);
params.rss_obj = rss_obj;
- bnx2x_set_bit(RAMROD_COMP_WAIT, ¶ms.ramrod_flags);
+ rte_bit_relaxed_set32(RAMROD_COMP_WAIT, ¶ms.ramrod_flags);
- bnx2x_set_bit(ECORE_RSS_MODE_REGULAR, ¶ms.rss_flags);
+ rte_bit_relaxed_set32(ECORE_RSS_MODE_REGULAR, ¶ms.rss_flags);
/* RSS configuration */
- bnx2x_set_bit(ECORE_RSS_IPV4, ¶ms.rss_flags);
- bnx2x_set_bit(ECORE_RSS_IPV4_TCP, ¶ms.rss_flags);
- bnx2x_set_bit(ECORE_RSS_IPV6, ¶ms.rss_flags);
- bnx2x_set_bit(ECORE_RSS_IPV6_TCP, ¶ms.rss_flags);
+ rte_bit_relaxed_set32(ECORE_RSS_IPV4, ¶ms.rss_flags);
+ rte_bit_relaxed_set32(ECORE_RSS_IPV4_TCP, ¶ms.rss_flags);
+ rte_bit_relaxed_set32(ECORE_RSS_IPV6, ¶ms.rss_flags);
+ rte_bit_relaxed_set32(ECORE_RSS_IPV6_TCP, ¶ms.rss_flags);
if (rss_obj->udp_rss_v4) {
- bnx2x_set_bit(ECORE_RSS_IPV4_UDP, ¶ms.rss_flags);
+ rte_bit_relaxed_set32(ECORE_RSS_IPV4_UDP, ¶ms.rss_flags);
}
if (rss_obj->udp_rss_v6) {
- bnx2x_set_bit(ECORE_RSS_IPV6_UDP, ¶ms.rss_flags);
+ rte_bit_relaxed_set32(ECORE_RSS_IPV6_UDP, ¶ms.rss_flags);
}
/* Hash bits */
params.rss_key[i] = (uint32_t) rte_rand();
}
- bnx2x_set_bit(ECORE_RSS_SET_SRCH, ¶ms.rss_flags);
+ rte_bit_relaxed_set32(ECORE_RSS_SET_SRCH, ¶ms.rss_flags);
}
if (IS_PF(sc))
static int
bnx2x_set_mac_one(struct bnx2x_softc *sc, uint8_t * mac,
struct ecore_vlan_mac_obj *obj, uint8_t set, int mac_type,
- unsigned long *ramrod_flags)
+ uint32_t *ramrod_flags)
{
struct ecore_vlan_mac_ramrod_params ramrod_param;
int rc;
ramrod_param.ramrod_flags = *ramrod_flags;
/* fill a user request section if needed */
- if (!bnx2x_test_bit(RAMROD_CONT, ramrod_flags)) {
+ if (!rte_bit_relaxed_get32(RAMROD_CONT, ramrod_flags)) {
rte_memcpy(ramrod_param.user_req.u.mac.mac, mac,
ETH_ALEN);
- bnx2x_set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags);
+ rte_bit_relaxed_set32(mac_type,
+ &ramrod_param.user_req.vlan_mac_flags);
/* Set the command: ADD or DEL */
ramrod_param.user_req.cmd = (set) ? ECORE_VLAN_MAC_ADD :
static int bnx2x_set_eth_mac(struct bnx2x_softc *sc, uint8_t set)
{
- unsigned long ramrod_flags = 0;
+ uint32_t ramrod_flags = 0;
PMD_DRV_LOG(DEBUG, sc, "Adding Ethernet MAC");
- bnx2x_set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+ rte_bit_relaxed_set32(RAMROD_COMP_WAIT, &ramrod_flags);
/* Eth MAC is set on RSS leading client (fp[0]) */
return bnx2x_set_mac_one(sc, sc->link_params.mac_addr,
/* Link is down */
if (!sc->link_vars.link_up || (sc->flags & BNX2X_MF_FUNC_DIS)) {
- bnx2x_set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
+ rte_bit_relaxed_set32(BNX2X_LINK_REPORT_LINK_DOWN,
&data->link_report_flags);
}
/* Full DUPLEX */
if (sc->link_vars.duplex == DUPLEX_FULL) {
- bnx2x_set_bit(BNX2X_LINK_REPORT_FULL_DUPLEX,
+ rte_bit_relaxed_set32(BNX2X_LINK_REPORT_FULL_DUPLEX,
&data->link_report_flags);
}
/* Rx Flow Control is ON */
if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_RX) {
- bnx2x_set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
+ rte_bit_relaxed_set32(BNX2X_LINK_REPORT_RX_FC_ON,
+ &data->link_report_flags);
}
/* Tx Flow Control is ON */
if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) {
- bnx2x_set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
+ rte_bit_relaxed_set32(BNX2X_LINK_REPORT_TX_FC_ON,
+ &data->link_report_flags);
}
}
/* report link status to OS, should be called under phy_lock */
-static void bnx2x_link_report(struct bnx2x_softc *sc)
+static void bnx2x_link_report_locked(struct bnx2x_softc *sc)
{
struct bnx2x_link_report_data cur_data;
/* Don't report link down or exactly the same link status twice */
if (!memcmp(&cur_data, &sc->last_reported_link, sizeof(cur_data)) ||
- (bnx2x_test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
+ (rte_bit_relaxed_get32(BNX2X_LINK_REPORT_LINK_DOWN,
&sc->last_reported_link.link_report_flags) &&
- bnx2x_test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
+ rte_bit_relaxed_get32(BNX2X_LINK_REPORT_LINK_DOWN,
&cur_data.link_report_flags))) {
return;
}
+ ELINK_DEBUG_P2(sc, "Change in link status : cur_data = %x, last_reported_link = %x",
+ cur_data.link_report_flags,
+ sc->last_reported_link.link_report_flags);
+
sc->link_cnt++;
+ ELINK_DEBUG_P1(sc, "link status change count = %x", sc->link_cnt);
/* report new link params and remember the state for the next time */
rte_memcpy(&sc->last_reported_link, &cur_data, sizeof(cur_data));
- if (bnx2x_test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
+ if (rte_bit_relaxed_get32(BNX2X_LINK_REPORT_LINK_DOWN,
&cur_data.link_report_flags)) {
- PMD_DRV_LOG(INFO, sc, "NIC Link is Down");
+ ELINK_DEBUG_P0(sc, "NIC Link is Down");
} else {
__rte_unused const char *duplex;
__rte_unused const char *flow;
- if (bnx2x_test_and_clear_bit(BNX2X_LINK_REPORT_FULL_DUPLEX,
- &cur_data.link_report_flags)) {
+ if (rte_bit_relaxed_test_and_clear32
+ (BNX2X_LINK_REPORT_FULL_DUPLEX,
+ &cur_data.link_report_flags)) {
duplex = "full";
+ ELINK_DEBUG_P0(sc, "link set to full duplex");
} else {
duplex = "half";
+ ELINK_DEBUG_P0(sc, "link set to half duplex");
}
/*
* enabled.
*/
if (cur_data.link_report_flags) {
- if (bnx2x_test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
+ if (rte_bit_relaxed_get32
+ (BNX2X_LINK_REPORT_RX_FC_ON,
&cur_data.link_report_flags) &&
- bnx2x_test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
+ rte_bit_relaxed_get32(BNX2X_LINK_REPORT_TX_FC_ON,
&cur_data.link_report_flags)) {
flow = "ON - receive & transmit";
- } else if (bnx2x_test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
- &cur_data.link_report_flags) &&
- !bnx2x_test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
+ } else if (rte_bit_relaxed_get32
+ (BNX2X_LINK_REPORT_RX_FC_ON,
+ &cur_data.link_report_flags) &&
+ !rte_bit_relaxed_get32
+ (BNX2X_LINK_REPORT_TX_FC_ON,
&cur_data.link_report_flags)) {
flow = "ON - receive";
- } else if (!bnx2x_test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
+ } else if (!rte_bit_relaxed_get32
+ (BNX2X_LINK_REPORT_RX_FC_ON,
&cur_data.link_report_flags) &&
- bnx2x_test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
- &cur_data.link_report_flags)) {
+ rte_bit_relaxed_get32
+ (BNX2X_LINK_REPORT_TX_FC_ON,
+ &cur_data.link_report_flags)) {
flow = "ON - transmit";
} else {
flow = "none"; /* possible? */
}
}
+/* Report link status to the OS for callers that do NOT already hold the
+ * PHY lock: wraps bnx2x_link_report_locked() with acquire/release.
+ */
+static void
+bnx2x_link_report(struct bnx2x_softc *sc)
+{
+	bnx2x_acquire_phy_lock(sc);
+	bnx2x_link_report_locked(sc);
+	bnx2x_release_phy_lock(sc);
+}
+
void bnx2x_link_status_update(struct bnx2x_softc *sc)
{
if (sc->state != BNX2X_STATE_OPEN) {
}
bnx2x_link_report(sc);
} else {
- bnx2x_link_report(sc);
+ bnx2x_link_report_locked(sc);
bnx2x_stats_handle(sc, STATS_EVENT_LINK_UP);
}
}
bnx2x_set_requested_fc(sc);
+ bnx2x_acquire_phy_lock(sc);
+
if (load_mode == LOAD_DIAG) {
lp->loopback_mode = ELINK_LOOPBACK_XGXS;
/* Prefer doing PHY loopback at 10G speed, if possible */
rc = elink_phy_init(&sc->link_params, &sc->link_vars);
+ bnx2x_release_phy_lock(sc);
+
bnx2x_calc_fc_adv(sc);
if (sc->link_vars.link_up) {
{
if ((sc->state != BNX2X_STATE_OPEN) ||
(atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_STOP)) {
- PMD_DRV_LOG(INFO, sc, "periodic callout exit (state=0x%x)",
+ PMD_DRV_LOG(DEBUG, sc, "periodic callout exit (state=0x%x)",
sc->state);
return;
}
*/
mb();
if (sc->port.pmf) {
+ bnx2x_acquire_phy_lock(sc);
elink_period_func(&sc->link_params, &sc->link_vars);
+ bnx2x_release_phy_lock(sc);
}
}
#ifdef BNX2X_PULSE
}
}
+ /* allocate the host hardware/software hsi structures */
+ if (bnx2x_alloc_hsi_mem(sc) != 0) {
+ PMD_DRV_LOG(ERR, sc, "bnx2x_alloc_hsi_mem allocation failed");
+ sc->state = BNX2X_STATE_CLOSED;
+ rc = -ENOMEM;
+ goto bnx2x_nic_load_error0;
+ }
+
if (bnx2x_alloc_fw_stats_mem(sc) != 0) {
sc->state = BNX2X_STATE_CLOSED;
rc = -ENOMEM;
bnx2x_set_rx_mode(sc);
/* wait for all pending SP commands to complete */
- if (IS_PF(sc) && !bnx2x_wait_sp_comp(sc, ~0x0UL)) {
+ if (IS_PF(sc) && !bnx2x_wait_sp_comp(sc, ~0x0U)) {
PMD_DRV_LOG(NOTICE, sc, "Timeout waiting for all SPs to complete!");
bnx2x_periodic_stop(sc);
bnx2x_nic_unload(sc, UNLOAD_CLOSE, FALSE);
bnx2x_nic_load_error0:
bnx2x_free_fw_stats_mem(sc);
+ bnx2x_free_hsi_mem(sc);
bnx2x_free_mem(sc);
return rc;
~ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
}
+ val = sc->devinfo.bc_ver >> 8;
+ if (val < BNX2X_BC_VER) {
+ /* for now only warn later we might need to enforce this */
+ PMD_DRV_LOG(NOTICE, sc, "This driver needs bc_ver %X but found %X, please upgrade BC",
+ BNX2X_BC_VER, val);
+ }
+ sc->link_params.feature_config_flags |=
+ (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
+ ELINK_FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY :
+ 0;
+
+ sc->link_params.feature_config_flags |=
+ (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
+ ELINK_FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;
+ sc->link_params.feature_config_flags |=
+ (val >= REQ_BC_VER_4_VRFY_AFEX_SUPPORTED) ?
+ ELINK_FEATURE_CONFIG_BC_SUPPORTS_AFEX : 0;
+ sc->link_params.feature_config_flags |=
+ (val >= REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED) ?
+ ELINK_FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED : 0;
+
/* get the initial value of the link params */
sc->link_params.multi_phy_config =
SHMEM_RD(sc, dev_info.port_hw_config[port].multi_phy_config);
((sc->devinfo.bc_ver >> 24) & 0xff),
((sc->devinfo.bc_ver >> 16) & 0xff),
((sc->devinfo.bc_ver >> 8) & 0xff));
- PMD_DRV_LOG(INFO, sc, "Bootcode version: %s", sc->devinfo.bc_ver_str);
+ PMD_DRV_LOG(DEBUG, sc, "Bootcode version: %s", sc->devinfo.bc_ver_str);
/* get the bootcode shmem address */
sc->devinfo.mf_cfg_base = bnx2x_get_shmem_mf_cfg_base(sc);
uint32_t i;
if (IS_PF(sc)) {
-/************************/
-/* DEFAULT STATUS BLOCK */
-/************************/
+ /************************/
+ /* DEFAULT STATUS BLOCK */
+ /************************/
if (bnx2x_dma_alloc(sc, sizeof(struct host_sp_status_block),
&sc->def_sb_dma, "def_sb",
sc->def_sb =
(struct host_sp_status_block *)sc->def_sb_dma.vaddr;
-/***************/
-/* EVENT QUEUE */
-/***************/
+ /***************/
+ /* EVENT QUEUE */
+ /***************/
if (bnx2x_dma_alloc(sc, BNX2X_PAGE_SIZE,
&sc->eq_dma, "ev_queue",
sc->eq = (union event_ring_elem *)sc->eq_dma.vaddr;
-/*************/
-/* SLOW PATH */
-/*************/
+ /*************/
+ /* SLOW PATH */
+ /*************/
if (bnx2x_dma_alloc(sc, sizeof(struct bnx2x_slowpath),
&sc->sp_dma, "sp",
sc->sp = (struct bnx2x_slowpath *)sc->sp_dma.vaddr;
-/*******************/
-/* SLOW PATH QUEUE */
-/*******************/
+ /*******************/
+ /* SLOW PATH QUEUE */
+ /*******************/
if (bnx2x_dma_alloc(sc, BNX2X_PAGE_SIZE,
&sc->spq_dma, "sp_queue",
sc->spq = (struct eth_spe *)sc->spq_dma.vaddr;
-/***************************/
-/* FW DECOMPRESSION BUFFER */
-/***************************/
+ /***************************/
+ /* FW DECOMPRESSION BUFFER */
+ /***************************/
if (bnx2x_dma_alloc(sc, FW_BUF_SIZE, &sc->gz_buf_dma,
"fw_buf", RTE_CACHE_LINE_SIZE) != 0) {
fp->sc = sc;
fp->index = i;
-/*******************/
-/* FP STATUS BLOCK */
-/*******************/
+ /*******************/
+ /* FP STATUS BLOCK */
+ /*******************/
snprintf(buf, sizeof(buf), "fp_%d_sb", i);
if (bnx2x_dma_alloc(sc, sizeof(union bnx2x_host_hc_status_block),
for (i = 0; i < sc->num_queues; i++) {
fp = &sc->fp[i];
-/*******************/
-/* FP STATUS BLOCK */
-/*******************/
+ /*******************/
+ /* FP STATUS BLOCK */
+ /*******************/
memset(&fp->status_block, 0, sizeof(fp->status_block));
+ bnx2x_dma_free(&fp->sb_dma);
}
- /***************************/
- /* FW DECOMPRESSION BUFFER */
- /***************************/
+ if (IS_PF(sc)) {
+ /***************************/
+ /* FW DECOMPRESSION BUFFER */
+ /***************************/
- sc->gz_buf = NULL;
+ bnx2x_dma_free(&sc->gz_buf_dma);
+ sc->gz_buf = NULL;
- /*******************/
- /* SLOW PATH QUEUE */
- /*******************/
+ /*******************/
+ /* SLOW PATH QUEUE */
+ /*******************/
- sc->spq = NULL;
+ bnx2x_dma_free(&sc->spq_dma);
+ sc->spq = NULL;
- /*************/
- /* SLOW PATH */
- /*************/
+ /*************/
+ /* SLOW PATH */
+ /*************/
- sc->sp = NULL;
+ bnx2x_dma_free(&sc->sp_dma);
+ sc->sp = NULL;
- /***************/
- /* EVENT QUEUE */
- /***************/
+ /***************/
+ /* EVENT QUEUE */
+ /***************/
- sc->eq = NULL;
+ bnx2x_dma_free(&sc->eq_dma);
+ sc->eq = NULL;
- /************************/
- /* DEFAULT STATUS BLOCK */
- /************************/
-
- sc->def_sb = NULL;
+ /************************/
+ /* DEFAULT STATUS BLOCK */
+ /************************/
+ bnx2x_dma_free(&sc->def_sb_dma);
+ sc->def_sb = NULL;
+ }
}
/*
uint32_t fw, hw_lock_reg, hw_lock_val;
uint32_t rc = 0;
+ PMD_INIT_FUNC_TRACE(sc);
+
/*
* Clear HW from errors which may have resulted from an interrupted
* DMAE transaction.
bnx2x_prev_interrupted_dmae(sc);
/* Release previously held locks */
- if (SC_FUNC(sc) <= 5)
- hw_lock_reg = (MISC_REG_DRIVER_CONTROL_1 + SC_FUNC(sc) * 8);
- else
- hw_lock_reg =
- (MISC_REG_DRIVER_CONTROL_7 + (SC_FUNC(sc) - 6) * 8);
+ hw_lock_reg = (SC_FUNC(sc) <= 5) ?
+ (MISC_REG_DRIVER_CONTROL_1 + SC_FUNC(sc) * 8) :
+ (MISC_REG_DRIVER_CONTROL_7 + (SC_FUNC(sc) - 6) * 8);
hw_lock_val = (REG_RD(sc, hw_lock_reg));
if (hw_lock_val) {
if (hw_lock_val & HW_LOCK_RESOURCE_NVRAM) {
+ PMD_DRV_LOG(DEBUG, sc, "Releasing previously held NVRAM lock");
REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
(MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << SC_PORT(sc)));
}
+ PMD_DRV_LOG(DEBUG, sc, "Releasing previously held HW lock");
REG_WR(sc, hw_lock_reg, 0xffffffff);
}
if (MCPR_ACCESS_LOCK_LOCK & REG_RD(sc, MCP_REG_MCPR_ACCESS_LOCK)) {
+ PMD_DRV_LOG(DEBUG, sc, "Releasing previously held ALR");
REG_WR(sc, MCP_REG_MCPR_ACCESS_LOCK, 0);
}
return -ENOMEM;
}
-#ifndef __FreeBSD__
+#ifndef RTE_EXEC_ENV_FREEBSD
pci_read(sc, PCI_STATUS, &status, 2);
if (!(status & PCI_STATUS_CAP_LIST)) {
#else
return -1;
}
-#ifndef __FreeBSD__
+#ifndef RTE_EXEC_ENV_FREEBSD
pci_read(sc, PCI_CAPABILITY_LIST, &pci_cap.next, 1);
#else
pci_read(sc, PCIR_CAP_PTR, &pci_cap.next, 1);
}
#define FW_HEADER_LEN 104
-#define FW_NAME_57711 "/lib/firmware/bnx2x/bnx2x-e1h-7.2.51.0.fw"
-#define FW_NAME_57810 "/lib/firmware/bnx2x/bnx2x-e2-7.2.51.0.fw"
+#define FW_NAME_57711 "/lib/firmware/bnx2x/bnx2x-e1h-7.13.11.0.fw"
+#define FW_NAME_57810 "/lib/firmware/bnx2x/bnx2x-e2-7.13.11.0.fw"
void bnx2x_load_firmware(struct bnx2x_softc *sc)
{
const char *fwname;
- int f;
- struct stat st;
+ void *buf;
+ size_t bufsz;
fwname = sc->devinfo.device_id == CHIP_NUM_57711
? FW_NAME_57711 : FW_NAME_57810;
- f = open(fwname, O_RDONLY);
- if (f < 0) {
+ if (rte_firmware_read(fwname, &buf, &bufsz) != 0) {
PMD_DRV_LOG(NOTICE, sc, "Can't open firmware file");
return;
}
- if (fstat(f, &st) < 0) {
- PMD_DRV_LOG(NOTICE, sc, "Can't stat firmware file");
- close(f);
- return;
- }
-
- sc->firmware = rte_zmalloc("bnx2x_fw", st.st_size, RTE_CACHE_LINE_SIZE);
+ sc->firmware = rte_zmalloc("bnx2x_fw", bufsz, RTE_CACHE_LINE_SIZE);
if (!sc->firmware) {
PMD_DRV_LOG(NOTICE, sc, "Can't allocate memory for firmware");
- close(f);
- return;
+ goto out;
}
- if (read(f, sc->firmware, st.st_size) != st.st_size) {
- PMD_DRV_LOG(NOTICE, sc, "Can't read firmware data");
- close(f);
- return;
- }
- close(f);
-
- sc->fw_len = st.st_size;
+ sc->fw_len = bufsz;
if (sc->fw_len < FW_HEADER_LEN) {
PMD_DRV_LOG(NOTICE, sc,
"Invalid fw size: %" PRIu64, sc->fw_len);
- return;
+ goto out;
}
+
+ memcpy(sc->firmware, buf, sc->fw_len);
PMD_DRV_LOG(DEBUG, sc, "fw_len = %" PRIu64, sc->fw_len);
+out:
+ free(buf);
}
static void
bnx2x_get_phy_info(sc);
} else {
/* Left mac of VF unfilled, PF should set it for VF */
- memset(sc->link_params.mac_addr, 0, ETHER_ADDR_LEN);
+ memset(sc->link_params.mac_addr, 0, RTE_ETHER_ADDR_LEN);
}
sc->wol = 0;
/* set the default MTU (changed via ifconfig) */
- sc->mtu = ETHER_MTU;
+ sc->mtu = RTE_ETHER_MTU;
bnx2x_set_modes_bitmap(sc);
sc->fw_seq =
(SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_mb_header) &
DRV_MSG_SEQ_NUMBER_MASK);
+ PMD_DRV_LOG(DEBUG, sc, "prev unload fw_seq 0x%04x",
+ sc->fw_seq);
bnx2x_prev_unload(sc);
}
shmem2_base[1] = SHMEM2_RD(sc, other_shmem2_base_addr);
}
+ bnx2x_acquire_phy_lock(sc);
elink_common_init_phy(sc, shmem_base, shmem2_base,
sc->devinfo.chip_id, 0);
+ bnx2x_release_phy_lock(sc);
}
static void bnx2x_pf_disable(struct bnx2x_softc *sc)
/* clean the DMAE memory */
sc->dmae_ready = 1;
- ecore_init_fill(sc, TSEM_REG_PRAM, 0, 8);
+ ecore_init_fill(sc, TSEM_REG_PRAM, 0, 8, 1);
ecore_init_block(sc, BLOCK_TCM, PHASE_COMMON);
ecore_init_block(sc, BLOCK_TM, PHASE_COMMON);
ecore_init_block(sc, BLOCK_DORQ, PHASE_COMMON);
- REG_WR(sc, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT);
if (!CHIP_REV_IS_SLOW(sc)) {
/* enable hw interrupt from doorbell Q */
static void bnx2x_link_reset(struct bnx2x_softc *sc)
{
if (!BNX2X_NOMCP(sc)) {
+ bnx2x_acquire_phy_lock(sc);
elink_lfa_reset(&sc->link_params, &sc->link_vars);
+ bnx2x_release_phy_lock(sc);
} else {
if (!CHIP_REV_IS_SLOW(sc)) {
PMD_DRV_LOG(WARNING, sc,
ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
ilt_cli.client_num = ILT_CLIENT_TM;
- ecore_ilt_boundry_init_op(sc, &ilt_cli, 0);
+ ecore_ilt_boundary_init_op(sc, &ilt_cli, 0, INITOP_CLEAR);
}
/* this assumes that reset_port() called before reset_func() */
for (i = 0; i < 5; i++)
if (flags & (1 << i)) {
- strcat(flag_str, flag[i]);
+ strlcat(flag_str, flag[i], sizeof(flag_str));
flags ^= (1 << i);
}
if (flags) {
static char unknown[BNX2X_INFO_STR_MAX];
snprintf(unknown, 32, "Unknown flag mask %x", flags);
- strcat(flag_str, unknown);
+ strlcat(flag_str, unknown, sizeof(flag_str));
}
return flag_str;
}
-/*
- * Prints useful adapter info.
- */
+/* Prints useful adapter info. */
void bnx2x_print_adapter_info(struct bnx2x_softc *sc)
{
int i = 0;
- __rte_unused uint32_t ext_phy_type;
- PMD_INIT_FUNC_TRACE(sc);
- if (sc->link_vars.phy_flags & PHY_XGXS_FLAG)
- ext_phy_type = ELINK_XGXS_EXT_PHY_TYPE(REG_RD(sc,
- sc->
- devinfo.shmem_base
- + offsetof(struct
- shmem_region,
- dev_info.port_hw_config
- [0].external_phy_config)));
- else
- ext_phy_type = ELINK_SERDES_EXT_PHY_TYPE(REG_RD(sc,
- sc->
- devinfo.shmem_base
- +
- offsetof(struct
- shmem_region,
- dev_info.port_hw_config
- [0].external_phy_config)));
-
- PMD_DRV_LOG(INFO, sc, "\n\n===================================\n");
+ PMD_DRV_LOG(INFO, sc, "========================================");
+ /* DPDK and Driver versions */
+ PMD_DRV_LOG(INFO, sc, "%12s : %s", "DPDK",
+ rte_version());
+ PMD_DRV_LOG(INFO, sc, "%12s : %s", "Driver",
+ bnx2x_pmd_version());
+ /* Firmware versions. */
+ PMD_DRV_LOG(INFO, sc, "%12s : %d.%d.%d",
+ "Firmware",
+ BNX2X_5710_FW_MAJOR_VERSION,
+ BNX2X_5710_FW_MINOR_VERSION,
+ BNX2X_5710_FW_REVISION_VERSION);
+ PMD_DRV_LOG(INFO, sc, "%12s : %s",
+ "Bootcode", sc->devinfo.bc_ver_str);
/* Hardware chip info. */
PMD_DRV_LOG(INFO, sc, "%12s : %#08x", "ASIC", sc->devinfo.chip_id);
PMD_DRV_LOG(INFO, sc, "%12s : %c%d", "Rev", (CHIP_REV(sc) >> 12) + 'A',
(CHIP_METAL(sc) >> 4));
-
- /* Bus info. */
- PMD_DRV_LOG(INFO, sc,
- "%12s : %d, ", "Bus PCIe", sc->devinfo.pcie_link_width);
+ /* Bus PCIe info. */
+ PMD_DRV_LOG(INFO, sc, "%12s : 0x%x", "Vendor Id",
+ sc->devinfo.vendor_id);
+ PMD_DRV_LOG(INFO, sc, "%12s : 0x%x", "Device Id",
+ sc->devinfo.device_id);
+ PMD_DRV_LOG(INFO, sc, "%12s : width x%d, ", "Bus PCIe",
+ sc->devinfo.pcie_link_width);
switch (sc->devinfo.pcie_link_speed) {
case 1:
PMD_DRV_LOG(INFO, sc, "%23s", "2.5 Gbps");
default:
PMD_DRV_LOG(INFO, sc, "%33s", "Unknown link speed");
}
-
/* Device features. */
PMD_DRV_LOG(INFO, sc, "%12s : ", "Flags");
-
/* Miscellaneous flags. */
if (sc->devinfo.pcie_cap_flags & BNX2X_MSI_CAPABLE_FLAG) {
PMD_DRV_LOG(INFO, sc, "%18s", "MSI");
i++;
}
-
if (sc->devinfo.pcie_cap_flags & BNX2X_MSIX_CAPABLE_FLAG) {
if (i > 0)
PMD_DRV_LOG(INFO, sc, "|");
PMD_DRV_LOG(INFO, sc, "%20s", "MSI-X");
i++;
}
+ PMD_DRV_LOG(INFO, sc, "%12s : %s", "OVLAN", (OVLAN(sc) ? "YES" : "NO"));
+ PMD_DRV_LOG(INFO, sc, "%12s : %s", "MF", (IS_MF(sc) ? "YES" : "NO"));
+ PMD_DRV_LOG(INFO, sc, "========================================");
+}
- if (IS_PF(sc)) {
- PMD_DRV_LOG(INFO, sc, "%12s : ", "Queues");
- switch (sc->sp->rss_rdata.rss_mode) {
- case ETH_RSS_MODE_DISABLED:
- PMD_DRV_LOG(INFO, sc, "%19s", "None");
- break;
- case ETH_RSS_MODE_REGULAR:
- PMD_DRV_LOG(INFO, sc,
- "%18s : %d", "RSS", sc->num_queues);
- break;
- default:
- PMD_DRV_LOG(INFO, sc, "%22s", "Unknown");
- break;
- }
- }
-
- /* RTE and Driver versions */
- PMD_DRV_LOG(INFO, sc, "%12s : %s", "DPDK",
- rte_version());
- PMD_DRV_LOG(INFO, sc, "%12s : %s", "Driver",
- bnx2x_pmd_version());
+/* Prints useful device info. */
+void bnx2x_print_device_info(struct bnx2x_softc *sc)
+{
+ __rte_unused uint32_t ext_phy_type;
+ uint32_t offset, reg_val;
- /* Firmware versions and device features. */
- PMD_DRV_LOG(INFO, sc, "%12s : %d.%d.%d",
- "Firmware",
- BNX2X_5710_FW_MAJOR_VERSION,
- BNX2X_5710_FW_MINOR_VERSION,
- BNX2X_5710_FW_REVISION_VERSION);
- PMD_DRV_LOG(INFO, sc, "%12s : %s",
- "Bootcode", sc->devinfo.bc_ver_str);
+ PMD_INIT_FUNC_TRACE(sc);
+ offset = offsetof(struct shmem_region,
+ dev_info.port_hw_config[0].external_phy_config);
+ reg_val = REG_RD(sc, sc->devinfo.shmem_base + offset);
+ if (sc->link_vars.phy_flags & PHY_XGXS_FLAG)
+ ext_phy_type = ELINK_XGXS_EXT_PHY_TYPE(reg_val);
+ else
+ ext_phy_type = ELINK_SERDES_EXT_PHY_TYPE(reg_val);
- PMD_DRV_LOG(INFO, sc, "\n\n===================================\n");
+ /* Device features. */
PMD_DRV_LOG(INFO, sc, "%12s : %u", "Bnx2x Func", sc->pcie_func);
PMD_DRV_LOG(INFO, sc,
"%12s : %s", "Bnx2x Flags", get_bnx2x_flags(sc->flags));
PMD_DRV_LOG(INFO, sc, "%12s : %s", "DMAE Is",
(sc->dmae_ready ? "Ready" : "Not Ready"));
- PMD_DRV_LOG(INFO, sc, "%12s : %s", "OVLAN", (OVLAN(sc) ? "YES" : "NO"));
- PMD_DRV_LOG(INFO, sc, "%12s : %s", "MF", (IS_MF(sc) ? "YES" : "NO"));
PMD_DRV_LOG(INFO, sc, "%12s : %u", "MTU", sc->mtu);
PMD_DRV_LOG(INFO, sc,
"%12s : %s", "PHY Type", get_ext_phy_type(ext_phy_type));
if (sc->recovery_state)
PMD_DRV_LOG(INFO, sc, "%12s : %s", "Recovery",
get_recovery_state(sc->recovery_state));
+ /* Queue info. */
+ if (IS_PF(sc)) {
+ switch (sc->sp->rss_rdata.rss_mode) {
+ case ETH_RSS_MODE_DISABLED:
+ PMD_DRV_LOG(INFO, sc, "%12s : %s", "Queues", "RSS mode - None");
+ break;
+ case ETH_RSS_MODE_REGULAR:
+ PMD_DRV_LOG(INFO, sc, "%12s : %s,", "Queues", "RSS mode - Regular");
+ PMD_DRV_LOG(INFO, sc, "%16d", sc->num_queues);
+ break;
+ default:
+ PMD_DRV_LOG(INFO, sc, "%12s : %s", "Queues", "RSS mode - Unknown");
+ break;
+ }
+ }
PMD_DRV_LOG(INFO, sc, "%12s : CQ = %lx, EQ = %lx", "SPQ Left",
sc->cq_spq_left, sc->eq_spq_left);
+
PMD_DRV_LOG(INFO, sc,
"%12s : %x", "Switch", sc->link_params.switch_cfg);
- PMD_DRV_LOG(INFO, sc, "\n\n===================================\n");
+ PMD_DRV_LOG(INFO, sc, "pcie_bus=%d, pcie_device=%d",
+ sc->pcie_bus, sc->pcie_device);
+ PMD_DRV_LOG(INFO, sc, "bar0.addr=%p, bar1.addr=%p",
+ sc->bar[BAR0].base_addr, sc->bar[BAR1].base_addr);
+ PMD_DRV_LOG(INFO, sc, "port=%d, path=%d, vnic=%d, func=%d",
+ PORT_ID(sc), PATH_ID(sc), VNIC_ID(sc), FUNC_ID(sc));
}