/*-
- * Copyright (c) 2007-2013 Broadcom Corporation. All rights reserved.
+ * Copyright (c) 2007-2013 Broadcom Corporation.
*
* Eric Davis <edavis@broadcom.com>
* David Christensen <davidch@broadcom.com>
* Gary Zambrano <zambrano@broadcom.com>
*
* Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
+ * Copyright (c) 2015 QLogic Corporation.
* All rights reserved.
+ * www.qlogic.com
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Broadcom Corporation nor the name of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written consent.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * See LICENSE.bnx2x_pmd for copyright and licensing details.
*/
#define BNX2X_DRIVER_VERSION "1.78.18"
#include "ecore_init.h"
#include "ecore_init_ops.h"
+#include "rte_version.h"
#include "rte_pci_dev_ids.h"
#include <sys/types.h>
#include <fcntl.h>
#include <zlib.h>
+#define BNX2X_PMD_VER_PREFIX "BNX2X PMD"
+#define BNX2X_PMD_VERSION_MAJOR 1
+#define BNX2X_PMD_VERSION_MINOR 0
+#define BNX2X_PMD_VERSION_PATCH 0
+
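+/*
+ * Render "BNX2X PMD <driver>_<major>.<minor>.<patch>" into a static buffer.
+ * Intended for one-time use in the init-path debug dump; the static buffer
+ * makes it non-reentrant, which is assumed to be acceptable there.
+ */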
+static inline const char *
+bnx2x_pmd_version(void)
+{
+ static char version[32];
+
+ snprintf(version, sizeof(version), "%s %s_%d.%d.%d",
+ BNX2X_PMD_VER_PREFIX,
+ BNX2X_DRIVER_VERSION,
+ BNX2X_PMD_VERSION_MAJOR,
+ BNX2X_PMD_VERSION_MINOR,
+ BNX2X_PMD_VERSION_PATCH);
+
+ return version;
+}
+
static z_stream zlib_stream;
#define EVL_VLID_MASK 0x0FFF
dma->sc = sc;
if (IS_PF(sc))
- sprintf(mz_name, "bnx2x%d_%s_%lx", SC_ABS_FUNC(sc), msg,
+ sprintf(mz_name, "bnx2x%d_%s_%" PRIx64, SC_ABS_FUNC(sc), msg,
rte_get_timer_cycles());
else
- sprintf(mz_name, "bnx2x%d_%s_%lx", sc->pcie_device, msg,
+ sprintf(mz_name, "bnx2x%d_%s_%" PRIx64, sc->pcie_device, msg,
rte_get_timer_cycles());
/* Caller must take care that strlen(mz_name) < RTE_MEMZONE_NAMESIZE */
dma->paddr = (uint64_t) z->phys_addr;
dma->vaddr = z->addr;
- PMD_DRV_LOG(DEBUG, "%s: virt=%p phys=%lx", msg, dma->vaddr, dma->paddr);
+ PMD_DRV_LOG(DEBUG, "%s: virt=%p phys=%" PRIx64, msg, dma->vaddr, dma->paddr);
return 0;
}
uint32_t bnx2x_dmae_opcode_add_comp(uint32_t opcode, uint8_t comp_type)
{
- return (opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
- DMAE_COMMAND_C_TYPE_ENABLE));
+ return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
+ DMAE_COMMAND_C_TYPE_ENABLE);
}
uint32_t bnx2x_dmae_opcode_clr_src_reset(uint32_t opcode)
{
- return (opcode & ~DMAE_COMMAND_SRC_RESET);
+ return opcode & ~DMAE_COMMAND_SRC_RESET;
}
uint32_t
mb(); /* status block fields can change */
hw_cons = le16toh(*fp->tx_cons_sb);
- return (hw_cons != txq->tx_pkt_head);
+ return hw_cons != txq->tx_pkt_head;
}
static uint8_t bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
if (unlikely((rx_cq_cons_sb & MAX_RCQ_ENTRIES(rxq)) ==
MAX_RCQ_ENTRIES(rxq)))
rx_cq_cons_sb++;
- return (rxq->rx_cq_head != rx_cq_cons_sb);
+ return rxq->rx_cq_head != rx_cq_cons_sb;
}
static void
/* Update producers */
bnx2x_update_rx_prod(sc, fp, bd_prod_fw, sw_cq_prod);
- return (sw_cq_cons != hw_cq_cons);
+ return sw_cq_cons != hw_cq_cons;
}
static uint16_t
struct rte_mbuf *tx_mbuf = txq->sw_ring[TX_BD(pkt_idx, txq)];
if (likely(tx_mbuf != NULL)) {
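+ /* Each sw_ring slot is expected to hold a single segment, so free just
+ * that segment rather than a whole mbuf chain. */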
- rte_pktmbuf_free(tx_mbuf);
+ rte_pktmbuf_free_seg(tx_mbuf);
} else {
PMD_RX_LOG(ERR, "fp[%02d] lost mbuf %lu",
- fp->index, TX_BD(pkt_idx, txq));
+ fp->index, (unsigned long)TX_BD(pkt_idx, txq));
}
txq->sw_ring[TX_BD(pkt_idx, txq)] = NULL;
ramrod_param.rdata = BNX2X_SP(sc, rx_mode_rdata);
ramrod_param.rdata_mapping =
- (phys_addr_t) ((void *)BNX2X_SP_MAPPING(sc, rx_mode_rdata)),
+ (phys_addr_t)BNX2X_SP_MAPPING(sc, rx_mode_rdata);
bnx2x_set_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state);
ramrod_param.ramrod_flags = ramrod_flags;
* the mbuf and return to the caller.
*
* Returns:
- * 0 = Success, !0 = Failure
+ * int: Number of TX BDs used for the mbuf
+ *
* Note the side effect that an mbuf may be freed if it causes a problem.
*/
-int bnx2x_tx_encap(struct bnx2x_tx_queue *txq, struct rte_mbuf **m_head, int m_pkts)
+int bnx2x_tx_encap(struct bnx2x_tx_queue *txq, struct rte_mbuf *m0)
{
- struct rte_mbuf *m0;
struct eth_tx_start_bd *tx_start_bd;
uint16_t bd_prod, pkt_prod;
- int m_tx;
struct bnx2x_softc *sc;
uint32_t nbds = 0;
- struct bnx2x_fastpath *fp;
sc = txq->sc;
- fp = &sc->fp[txq->queue_id];
-
bd_prod = txq->tx_bd_tail;
pkt_prod = txq->tx_pkt_tail;
- for (m_tx = 0; m_tx < m_pkts; m_tx++) {
-
- m0 = *m_head++;
-
- if (unlikely(txq->nb_tx_avail < 3)) {
- PMD_TX_LOG(ERR, "no enough bds %d/%d",
- bd_prod, txq->nb_tx_avail);
- return -ENOMEM;
- }
+ txq->sw_ring[TX_BD(pkt_prod, txq)] = m0;
- txq->sw_ring[TX_BD(pkt_prod, txq)] = m0;
+ tx_start_bd = &txq->tx_ring[TX_BD(bd_prod, txq)].start_bd;
- tx_start_bd = &txq->tx_ring[TX_BD(bd_prod, txq)].start_bd;
+ tx_start_bd->addr =
+ rte_cpu_to_le_64(rte_mbuf_data_dma_addr(m0));
+ tx_start_bd->nbytes = rte_cpu_to_le_16(m0->data_len);
+ tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
+ tx_start_bd->general_data =
+ (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
- tx_start_bd->addr =
- rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR(m0));
- tx_start_bd->nbytes = rte_cpu_to_le_16(m0->data_len);
- tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
- tx_start_bd->general_data =
- (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
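+ /* nbd = 2: every packet consumes the start BD plus one more slot
+ * (holding the parse BD on VFs; left unused on PFs). */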
+ tx_start_bd->nbd = rte_cpu_to_le_16(2);
- tx_start_bd->nbd = rte_cpu_to_le_16(2);
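+ /* VLAN offload requested: pass the TCI in the BD and have the NIC
+ * insert the tag on the wire (outband VLAN mode). */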
+ if (m0->ol_flags & PKT_TX_VLAN_PKT) {
+ tx_start_bd->vlan_or_ethertype =
+ rte_cpu_to_le_16(m0->vlan_tci);
+ tx_start_bd->bd_flags.as_bitfield |=
+ (X_ETH_OUTBAND_VLAN <<
+ ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
+ } else {
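+ /* No VLAN offload: PFs put the packet producer index in this field,
+ * while VFs must supply the frame's ethertype (converted to CPU
+ * order, then to little-endian for the device). */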
+ if (IS_PF(sc))
+ tx_start_bd->vlan_or_ethertype =
+ rte_cpu_to_le_16(pkt_prod);
+ else {
+ struct ether_hdr *eh =
+ rte_pktmbuf_mtod(m0, struct ether_hdr *);
- if (m0->ol_flags & PKT_TX_VLAN_PKT) {
tx_start_bd->vlan_or_ethertype =
- rte_cpu_to_le_16(m0->vlan_tci);
- tx_start_bd->bd_flags.as_bitfield |=
- (X_ETH_OUTBAND_VLAN <<
- ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
- } else {
- if (IS_PF(sc))
- tx_start_bd->vlan_or_ethertype =
- rte_cpu_to_le_16(pkt_prod);
- else {
- struct ether_hdr *eh
- = rte_pktmbuf_mtod(m0, struct ether_hdr *);
-
- tx_start_bd->vlan_or_ethertype = eh->ether_type;
- }
+ rte_cpu_to_le_16(rte_be_to_cpu_16(eh->ether_type));
}
+ }
- bd_prod = NEXT_TX_BD(bd_prod);
- if (IS_VF(sc)) {
- struct eth_tx_parse_bd_e2 *tx_parse_bd;
- uint8_t *data = rte_pktmbuf_mtod(m0, uint8_t *);
-
- tx_parse_bd =
- &txq->tx_ring[TX_BD(bd_prod, txq)].parse_bd_e2;
- tx_parse_bd->parsing_data =
- (1 << ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE_SHIFT);
-
- rte_memcpy(&tx_parse_bd->data.mac_addr.dst_hi,
- &data[0], 2);
- rte_memcpy(&tx_parse_bd->data.mac_addr.dst_mid,
- &data[2], 2);
- rte_memcpy(&tx_parse_bd->data.mac_addr.dst_lo,
- &data[4], 2);
- rte_memcpy(&tx_parse_bd->data.mac_addr.src_hi,
- &data[6], 2);
- rte_memcpy(&tx_parse_bd->data.mac_addr.src_mid,
- &data[8], 2);
- rte_memcpy(&tx_parse_bd->data.mac_addr.src_lo,
- &data[10], 2);
-
- tx_parse_bd->data.mac_addr.dst_hi =
- rte_cpu_to_be_16(tx_parse_bd->data.mac_addr.dst_hi);
- tx_parse_bd->data.mac_addr.dst_mid =
- rte_cpu_to_be_16(tx_parse_bd->data.
- mac_addr.dst_mid);
- tx_parse_bd->data.mac_addr.dst_lo =
- rte_cpu_to_be_16(tx_parse_bd->data.mac_addr.dst_lo);
- tx_parse_bd->data.mac_addr.src_hi =
- rte_cpu_to_be_16(tx_parse_bd->data.mac_addr.src_hi);
- tx_parse_bd->data.mac_addr.src_mid =
- rte_cpu_to_be_16(tx_parse_bd->data.
- mac_addr.src_mid);
- tx_parse_bd->data.mac_addr.src_lo =
- rte_cpu_to_be_16(tx_parse_bd->data.mac_addr.src_lo);
-
- PMD_TX_LOG(DEBUG,
- "PBD dst %x %x %x src %x %x %x p_data %x",
- tx_parse_bd->data.mac_addr.dst_hi,
- tx_parse_bd->data.mac_addr.dst_mid,
- tx_parse_bd->data.mac_addr.dst_lo,
- tx_parse_bd->data.mac_addr.src_hi,
- tx_parse_bd->data.mac_addr.src_mid,
- tx_parse_bd->data.mac_addr.src_lo,
- tx_parse_bd->parsing_data);
- }
+ bd_prod = NEXT_TX_BD(bd_prod);
+ if (IS_VF(sc)) {
+ struct eth_tx_parse_bd_e2 *tx_parse_bd;
+ const struct ether_hdr *eh =
+ rte_pktmbuf_mtod(m0, struct ether_hdr *);
+ uint8_t mac_type = UNICAST_ADDRESS;
+
+ tx_parse_bd =
+ &txq->tx_ring[TX_BD(bd_prod, txq)].parse_bd_e2;
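+ /* Record whether the destination MAC is unicast, multicast or
+ * broadcast; the parse BD carries this address type to the device. */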
+ if (is_multicast_ether_addr(&eh->d_addr)) {
+ if (is_broadcast_ether_addr(&eh->d_addr))
+ mac_type = BROADCAST_ADDRESS;
+ else
+ mac_type = MULTICAST_ADDRESS;
+ }
+ tx_parse_bd->parsing_data =
+ (mac_type << ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE_SHIFT);
+
+ rte_memcpy(&tx_parse_bd->data.mac_addr.dst_hi,
+ &eh->d_addr.addr_bytes[0], 2);
+ rte_memcpy(&tx_parse_bd->data.mac_addr.dst_mid,
+ &eh->d_addr.addr_bytes[2], 2);
+ rte_memcpy(&tx_parse_bd->data.mac_addr.dst_lo,
+ &eh->d_addr.addr_bytes[4], 2);
+ rte_memcpy(&tx_parse_bd->data.mac_addr.src_hi,
+ &eh->s_addr.addr_bytes[0], 2);
+ rte_memcpy(&tx_parse_bd->data.mac_addr.src_mid,
+ &eh->s_addr.addr_bytes[2], 2);
+ rte_memcpy(&tx_parse_bd->data.mac_addr.src_lo,
+ &eh->s_addr.addr_bytes[4], 2);
+
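+ /* The parse BD stores each MAC address as three 16-bit words in
+ * big-endian order, hence the byte swaps below. */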
+ tx_parse_bd->data.mac_addr.dst_hi =
+ rte_cpu_to_be_16(tx_parse_bd->data.mac_addr.dst_hi);
+ tx_parse_bd->data.mac_addr.dst_mid =
+ rte_cpu_to_be_16(tx_parse_bd->data.mac_addr.dst_mid);
+ tx_parse_bd->data.mac_addr.dst_lo =
+ rte_cpu_to_be_16(tx_parse_bd->data.mac_addr.dst_lo);
+ tx_parse_bd->data.mac_addr.src_hi =
+ rte_cpu_to_be_16(tx_parse_bd->data.mac_addr.src_hi);
+ tx_parse_bd->data.mac_addr.src_mid =
+ rte_cpu_to_be_16(tx_parse_bd->data.mac_addr.src_mid);
+ tx_parse_bd->data.mac_addr.src_lo =
+ rte_cpu_to_be_16(tx_parse_bd->data.mac_addr.src_lo);
PMD_TX_LOG(DEBUG,
- "start bd: nbytes %d flags %x vlan %x\n",
- tx_start_bd->nbytes,
- tx_start_bd->bd_flags.as_bitfield,
- tx_start_bd->vlan_or_ethertype);
+ "PBD dst %x %x %x src %x %x %x p_data %x",
+ tx_parse_bd->data.mac_addr.dst_hi,
+ tx_parse_bd->data.mac_addr.dst_mid,
+ tx_parse_bd->data.mac_addr.dst_lo,
+ tx_parse_bd->data.mac_addr.src_hi,
+ tx_parse_bd->data.mac_addr.src_mid,
+ tx_parse_bd->data.mac_addr.src_lo,
+ tx_parse_bd->parsing_data);
+ }
- bd_prod = NEXT_TX_BD(bd_prod);
- pkt_prod++;
+ PMD_TX_LOG(DEBUG,
+ "start bd: nbytes %d flags %x vlan %x\n",
+ tx_start_bd->nbytes,
+ tx_start_bd->bd_flags.as_bitfield,
+ tx_start_bd->vlan_or_ethertype);
- if (TX_IDX(bd_prod) < 2) {
- nbds++;
- }
- }
+ bd_prod = NEXT_TX_BD(bd_prod);
+ pkt_prod++;
+
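+ /* A near-zero page index right after advancing implies the producer
+ * just crossed onto a new BD page; count the next-page link BD so the
+ * BD total returned to the caller keeps the doorbell in sync. */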
+ if (TX_IDX(bd_prod) < 2)
+ nbds++;
- txq->nb_tx_avail -= m_pkts << 1;
+ txq->nb_tx_avail -= 2;
txq->tx_bd_tail = bd_prod;
txq->tx_pkt_tail = pkt_prod;
- mb();
- fp->tx_db.data.prod += (m_pkts << 1) + nbds;
- DOORBELL(sc, txq->queue_id, fp->tx_db.raw);
- mb();
-
- return 0;
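+ /* The barrier and doorbell now live in the caller, which can ring
+ * once for a whole burst using the BD count returned here. */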
+ return nbds + 2;
}
static uint16_t bnx2x_cid_ilt_lines(struct bnx2x_softc *sc)
/* get the Rx buffer size for RX frames */
sc->fp[i].rx_buf_size =
(IP_HEADER_ALIGNMENT_PADDING + ETH_OVERHEAD + sc->mtu);
-
- /* get the mbuf allocation size for RX frames */
- if (sc->fp[i].rx_buf_size <= MCLBYTES) {
- sc->fp[i].mbuf_alloc_size = MCLBYTES;
- } else if (sc->fp[i].rx_buf_size <= BNX2X_PAGE_SIZE) {
- sc->fp[i].mbuf_alloc_size = PAGE_SIZE;
- } else {
- sc->fp[i].mbuf_alloc_size = MJUM9BYTES;
- }
}
}
/* checks the GLOBAL_RESET bit, should be run under rtnl lock */
static uint8_t bnx2x_reset_is_global(struct bnx2x_softc *sc)
{
- return (REG_RD(sc, BNX2X_RECOVERY_GLOB_REG) & BNX2X_GLOBAL_RESET_BIT);
+ return REG_RD(sc, BNX2X_RECOVERY_GLOB_REG) & BNX2X_GLOBAL_RESET_BIT;
}
/* clear RESET_IN_PROGRESS bit for the engine, should be run under rtnl lock */
val = ((val & mask) >> shift);
- return (val != 0);
+ return val != 0;
}
/* set pf load mark */
/* handle eq element */
switch (opcode) {
case EVENT_RING_OPCODE_STAT_QUERY:
- PMD_DRV_LOG(DEBUG, "got statistics completion event %d",
+ PMD_DEBUG_PERIODIC_LOG(DEBUG, "got statistics completion event %d",
sc->stats_comp++);
/* nothing to do with stats comp */
goto next_spqe;
/* SP events: STAT_QUERY and others */
if (status & BNX2X_DEF_SB_IDX) {
/* handle EQ completions */
- PMD_DRV_LOG(DEBUG, "---> EQ INTR <---");
+ PMD_DEBUG_PERIODIC_LOG(DEBUG, "---> EQ INTR <---");
bnx2x_eq_int(sc);
bnx2x_ack_sb(sc, sc->igu_dsb_id, USTORM_ID,
le16toh(sc->def_idx), IGU_INT_NOP, 1);
return 0;
}
- PMD_DRV_LOG(DEBUG, "Interrupt status 0x%04x", status);
+ PMD_DEBUG_PERIODIC_LOG(DEBUG, "Interrupt status 0x%04x", status);
//bnx2x_dump_status_block(sc);
FOR_EACH_ETH_QUEUE(sc, i) {
ecore_init_func_obj(sc,
&sc->func_obj,
- BNX2X_SP(sc, func_rdata), (phys_addr_t) ((void *)
- BNX2X_SP_MAPPING(sc, func_rdata)), BNX2X_SP(sc, func_afex_rdata), (phys_addr_t) ((void *)
- BNX2X_SP_MAPPING(sc, func_afex_rdata)), &bnx2x_func_sp_drv);
+ BNX2X_SP(sc, func_rdata),
+ (phys_addr_t)BNX2X_SP_MAPPING(sc, func_rdata),
+ BNX2X_SP(sc, func_afex_rdata),
+ (phys_addr_t)BNX2X_SP_MAPPING(sc, func_afex_rdata),
+ &bnx2x_func_sp_drv);
}
static int bnx2x_init_hw(struct bnx2x_softc *sc, uint32_t load_code)
static uint8_t bnx2x_fp_qzone_id(struct bnx2x_fastpath *fp)
{
if (CHIP_IS_E1x(fp->sc)) {
- return (fp->cl_id + SC_PORT(fp->sc) * ETH_MAX_RX_CLIENTS_E1H);
+ return fp->cl_id + SC_PORT(fp->sc) * ETH_MAX_RX_CLIENTS_E1H;
} else {
- return (fp->cl_id);
+ return fp->cl_id;
}
}
uint32_t offset = BAR_USTRORM_INTMEM;
if (IS_VF(sc)) {
- return (PXP_VF_ADDR_USDM_QUEUES_START +
+ return PXP_VF_ADDR_USDM_QUEUES_START +
(sc->acquire_resp.resc.hw_qid[fp->index] *
- sizeof(struct ustorm_queue_zone_data)));
+ sizeof(struct ustorm_queue_zone_data));
} else if (!CHIP_IS_E1x(sc)) {
offset += USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id);
} else {
cids,
sc->max_cos,
SC_FUNC(sc),
- BNX2X_SP(sc, q_rdata), (phys_addr_t) ((void *)
- BNX2X_SP_MAPPING
- (sc, q_rdata)),
+ BNX2X_SP(sc, q_rdata),
+ (phys_addr_t)BNX2X_SP_MAPPING(sc, q_rdata),
q_type);
/* configure classification DBs */
fp->cl_id,
idx,
SC_FUNC(sc),
- BNX2X_SP(sc, mac_rdata), (phys_addr_t) ((void *)
- BNX2X_SP_MAPPING
- (sc,
- mac_rdata)),
+ BNX2X_SP(sc, mac_rdata),
+ (phys_addr_t)BNX2X_SP_MAPPING(sc, mac_rdata),
ECORE_FILTER_MAC_PENDING, &sc->sp_state,
ECORE_OBJ_TYPE_RX_TX, &sc->macs_pool);
}
return;
}
-/*
- * Activate the BD ring...
- * Warning, this will generate an interrupt (to the TSTORM)
- * so this can only be done after the chip is initialized
- */
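+ /* Reset the ring and completion-queue indices (and the status-block
+ * consumer copy) to their freshly-filled state before re-activating. */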
+ rxq->rx_bd_head = 0;
+ rxq->rx_bd_tail = rxq->nb_rx_desc;
+ rxq->rx_cq_head = 0;
+ rxq->rx_cq_tail = TOTAL_RCQ_ENTRIES(rxq);
+ *fp->rx_cq_cons_sb = 0;
+
+ /*
+ * Activate the BD ring...
+ * Warning, this will generate an interrupt (to the TSTORM)
+ * so this can only be done after the chip is initialized
+ */
bnx2x_update_rx_prod(sc, fp, rxq->rx_bd_tail, rxq->rx_cq_tail);
if (i != 0) {
sc->fp[0].index,
SC_FUNC(sc),
SC_FUNC(sc),
- BNX2X_SP(sc, mcast_rdata), (phys_addr_t) ((void *)
- BNX2X_SP_MAPPING(sc, mcast_rdata)), ECORE_FILTER_MCAST_PENDING, &sc->sp_state, o_type);
+ BNX2X_SP(sc, mcast_rdata),
+ (phys_addr_t)BNX2X_SP_MAPPING(sc, mcast_rdata),
+ ECORE_FILTER_MCAST_PENDING,
+ &sc->sp_state, o_type);
/* Setup CAM credit pools */
ecore_init_mac_credit_pool(sc,
sc->fp[0].index,
SC_FUNC(sc),
SC_FUNC(sc),
- BNX2X_SP(sc, rss_rdata), (phys_addr_t) ((void *)
- BNX2X_SP_MAPPING(sc, rss_rdata)), ECORE_FILTER_RSS_CONF_PENDING, &sc->sp_state, ECORE_OBJ_TYPE_RX);
+ BNX2X_SP(sc, rss_rdata),
+ (phys_addr_t)BNX2X_SP_MAPPING(sc, rss_rdata),
+ ECORE_FILTER_RSS_CONF_PENDING,
+ &sc->sp_state, ECORE_OBJ_TYPE_RX);
}
/*
pause->pri_map = 1;
/* rxq setup */
- rxq_init->dscr_map = (phys_addr_t)((void *)rxq->rx_ring_phys_addr);
- rxq_init->rcq_map = (phys_addr_t)((void *)rxq->cq_ring_phys_addr);
- rxq_init->rcq_np_map = (phys_addr_t)((void *)(rxq->cq_ring_phys_addr +
- BNX2X_PAGE_SIZE));
+ rxq_init->dscr_map = (phys_addr_t)rxq->rx_ring_phys_addr;
+ rxq_init->rcq_map = (phys_addr_t)rxq->cq_ring_phys_addr;
+ rxq_init->rcq_np_map = (phys_addr_t)(rxq->cq_ring_phys_addr +
+ BNX2X_PAGE_SIZE);
/*
* This should be a maximum number of data bytes that may be
PMD_TX_LOG(ERR, "ERROR: TX queue is NULL");
return;
}
- txq_init->dscr_map = (phys_addr_t)((void *)txq->tx_ring_phys_addr);
+ txq_init->dscr_map = (phys_addr_t)txq->tx_ring_phys_addr;
txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos;
txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
txq_init->fw_sb_id = fp->fw_sb_id;
int rc;
/* Check if the driver is still running and bail out if it is. */
- if (sc->link_vars.link_up) {
+ if (sc->state != BNX2X_STATE_CLOSED) {
PMD_DRV_LOG(DEBUG, "Init called while driver is running!");
rc = 0;
goto bnx2x_init_done;
static uint8_t bnx2x_is_pcie_pending(struct bnx2x_softc *sc)
{
- return (bnx2x_pcie_capability_read(sc, PCIR_EXPRESS_DEVICE_STA) &
- PCIM_EXP_STA_TRANSACTION_PND);
+ return bnx2x_pcie_capability_read(sc, PCIR_EXPRESS_DEVICE_STA) &
+ PCIM_EXP_STA_TRANSACTION_PND;
}
/*
return -ENOMEM;
}
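+/* Linux and FreeBSD PCI headers use different names for the same
+ * config-space registers (PCI_STATUS vs PCIR_STATUS, etc.). */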
+#ifndef __FreeBSD__
pci_read(sc, PCI_STATUS, &status, 2);
if (!(status & PCI_STATUS_CAP_LIST)) {
+#else
+ pci_read(sc, PCIR_STATUS, &status, 2);
+ if (!(status & PCIM_STATUS_CAPPRESENT)) {
+#endif
PMD_DRV_LOG(NOTICE, "PCIe capability reading failed");
return -1;
}
+#ifndef __FreeBSD__
pci_read(sc, PCI_CAPABILITY_LIST, &pci_cap.next, 1);
+#else
+ pci_read(sc, PCIR_CAP_PTR, &pci_cap.next, 1);
+#endif
while (pci_cap.next) {
cap->addr = pci_cap.next & ~3;
pci_read(sc, pci_cap.next & ~3, &pci_cap, 2);
static void bnx2x_init_rte(struct bnx2x_softc *sc)
{
- sc->max_tx_queues = 128;
- sc->max_rx_queues = 128;
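+ /* VFs are capped at the per-VF queue maximum granted by the PF,
+ * instead of the PF's 128-queue ceiling. */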
+ if (IS_VF(sc)) {
+ sc->max_tx_queues = BNX2X_VF_MAX_QUEUES_PER_VF;
+ sc->max_rx_queues = BNX2X_VF_MAX_QUEUES_PER_VF;
+ } else {
+ sc->max_tx_queues = 128;
+ sc->max_rx_queues = 128;
+ }
}
#define FW_HEADER_LEN 104
sc->fw_len = st.st_size;
if (sc->fw_len < FW_HEADER_LEN) {
- PMD_DRV_LOG(NOTICE, "Invalid fw size: %lu", sc->fw_len);
+ PMD_DRV_LOG(NOTICE, "Invalid fw size: %" PRIu64, sc->fw_len);
return;
}
- PMD_DRV_LOG(DEBUG, "fw_len = %lu", sc->fw_len);
+ PMD_DRV_LOG(DEBUG, "fw_len = %" PRIu64, sc->fw_len);
}
static void
{
uint32_t base = PXP2_REG_PGL_PRETEND_FUNC_F0;
uint32_t stride = (PXP2_REG_PGL_PRETEND_FUNC_F1 - base);
- return (base + (SC_ABS_FUNC(sc)) * stride);
+ return base + (SC_ABS_FUNC(sc)) * stride;
}
/*
{
/* adjust polling timeout */
if (CHIP_REV_IS_EMUL(sc)) {
- return (FLR_POLL_CNT * 2000);
+ return FLR_POLL_CNT * 2000;
}
if (CHIP_REV_IS_FPGA(sc)) {
- return (FLR_POLL_CNT * 120);
+ return FLR_POLL_CNT * 120;
}
return FLR_POLL_CNT;
for (i = 0; i < L2_ILT_LINES(sc); i++) {
ilt->lines[cdu_ilt_start + i].page = sc->context[i].vcxt;
ilt->lines[cdu_ilt_start + i].page_mapping =
- (phys_addr_t)((void *)sc->context[i].vcxt_dma.paddr);
+ (phys_addr_t)sc->context[i].vcxt_dma.paddr;
ilt->lines[cdu_ilt_start + i].size = sc->context[i].size;
}
ecore_ilt_init_op(sc, INITOP_SET);
PMD_INIT_LOG(DEBUG, "\n\n===================================\n");
/* Hardware chip info. */
- PMD_INIT_LOG(DEBUG, "%10s : %#08x\n", "ASIC", sc->devinfo.chip_id);
- PMD_INIT_LOG(DEBUG, "%10s : %c%d\n", "Rev", (CHIP_REV(sc) >> 12) + 'A',
+ PMD_INIT_LOG(DEBUG, "%12s : %#08x", "ASIC", sc->devinfo.chip_id);
+ PMD_INIT_LOG(DEBUG, "%12s : %c%d", "Rev", (CHIP_REV(sc) >> 12) + 'A',
(CHIP_METAL(sc) >> 4));
/* Bus info. */
- PMD_INIT_LOG(DEBUG, "%10s : %d, ", "Bus PCIe", sc->devinfo.pcie_link_width);
+ PMD_INIT_LOG(DEBUG, "%12s : %d, ", "Bus PCIe", sc->devinfo.pcie_link_width);
switch (sc->devinfo.pcie_link_speed) {
case 1:
- PMD_INIT_LOG(DEBUG, "2.5 Gbps\n");
+ PMD_INIT_LOG(DEBUG, "%23s", "2.5 Gbps");
break;
case 2:
- PMD_INIT_LOG(DEBUG, "5 Gbps\n");
+ PMD_INIT_LOG(DEBUG, "%21s", "5 Gbps");
break;
case 4:
- PMD_INIT_LOG(DEBUG, "8 Gbps\n");
+ PMD_INIT_LOG(DEBUG, "%21s", "8 Gbps");
break;
default:
- PMD_INIT_LOG(DEBUG, "Unknown link speed\n");
+ PMD_INIT_LOG(DEBUG, "%33s", "Unknown link speed");
}
/* Device features. */
- PMD_INIT_LOG(DEBUG, "%10s : ", "Flags");
+ PMD_INIT_LOG(DEBUG, "%12s : ", "Flags");
/* Miscellaneous flags. */
if (sc->devinfo.pcie_cap_flags & BNX2X_MSI_CAPABLE_FLAG) {
- PMD_INIT_LOG(DEBUG, "MSI");
+ PMD_INIT_LOG(DEBUG, "%18s", "MSI");
i++;
}
if (sc->devinfo.pcie_cap_flags & BNX2X_MSIX_CAPABLE_FLAG) {
if (i > 0)
PMD_INIT_LOG(DEBUG, "|");
- PMD_INIT_LOG(DEBUG, "MSI-X");
+ PMD_INIT_LOG(DEBUG, "%20s", "MSI-X");
i++;
}
- PMD_INIT_LOG(DEBUG, "\n");
-
if (IS_PF(sc)) {
- PMD_INIT_LOG(DEBUG, "\n%10s : ", "Queues");
+ PMD_INIT_LOG(DEBUG, "%12s : ", "Queues");
switch (sc->sp->rss_rdata.rss_mode) {
case ETH_RSS_MODE_DISABLED:
- PMD_INIT_LOG(DEBUG, "None\n");
+ PMD_INIT_LOG(DEBUG, "%19s", "None");
break;
case ETH_RSS_MODE_REGULAR:
- PMD_INIT_LOG(DEBUG, "RSS : %d\n", sc->num_queues);
+ PMD_INIT_LOG(DEBUG, "%18s : %d", "RSS", sc->num_queues);
break;
default:
- PMD_INIT_LOG(DEBUG, "Unknown\n");
+ PMD_INIT_LOG(DEBUG, "%22s", "Unknown");
break;
}
}
+ /* RTE and Driver versions */
+ PMD_INIT_LOG(DEBUG, "%12s : %s", "DPDK",
+ rte_version());
+ PMD_INIT_LOG(DEBUG, "%12s : %s", "Driver",
+ bnx2x_pmd_version());
+
/* Firmware versions and device features. */
- PMD_INIT_LOG(DEBUG, "%10s : %d.%d.%d\n%10s : %s\n",
+ PMD_INIT_LOG(DEBUG, "%12s : %d.%d.%d",
"Firmware",
BNX2X_5710_FW_MAJOR_VERSION,
BNX2X_5710_FW_MINOR_VERSION,
- BNX2X_5710_FW_REVISION_VERSION,
+ BNX2X_5710_FW_REVISION_VERSION);
+ PMD_INIT_LOG(DEBUG, "%12s : %s",
"Bootcode", sc->devinfo.bc_ver_str);
- PMD_INIT_LOG(DEBUG, "===================================\n");
- PMD_INIT_LOG(DEBUG, "%10s : %u\n", "Bnx2x Func", sc->pcie_func);
- PMD_INIT_LOG(DEBUG, "%10s : %s\n", "Bnx2x Flags", get_bnx2x_flags(sc->flags));
- PMD_INIT_LOG(DEBUG, "%10s : %s\n", "DMAE Is",
+ PMD_INIT_LOG(DEBUG, "\n\n===================================\n");
+ PMD_INIT_LOG(DEBUG, "%12s : %u", "Bnx2x Func", sc->pcie_func);
+ PMD_INIT_LOG(DEBUG, "%12s : %s", "Bnx2x Flags", get_bnx2x_flags(sc->flags));
+ PMD_INIT_LOG(DEBUG, "%12s : %s", "DMAE Is",
(sc->dmae_ready ? "Ready" : "Not Ready"));
- PMD_INIT_LOG(DEBUG, "%10s : %s\n", "OVLAN", (OVLAN(sc) ? "YES" : "NO"));
- PMD_INIT_LOG(DEBUG, "%10s : %s\n", "MF", (IS_MF(sc) ? "YES" : "NO"));
- PMD_INIT_LOG(DEBUG, "%10s : %u\n", "MTU", sc->mtu);
- PMD_INIT_LOG(DEBUG, "%10s : %s\n", "PHY Type", get_ext_phy_type(ext_phy_type));
- PMD_INIT_LOG(DEBUG, "%10s : ", "MAC Addr");
- for (i = 0; i < 6; i++)
- PMD_INIT_LOG(DEBUG, "%x%s", sc->link_params.mac_addr[i],
- i < 5 ? ":" : "\n");
- PMD_INIT_LOG(DEBUG, "%10s : %s\n", "RX Mode", get_rx_mode(sc->rx_mode));
- PMD_INIT_LOG(DEBUG, "%10s : %s\n", "State", get_state(sc->state));
+ PMD_INIT_LOG(DEBUG, "%12s : %s", "OVLAN", (OVLAN(sc) ? "YES" : "NO"));
+ PMD_INIT_LOG(DEBUG, "%12s : %s", "MF", (IS_MF(sc) ? "YES" : "NO"));
+ PMD_INIT_LOG(DEBUG, "%12s : %u", "MTU", sc->mtu);
+ PMD_INIT_LOG(DEBUG, "%12s : %s", "PHY Type", get_ext_phy_type(ext_phy_type));
+ PMD_INIT_LOG(DEBUG, "%12s : %x:%x:%x:%x:%x:%x", "MAC Addr",
+ sc->link_params.mac_addr[0],
+ sc->link_params.mac_addr[1],
+ sc->link_params.mac_addr[2],
+ sc->link_params.mac_addr[3],
+ sc->link_params.mac_addr[4],
+ sc->link_params.mac_addr[5]);
+ PMD_INIT_LOG(DEBUG, "%12s : %s", "RX Mode", get_rx_mode(sc->rx_mode));
+ PMD_INIT_LOG(DEBUG, "%12s : %s", "State", get_state(sc->state));
if (sc->recovery_state)
- PMD_INIT_LOG(DEBUG, "%10s : %s\n", "Recovery",
+ PMD_INIT_LOG(DEBUG, "%12s : %s", "Recovery",
get_recovery_state(sc->recovery_state));
- PMD_INIT_LOG(DEBUG, "%10s : CQ = %lx, EQ = %lx\n", "SPQ Left",
+ PMD_INIT_LOG(DEBUG, "%12s : CQ = %lx, EQ = %lx", "SPQ Left",
sc->cq_spq_left, sc->eq_spq_left);
- PMD_INIT_LOG(DEBUG, "%10s : %x\n", "Switch", sc->link_params.switch_cfg);
- PMD_INIT_LOG(DEBUG, "===================================\n\n");
+ PMD_INIT_LOG(DEBUG, "%12s : %x", "Switch", sc->link_params.switch_cfg);
+ PMD_INIT_LOG(DEBUG, "\n\n===================================\n");
}