static void bnx2x_free_mem(struct bnx2x_softc *sc);
static int bnx2x_alloc_fw_stats_mem(struct bnx2x_softc *sc);
static void bnx2x_free_fw_stats_mem(struct bnx2x_softc *sc);
-static __attribute__ ((noinline))
+static __rte_noinline
int bnx2x_nic_load(struct bnx2x_softc *sc);
static int bnx2x_handle_sp_tq(struct bnx2x_softc *sc);
rte_get_timer_cycles());
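__rte_noinline is DPDK's portable replacement for the compiler-specific attribute spelled out by hand above. For reference, on GCC/clang builds the wrapper amounts to roughly the following (paraphrased; check rte_common.h in the DPDK release in use for the exact definition):

    /* Portable no-inline marker; on GCC/clang it reduces to the
     * attribute the old code wrote out directly. */
    #define __rte_noinline __attribute__((noinline))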
/* Caller must take care that strlen(mz_name) < RTE_MEMZONE_NAMESIZE */
- z = rte_memzone_reserve_aligned(mz_name, (uint64_t) (size),
+ z = rte_memzone_reserve_aligned(mz_name, (uint64_t)size,
SOCKET_ID_ANY,
- 0, align);
+ RTE_MEMZONE_IOVA_CONTIG, align);
if (z == NULL) {
PMD_DRV_LOG(ERR, "DMA alloc failed for %s", msg);
return -ENOMEM;
}
- dma->paddr = (uint64_t) z->phys_addr;
+ dma->paddr = (uint64_t) z->iova;
dma->vaddr = z->addr;
PMD_DRV_LOG(DEBUG, "%s: virt=%p phys=%" PRIx64, msg, dma->vaddr, dma->paddr);
}
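The allocator above now asks the memzone layer for IOVA-contiguous memory and reads the zone's iova field rather than the deprecated phys_addr. A minimal self-contained sketch of the same pattern, with hypothetical names (dma_zone_alloc is illustrative, not part of the driver):

    #include <errno.h>
    #include <rte_memzone.h>

    /* Hypothetical helper mirroring the allocation above: reserve an
     * IOVA-contiguous zone and return both CPU and bus addresses. */
    static int
    dma_zone_alloc(const char *name, size_t size, unsigned int align,
                   void **vaddr, rte_iova_t *iova)
    {
            const struct rte_memzone *mz;

            /* RTE_MEMZONE_IOVA_CONTIG requests a zone that is contiguous
             * in IOVA space, which the device's DMA engine requires. */
            mz = rte_memzone_reserve_aligned(name, size, SOCKET_ID_ANY,
                                             RTE_MEMZONE_IOVA_CONTIG, align);
            if (mz == NULL)
                    return -ENOMEM;

            *vaddr = mz->addr;
            *iova = mz->iova;   /* replaces the removed mz->phys_addr */
            return 0;
    }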
void
-bnx2x_write_dmae(struct bnx2x_softc *sc, phys_addr_t dma_addr, uint32_t dst_addr,
+bnx2x_write_dmae(struct bnx2x_softc *sc, rte_iova_t dma_addr, uint32_t dst_addr,
uint32_t len32)
{
struct dmae_command dmae;
}
static void
-bnx2x_write_dmae_phys_len(struct bnx2x_softc *sc, phys_addr_t phys_addr,
+bnx2x_write_dmae_phys_len(struct bnx2x_softc *sc, rte_iova_t phys_addr,
uint32_t addr, uint32_t len)
{
uint32_t dmae_wr_max = DMAE_LEN32_WR_MAX(sc);
static void
__storm_memset_dma_mapping(struct bnx2x_softc *sc, uint32_t addr,
- phys_addr_t mapping)
+ rte_iova_t mapping)
{
REG_WR(sc, addr, U64_LO(mapping));
REG_WR(sc, (addr + 4), U64_HI(mapping));
}
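The paired REG_WR calls exist because the registers are only 32 bits wide: a 64-bit IOVA is programmed as a low/high pair at adjacent addresses. The driver's U64_LO/U64_HI helpers presumably reduce to something like the following (hypothetical sketch, not the driver's definitions):

    #include <stdint.h>

    /* Split a 64-bit bus address into the two 32-bit halves written
     * to the adjacent registers above. */
    #define U64_LO(x) ((uint32_t)((x) & 0xffffffffULL))
    #define U64_HI(x) ((uint32_t)(((x) >> 32) & 0xffffffffULL))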
static void
-storm_memset_spq_addr(struct bnx2x_softc *sc, phys_addr_t mapping,
+storm_memset_spq_addr(struct bnx2x_softc *sc, rte_iova_t mapping,
uint16_t abs_fid)
{
uint32_t addr = (XSEM_REG_FAST_MEMORY +
/*
* Post a slowpath command.
*
- * A slowpath command is used to propogate a configuration change through
+ * A slowpath command is used to propagate a configuration change through
* the controller in a controlled manner, allowing each STORM processor and
* other H/W blocks to phase in the change. The commands sent on the
* slowpath are referred to as ramrods. Depending on the ramrod used the
bnx2x_sp_event(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp,
union eth_rx_cqe *rr_cqe)
{
-#ifdef RTE_LIBRTE_BNX2X_DEBUG
int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
-#endif
int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
enum ecore_queue_cmd drv_cmd = ECORE_Q_CMD_MAX;
struct ecore_queue_sp_obj *q_obj = &BNX2X_SP_OBJ(sc, fp).q_obj;
ramrod_param.rdata = BNX2X_SP(sc, rx_mode_rdata);
ramrod_param.rdata_mapping =
- (phys_addr_t)BNX2X_SP_MAPPING(sc, rx_mode_rdata),
+ (rte_iova_t)BNX2X_SP_MAPPING(sc, rx_mode_rdata);

bnx2x_set_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state);
ramrod_param.ramrod_flags = ramrod_flags;
}
/* stop the controller */
-__attribute__ ((noinline))
+__rte_noinline
int
bnx2x_nic_unload(struct bnx2x_softc *sc, uint32_t unload_mode, uint8_t keep_link)
{
/*
* Nothing to do during unload if previous bnx2x_nic_load()
- * did not completed succesfully - all resourses are released.
+ * did not complete successfully - all resources are released.
*/
if ((sc->state == BNX2X_STATE_CLOSED) || (sc->state == BNX2X_STATE_ERROR)) {
return 0;
tx_start_bd = &txq->tx_ring[TX_BD(bd_prod, txq)].start_bd;
tx_start_bd->addr =
- rte_cpu_to_le_64(rte_mbuf_data_dma_addr(m0));
+ rte_cpu_to_le_64(rte_mbuf_data_iova(m0));
tx_start_bd->nbytes = rte_cpu_to_le_16(m0->data_len);
tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
tx_start_bd->general_data =
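rte_mbuf_data_iova() is the rename of the removed rte_mbuf_data_dma_addr(): it returns the bus address of the mbuf's current data start, i.e. buf_iova plus data_off. A small sketch of the relationship the TX path above relies on (the helper name is hypothetical):

    #include <rte_mbuf.h>

    /* Bus address of the first byte of packet data in an mbuf;
     * equivalent to m->buf_iova + m->data_off. */
    static inline rte_iova_t
    mbuf_data_bus_addr(const struct rte_mbuf *m)
    {
            return rte_mbuf_data_iova(m);
    }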
mask1 = REG_RD(sc, PXP2_REG_PXP2_INT_MASK_1);
val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_0);
/*
- * If the olny PXP2_EOP_ERROR_BIT is set in
+ * If only PXP2_EOP_ERROR_BIT is set in
* STS0 and STS1 - clear it
*
* probably we lose additional attentions between
ecore_init_func_obj(sc,
&sc->func_obj,
BNX2X_SP(sc, func_rdata),
- (phys_addr_t)BNX2X_SP_MAPPING(sc, func_rdata),
+ (rte_iova_t)BNX2X_SP_MAPPING(sc, func_rdata),
BNX2X_SP(sc, func_afex_rdata),
- (phys_addr_t)BNX2X_SP_MAPPING(sc, func_afex_rdata),
+ (rte_iova_t)BNX2X_SP_MAPPING(sc, func_afex_rdata),
&bnx2x_func_sp_drv);
}
}
static void
-bnx2x_init_sb(struct bnx2x_softc *sc, phys_addr_t busaddr, int vfid,
+bnx2x_init_sb(struct bnx2x_softc *sc, rte_iova_t busaddr, int vfid,
uint8_t vf_valid, int fw_sb_id, int igu_sb_id)
{
struct hc_status_block_data_e2 sb_data_e2;
sc->max_cos,
SC_FUNC(sc),
BNX2X_SP(sc, q_rdata),
- (phys_addr_t)BNX2X_SP_MAPPING(sc, q_rdata),
+ (rte_iova_t)BNX2X_SP_MAPPING(sc, q_rdata),
q_type);
/* configure classification DBs */
idx,
SC_FUNC(sc),
BNX2X_SP(sc, mac_rdata),
- (phys_addr_t)BNX2X_SP_MAPPING(sc, mac_rdata),
+ (rte_iova_t)BNX2X_SP_MAPPING(sc, mac_rdata),
ECORE_FILTER_MAC_PENDING, &sc->sp_state,
ECORE_OBJ_TYPE_RX_TX, &sc->macs_pool);
}
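Each ECORE object-init call above passes the same slowpath member twice: once as a host pointer (BNX2X_SP) and once as its bus address (BNX2X_SP_MAPPING). The pairing presumably works out to base-plus-offset arithmetic over a single DMA region, roughly like this hypothetical restatement (SP_VIRT/SP_IOVA and the field names are illustrative, not the driver's macros):

    #include <stddef.h>   /* offsetof */

    /* One DMA region holds struct bnx2x_slowpath; a member's bus
     * address is the region's base IOVA plus the member's offset. */
    #define SP_VIRT(sc, var)  (&(sc)->sp->var)
    #define SP_IOVA(sc, var)  ((sc)->sp_dma.paddr + \
                               offsetof(struct bnx2x_slowpath, var))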
static void bnx2x_init_def_sb(struct bnx2x_softc *sc)
{
struct host_sp_status_block *def_sb = sc->def_sb;
- phys_addr_t mapping = sc->def_sb_dma.paddr;
+ rte_iova_t mapping = sc->def_sb_dma.paddr;
int igu_sp_sb_index;
int igu_seg_id;
int port = SC_PORT(sc);
SC_FUNC(sc),
SC_FUNC(sc),
BNX2X_SP(sc, mcast_rdata),
- (phys_addr_t)BNX2X_SP_MAPPING(sc, mcast_rdata),
+ (rte_iova_t)BNX2X_SP_MAPPING(sc, mcast_rdata),
ECORE_FILTER_MCAST_PENDING,
&sc->sp_state, o_type);
SC_FUNC(sc),
SC_FUNC(sc),
BNX2X_SP(sc, rss_rdata),
- (phys_addr_t)BNX2X_SP_MAPPING(sc, rss_rdata),
+ (rte_iova_t)BNX2X_SP_MAPPING(sc, rss_rdata),
ECORE_FILTER_RSS_CONF_PENDING,
&sc->sp_state, ECORE_OBJ_TYPE_RX);
}
(val | HC_CONFIG_0_REG_BLOCK_DISABLE_0));
} else {
-/* Prevent incomming interrupts in IGU */
+/* Prevent incoming interrupts in IGU */
val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION);
if (close)
pause->pri_map = 1;
/* rxq setup */
- rxq_init->dscr_map = (phys_addr_t)rxq->rx_ring_phys_addr;
- rxq_init->rcq_map = (phys_addr_t)rxq->cq_ring_phys_addr;
- rxq_init->rcq_np_map = (phys_addr_t)(rxq->cq_ring_phys_addr +
+ rxq_init->dscr_map = (rte_iova_t)rxq->rx_ring_phys_addr;
+ rxq_init->rcq_map = (rte_iova_t)rxq->cq_ring_phys_addr;
+ rxq_init->rcq_np_map = (rte_iova_t)(rxq->cq_ring_phys_addr +
BNX2X_PAGE_SIZE);
/*
PMD_TX_LOG(ERR, "ERROR: TX queue is NULL");
return;
}
- txq_init->dscr_map = (phys_addr_t)txq->tx_ring_phys_addr;
+ txq_init->dscr_map = (rte_iova_t)txq->tx_ring_phys_addr;
txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos;
txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
txq_init->fw_sb_id = fp->fw_sb_id;
/* Hash bits */
params.rss_result_mask = MULTI_MASK;
- (void)rte_memcpy(params.ind_table, rss_obj->ind_table,
+ rte_memcpy(params.ind_table, rss_obj->ind_table,
sizeof(params.ind_table));
if (config_hash) {
/* fill a user request section if needed */
if (!bnx2x_test_bit(RAMROD_CONT, ramrod_flags)) {
- (void)rte_memcpy(ramrod_param.user_req.u.mac.mac, mac,
+ rte_memcpy(ramrod_param.user_req.u.mac.mac, mac,
ETH_ALEN);
bnx2x_set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags);
sc->link_cnt++;
/* report new link params and remember the state for the next time */
- (void)rte_memcpy(&sc->last_reported_link, &cur_data, sizeof(cur_data));
+ rte_memcpy(&sc->last_reported_link, &cur_data, sizeof(cur_data));
if (bnx2x_test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
&cur_data.link_report_flags)) {
}
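Dropping the (void) casts on rte_memcpy() is safe: like memcpy(), it returns the destination pointer, and C does not require a cast to discard a return value. A trivial usage sketch (copy_mac is hypothetical):

    #include <stdint.h>
    #include <rte_memcpy.h>

    static void
    copy_mac(uint8_t dst[6], const uint8_t src[6])
    {
            /* Return value (dst) intentionally ignored; no cast needed. */
            rte_memcpy(dst, src, 6);
    }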
/* start the controller */
-static __attribute__ ((noinline))
+static __rte_noinline
int bnx2x_nic_load(struct bnx2x_softc *sc)
{
uint32_t val;
struct bnx2x_pci_cap *caps;
uint16_t link_status;
-#ifdef RTE_LIBRTE_BNX2X_DEBUG
int reg = 0;
-#endif
/* check if PCI Power Management is enabled */
caps = pci_find_cap(sc, PCIY_PMG, BNX2X_PCI_CAP);
continue;
}
fid = IGU_FID(val);
- if ((fid & IGU_FID_ENCODE_IS_PF)) {
+ if (fid & IGU_FID_ENCODE_IS_PF) {
if ((fid & IGU_FID_PF_NUM_MASK) != pfid) {
continue;
}
REG_WR(sc, PXP2_REG_PGL_ADDR_90_F1, 0);
REG_WR(sc, PXP2_REG_PGL_ADDR_94_F1, 0);
}
-
-/*
- * Enable internal target-read (in case we are probed after PF
- * FLR). Must be done prior to any BAR read access. Only for
- * 57712 and up
- */
- if (!CHIP_IS_E1x(sc)) {
- REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ,
- 1);
- }
}
/* get the nvram size */
bnx2x_init_rte(sc);
if (IS_PF(sc)) {
-/* get device info and set params */
+ /* Enable internal target-read (in case we are probed after PF
+ * FLR). Must be done prior to any BAR read access. Only for
+ * 57712 and up
+ */
+ if (!CHIP_IS_E1x(sc)) {
+ REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ,
+ 1);
+ DELAY(200000);
+ }
+
+ /* get device info and set params */
if (bnx2x_get_device_info(sc) != 0) {
PMD_DRV_LOG(NOTICE, "getting device info");
return -ENXIO;
/* get phy settings from shmem and 'and' against admin settings */
bnx2x_get_phy_info(sc);
} else {
-/* Left mac of VF unfilled, PF should set it for VF */
+ /* Left mac of VF unfilled, PF should set it for VF */
memset(sc->link_params.mac_addr, 0, ETHER_ADDR_LEN);
}
for (i = 0; i < L2_ILT_LINES(sc); i++) {
ilt->lines[cdu_ilt_start + i].page = sc->context[i].vcxt;
ilt->lines[cdu_ilt_start + i].page_mapping =
- (phys_addr_t)sc->context[i].vcxt_dma.paddr;
+ (rte_iova_t)sc->context[i].vcxt_dma.paddr;
ilt->lines[cdu_ilt_start + i].size = sc->context[i].size;
}
ecore_ilt_init_op(sc, INITOP_SET);
}
}
-static void bnx2x_ilt_wr(struct bnx2x_softc *sc, uint32_t index, phys_addr_t addr)
+static void bnx2x_ilt_wr(struct bnx2x_softc *sc, uint32_t index, rte_iova_t addr)
{
int reg;
uint32_t wb_write[2];
}
static void
-ecore_write_dmae_phys_len(struct bnx2x_softc *sc, phys_addr_t phys_addr,
+ecore_write_dmae_phys_len(struct bnx2x_softc *sc, rte_iova_t phys_addr,
uint32_t addr, uint32_t len)
{
bnx2x_write_dmae_phys_len(sc, phys_addr, addr, len);