-/*
- * Copyright (c) 2016 QLogic Corporation.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
+ * www.cavium.com
*/
#include "bcm_osal.h"
#include "mcp_public.h"
#include "ecore_iro.h"
#include "nvm_cfg.h"
-#include "ecore_dev_api.h"
#include "ecore_dcbx.h"
#include "ecore_l2.h"
* there's more than a single compiled ecore component in system].
*/
static osal_spinlock_t qm_lock;
-static bool qm_lock_init;
+static u32 qm_lock_ref_cnt;
+
+/******************** Doorbell Recovery *******************/
+/* The doorbell recovery mechanism consists of a list of entries which represent
+ * doorbelling entities (l2 queues, roce sq/rq/cqs, the slowpath spq, etc). Each
+ * entity needs to register with the mechanism and provide the parameters
+ * describing its doorbell, including a location where the last used doorbell data
+ * can be found. The doorbell execute function will traverse the list and
+ * doorbell all of the registered entries.
+ */
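+
+/* Illustrative sketch (not part of this patch): a doorbelling entity with a
+ * 32b kernel-space doorbell would pair registration with removal roughly as
+ * below. "my_db_addr" and "my_db_data" are hypothetical placeholders, and the
+ * kernel doorbell-space value is assumed to be named DB_REC_KERNEL.
+ *
+ *	rc = ecore_db_recovery_add(p_dev, my_db_addr, &my_db_data,
+ *				   DB_REC_WIDTH_32B, DB_REC_KERNEL);
+ *	... ring and use the doorbell ...
+ *	rc = ecore_db_recovery_del(p_dev, my_db_addr, &my_db_data);
+ */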
+struct ecore_db_recovery_entry {
+ osal_list_entry_t list_entry;
+ void OSAL_IOMEM *db_addr;
+ void *db_data;
+ enum ecore_db_rec_width db_width;
+ enum ecore_db_rec_space db_space;
+ u8 hwfn_idx;
+};
+
+/* display a single doorbell recovery entry */
+void ecore_db_recovery_dp_entry(struct ecore_hwfn *p_hwfn,
+ struct ecore_db_recovery_entry *db_entry,
+ const char *action)
+{
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "(%s: db_entry %p, addr %p, data %p, width %s, %s space, hwfn %d)\n",
+ action, db_entry, db_entry->db_addr, db_entry->db_data,
+ db_entry->db_width == DB_REC_WIDTH_32B ? "32b" : "64b",
+ db_entry->db_space == DB_REC_USER ? "user" : "kernel",
+ db_entry->hwfn_idx);
+}
+
+/* doorbell address sanity (address within doorbell bar range) */
+bool ecore_db_rec_sanity(struct ecore_dev *p_dev, void OSAL_IOMEM *db_addr,
+ void *db_data)
+{
+ /* make sure doorbell address is within the doorbell bar */
+ if (db_addr < p_dev->doorbells || (u8 *)db_addr >
+ (u8 *)p_dev->doorbells + p_dev->db_size) {
+ OSAL_WARN(true,
+ "Illegal doorbell address: %p. Legal range for doorbell addresses is [%p..%p]\n",
+ db_addr, p_dev->doorbells,
+ (u8 *)p_dev->doorbells + p_dev->db_size);
+ return false;
+ }
+
+ /* make sure doorbell data pointer is not null */
+ if (!db_data) {
+ OSAL_WARN(true, "Illegal doorbell data pointer: %p\n", db_data);
+ return false;
+ }
+
+ return true;
+}
+
+/* find hwfn according to the doorbell address */
+struct ecore_hwfn *ecore_db_rec_find_hwfn(struct ecore_dev *p_dev,
+ void OSAL_IOMEM *db_addr)
+{
+ struct ecore_hwfn *p_hwfn;
+
+ /* In CMT doorbell bar is split down the middle between engine 0 and
+ * engine 1
+ */
+ if (ECORE_IS_CMT(p_dev))
+ p_hwfn = db_addr < p_dev->hwfns[1].doorbells ?
+ &p_dev->hwfns[0] : &p_dev->hwfns[1];
+ else
+ p_hwfn = ECORE_LEADING_HWFN(p_dev);
+
+ return p_hwfn;
+}
+
+/* add a new entry to the doorbell recovery mechanism */
+enum _ecore_status_t ecore_db_recovery_add(struct ecore_dev *p_dev,
+ void OSAL_IOMEM *db_addr,
+ void *db_data,
+ enum ecore_db_rec_width db_width,
+ enum ecore_db_rec_space db_space)
+{
+ struct ecore_db_recovery_entry *db_entry;
+ struct ecore_hwfn *p_hwfn;
+
+ /* short-circuit VFs, for now */
+ if (IS_VF(p_dev)) {
+ DP_VERBOSE(p_dev, ECORE_MSG_IOV, "db recovery - skipping VF doorbell\n");
+ return ECORE_SUCCESS;
+ }
+
+ /* sanitize doorbell address */
+ if (!ecore_db_rec_sanity(p_dev, db_addr, db_data))
+ return ECORE_INVAL;
+
+ /* obtain hwfn from doorbell address */
+ p_hwfn = ecore_db_rec_find_hwfn(p_dev, db_addr);
+
+ /* create entry */
+ db_entry = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*db_entry));
+ if (!db_entry) {
+ DP_NOTICE(p_dev, false, "Failed to allocate a db recovery entry\n");
+ return ECORE_NOMEM;
+ }
+
+ /* populate entry */
+ db_entry->db_addr = db_addr;
+ db_entry->db_data = db_data;
+ db_entry->db_width = db_width;
+ db_entry->db_space = db_space;
+ db_entry->hwfn_idx = p_hwfn->my_id;
+
+ /* display */
+ ecore_db_recovery_dp_entry(p_hwfn, db_entry, "Adding");
+
+ /* protect the list */
+ OSAL_SPIN_LOCK(&p_hwfn->db_recovery_info.lock);
+ OSAL_LIST_PUSH_TAIL(&db_entry->list_entry,
+ &p_hwfn->db_recovery_info.list);
+ OSAL_SPIN_UNLOCK(&p_hwfn->db_recovery_info.lock);
+
+ return ECORE_SUCCESS;
+}
+
+/* remove an entry from the doorbell recovery mechanism */
+enum _ecore_status_t ecore_db_recovery_del(struct ecore_dev *p_dev,
+ void OSAL_IOMEM *db_addr,
+ void *db_data)
+{
+ struct ecore_db_recovery_entry *db_entry = OSAL_NULL;
+ enum _ecore_status_t rc = ECORE_INVAL;
+ struct ecore_hwfn *p_hwfn;
+
+ /* short-circuit VFs, for now */
+ if (IS_VF(p_dev)) {
+ DP_VERBOSE(p_dev, ECORE_MSG_IOV, "db recovery - skipping VF doorbell\n");
+ return ECORE_SUCCESS;
+ }
+
+ /* sanitize doorbell address */
+ if (!ecore_db_rec_sanity(p_dev, db_addr, db_data))
+ return ECORE_INVAL;
+
+ /* obtain hwfn from doorbell address */
+ p_hwfn = ecore_db_rec_find_hwfn(p_dev, db_addr);
+
+ /* protect the list */
+ OSAL_SPIN_LOCK(&p_hwfn->db_recovery_info.lock);
+ OSAL_LIST_FOR_EACH_ENTRY(db_entry,
+ &p_hwfn->db_recovery_info.list,
+ list_entry,
+ struct ecore_db_recovery_entry) {
+ /* search according to db_data addr since db_addr is not unique
+ * (roce)
+ */
+ if (db_entry->db_data == db_data) {
+ ecore_db_recovery_dp_entry(p_hwfn, db_entry,
+ "Deleting");
+ OSAL_LIST_REMOVE_ENTRY(&db_entry->list_entry,
+ &p_hwfn->db_recovery_info.list);
+ rc = ECORE_SUCCESS;
+ break;
+ }
+ }
+
+ OSAL_SPIN_UNLOCK(&p_hwfn->db_recovery_info.lock);
+
+ if (rc == ECORE_INVAL)
+ DP_NOTICE(p_hwfn, false,
+ "Failed to find element in list. Key (db_data addr) was %p. db_addr was %p\n",
+ db_data, db_addr);
+ else
+ OSAL_FREE(p_dev, db_entry);
+
+ return rc;
+}
+
+/* initialize the doorbell recovery mechanism */
+enum _ecore_status_t ecore_db_recovery_setup(struct ecore_hwfn *p_hwfn)
+{
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "Setting up db recovery\n");
+
+ /* make sure db_size was set in p_dev */
+ if (!p_hwfn->p_dev->db_size) {
+ DP_ERR(p_hwfn->p_dev, "db_size not set\n");
+ return ECORE_INVAL;
+ }
+
+ OSAL_LIST_INIT(&p_hwfn->db_recovery_info.list);
+#ifdef CONFIG_ECORE_LOCK_ALLOC
+ if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_hwfn->db_recovery_info.lock))
+ return ECORE_NOMEM;
+#endif
+ OSAL_SPIN_LOCK_INIT(&p_hwfn->db_recovery_info.lock);
+ p_hwfn->db_recovery_info.db_recovery_counter = 0;
+
+ return ECORE_SUCCESS;
+}
+
+/* destroy the doorbell recovery mechanism */
+void ecore_db_recovery_teardown(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_db_recovery_entry *db_entry = OSAL_NULL;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "Tearing down db recovery\n");
+ if (!OSAL_LIST_IS_EMPTY(&p_hwfn->db_recovery_info.list)) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "Doorbell recovery teardown found a non-empty doorbell recovery list (expected during a disorderly driver unload, e.g. recovery; otherwise some flow probably forgot to call db_recovery_del). Preparing to purge the doorbell recovery list...\n");
+ while (!OSAL_LIST_IS_EMPTY(&p_hwfn->db_recovery_info.list)) {
+ db_entry = OSAL_LIST_FIRST_ENTRY(
+ &p_hwfn->db_recovery_info.list,
+ struct ecore_db_recovery_entry,
+ list_entry);
+ ecore_db_recovery_dp_entry(p_hwfn, db_entry, "Purging");
+ OSAL_LIST_REMOVE_ENTRY(&db_entry->list_entry,
+ &p_hwfn->db_recovery_info.list);
+ OSAL_FREE(p_hwfn->p_dev, db_entry);
+ }
+ }
+#ifdef CONFIG_ECORE_LOCK_ALLOC
+ OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->db_recovery_info.lock);
+#endif
+ p_hwfn->db_recovery_info.db_recovery_counter = 0;
+}
+
+/* print the content of the doorbell recovery mechanism */
+void ecore_db_recovery_dp(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_db_recovery_entry *db_entry = OSAL_NULL;
+
+ DP_NOTICE(p_hwfn, false,
+ "Dispalying doorbell recovery database. Counter was %d\n",
+ p_hwfn->db_recovery_info.db_recovery_counter);
+
+ /* protect the list */
+ OSAL_SPIN_LOCK(&p_hwfn->db_recovery_info.lock);
+ OSAL_LIST_FOR_EACH_ENTRY(db_entry,
+ &p_hwfn->db_recovery_info.list,
+ list_entry,
+ struct ecore_db_recovery_entry) {
+ ecore_db_recovery_dp_entry(p_hwfn, db_entry, "Printing");
+ }
+
+ OSAL_SPIN_UNLOCK(&p_hwfn->db_recovery_info.lock);
+}
+
+/* ring the doorbell of a single doorbell recovery entry */
+void ecore_db_recovery_ring(struct ecore_hwfn *p_hwfn,
+ struct ecore_db_recovery_entry *db_entry,
+ enum ecore_db_rec_exec db_exec)
+{
+ /* Print according to width */
+ if (db_entry->db_width == DB_REC_WIDTH_32B)
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "%s doorbell address %p data %x\n",
+ db_exec == DB_REC_DRY_RUN ? "would have rung" : "ringing",
+ db_entry->db_addr, *(u32 *)db_entry->db_data);
+ else
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "%s doorbell address %p data %lx\n",
+ db_exec == DB_REC_DRY_RUN ? "would have rung" : "ringing",
+ db_entry->db_addr,
+ *(unsigned long *)(db_entry->db_data));
+
+ /* Sanity */
+ if (!ecore_db_rec_sanity(p_hwfn->p_dev, db_entry->db_addr,
+ db_entry->db_data))
+ return;
+
+ /* Flush the write combined buffer. Since there are multiple doorbelling
+ * entities using the same address, if we don't flush, a transaction
+ * could be lost.
+ */
+ OSAL_WMB(p_hwfn->p_dev);
+
+ /* Ring the doorbell */
+ if (db_exec == DB_REC_REAL_DEAL || db_exec == DB_REC_ONCE) {
+ if (db_entry->db_width == DB_REC_WIDTH_32B)
+ DIRECT_REG_WR(p_hwfn, db_entry->db_addr,
+ *(u32 *)(db_entry->db_data));
+ else
+ DIRECT_REG_WR64(p_hwfn, db_entry->db_addr,
+ *(u64 *)(db_entry->db_data));
+ }
+
+ /* Flush the write combined buffer. Next doorbell may come from a
+ * different entity to the same address...
+ */
+ OSAL_WMB(p_hwfn->p_dev);
+}
+
+/* traverse the doorbell recovery entry list and ring all the doorbells */
+void ecore_db_recovery_execute(struct ecore_hwfn *p_hwfn,
+ enum ecore_db_rec_exec db_exec)
+{
+ struct ecore_db_recovery_entry *db_entry = OSAL_NULL;
+
+ if (db_exec != DB_REC_ONCE) {
+ DP_NOTICE(p_hwfn, false, "Executing doorbell recovery. Counter was %d\n",
+ p_hwfn->db_recovery_info.db_recovery_counter);
+
+ /* track amount of times recovery was executed */
+ p_hwfn->db_recovery_info.db_recovery_counter++;
+ }
+
+ /* protect the list */
+ OSAL_SPIN_LOCK(&p_hwfn->db_recovery_info.lock);
+ OSAL_LIST_FOR_EACH_ENTRY(db_entry,
+ &p_hwfn->db_recovery_info.list,
+ list_entry,
+ struct ecore_db_recovery_entry) {
+ ecore_db_recovery_ring(p_hwfn, db_entry, db_exec);
+ if (db_exec == DB_REC_ONCE)
+ break;
+ }
+
+ OSAL_SPIN_UNLOCK(&p_hwfn->db_recovery_info.lock);
+}
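+
+/* Illustrative sketch (not part of this patch): a recovery flow could first
+ * log the registered entries with a dry run and then actually ring them:
+ *
+ *	ecore_db_recovery_execute(p_hwfn, DB_REC_DRY_RUN);
+ *	ecore_db_recovery_execute(p_hwfn, DB_REC_REAL_DEAL);
+ */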
+/******************** Doorbell Recovery end ****************/
/* Configurable */
#define ECORE_MIN_DPIS (4) /* The minimal num of DPIs required to
*/
/* Derived */
-#define ECORE_MIN_PWM_REGION ((ECORE_WID_SIZE) * (ECORE_MIN_DPIS))
+#define ECORE_MIN_PWM_REGION (ECORE_WID_SIZE * ECORE_MIN_DPIS)
-enum BAR_ID {
- BAR_ID_0, /* used for GRC */
- BAR_ID_1 /* Used for doorbells */
-};
-
-static u32 ecore_hw_bar_size(struct ecore_hwfn *p_hwfn, enum BAR_ID bar_id)
+static u32 ecore_hw_bar_size(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ enum BAR_ID bar_id)
{
u32 bar_reg = (bar_id == BAR_ID_0 ?
PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE);
u32 val;
- if (IS_VF(p_hwfn->p_dev)) {
- /* TODO - assume each VF hwfn has 64Kb for Bar0; Bar1 can be
- * read from actual register, but we're currently not using
- * it for actual doorbelling.
- */
- return 1 << 17;
- }
+ if (IS_VF(p_hwfn->p_dev))
+ return ecore_vf_hw_bar_size(p_hwfn, bar_id);
- val = ecore_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg);
+ val = ecore_rd(p_hwfn, p_ptt, bar_reg);
if (val)
return 1 << (val + 15);
* they were found to be useful MFW started updating them from 8.7.7.0.
* In older MFW versions they are set to 0 which means disabled.
*/
- if (p_hwfn->p_dev->num_hwfns > 1) {
- DP_NOTICE(p_hwfn, false,
- "BAR size not configured. Assuming BAR size of 256kB"
- " for GRC and 512kB for DB\n");
+ if (ECORE_IS_CMT(p_hwfn->p_dev)) {
+ DP_INFO(p_hwfn,
+ "BAR size not configured. Assuming BAR size of 256kB for GRC and 512kB for DB\n");
val = (bar_id == BAR_ID_0) ? 256 * 1024 : 512 * 1024;
} else {
- DP_NOTICE(p_hwfn, false,
- "BAR size not configured. Assuming BAR size of 512kB"
- " for GRC and 512kB for DB\n");
+ DP_INFO(p_hwfn,
+ "BAR size not configured. Assuming BAR size of 512kB for GRC and 512kB for DB\n");
val = 512 * 1024;
}
}
}
-void ecore_init_struct(struct ecore_dev *p_dev)
+enum _ecore_status_t ecore_init_struct(struct ecore_dev *p_dev)
{
u8 i;
p_hwfn->my_id = i;
p_hwfn->b_active = false;
- OSAL_MUTEX_ALLOC(p_hwfn, &p_hwfn->dmae_info.mutex);
- OSAL_MUTEX_INIT(&p_hwfn->dmae_info.mutex);
+#ifdef CONFIG_ECORE_LOCK_ALLOC
+ if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_hwfn->dmae_info.lock))
+ goto handle_err;
+#endif
+ OSAL_SPIN_LOCK_INIT(&p_hwfn->dmae_info.lock);
}
/* hwfn 0 is always active */
/* set the default cache alignment to 128 (may be overridden later) */
p_dev->cache_shift = 7;
+ return ECORE_SUCCESS;
+#ifdef CONFIG_ECORE_LOCK_ALLOC
+handle_err:
+ while (i--) {
+ struct ecore_hwfn *p_hwfn = OSAL_NULL;
+
+ p_hwfn = &p_dev->hwfns[i];
+ OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->dmae_info.lock);
+ }
+ return ECORE_NOMEM;
+#endif
}
static void ecore_qm_info_free(struct ecore_hwfn *p_hwfn)
OSAL_FREE(p_hwfn->p_dev, qm_info->wfq_data);
}
+static void ecore_dbg_user_data_free(struct ecore_hwfn *p_hwfn)
+{
+ OSAL_FREE(p_hwfn->p_dev, p_hwfn->dbg_user_info);
+ p_hwfn->dbg_user_info = OSAL_NULL;
+}
+
void ecore_resc_free(struct ecore_dev *p_dev)
{
int i;
ecore_iov_free(p_hwfn);
ecore_l2_free(p_hwfn);
ecore_dmae_info_free(p_hwfn);
- ecore_dcbx_info_free(p_hwfn, p_hwfn->p_dcbx_info);
+ ecore_dcbx_info_free(p_hwfn);
+ ecore_dbg_user_data_free(p_hwfn);
/* @@@TBD Flush work-queue ? */
+
+ /* destroy doorbell recovery mechanism */
+ ecore_db_recovery_teardown(p_hwfn);
}
}
/* feature flags */
if (IS_ECORE_SRIOV(p_hwfn->p_dev))
flags |= PQ_FLAGS_VFS;
+ if (IS_ECORE_PACING(p_hwfn))
+ flags |= PQ_FLAGS_RLS;
/* protocol flags */
switch (p_hwfn->hw_info.personality) {
case ECORE_PCI_ETH:
- flags |= PQ_FLAGS_MCOS;
+ if (!IS_ECORE_PACING(p_hwfn))
+ flags |= PQ_FLAGS_MCOS;
break;
case ECORE_PCI_FCOE:
flags |= PQ_FLAGS_OFLD;
flags |= PQ_FLAGS_ACK | PQ_FLAGS_OOO | PQ_FLAGS_OFLD;
break;
case ECORE_PCI_ETH_ROCE:
- flags |= PQ_FLAGS_MCOS | PQ_FLAGS_OFLD;
+ flags |= PQ_FLAGS_OFLD | PQ_FLAGS_LLT;
+ if (!IS_ECORE_PACING(p_hwfn))
+ flags |= PQ_FLAGS_MCOS;
break;
case ECORE_PCI_ETH_IWARP:
- flags |= PQ_FLAGS_MCOS | PQ_FLAGS_ACK | PQ_FLAGS_OOO |
- PQ_FLAGS_OFLD;
+ flags |= PQ_FLAGS_ACK | PQ_FLAGS_OOO | PQ_FLAGS_OFLD;
+ if (!IS_ECORE_PACING(p_hwfn))
+ flags |= PQ_FLAGS_MCOS;
break;
default:
DP_ERR(p_hwfn, "unknown personality %d\n",
{
u16 num_pf_rls, num_vfs = ecore_init_qm_get_num_vfs(p_hwfn);
- /* @DPDK */
/* num RLs can't exceed resource amount of rls or vports or the
* dcqcn qps
*/
num_pf_rls = (u16)OSAL_MIN_T(u32, RESC_NUM(p_hwfn, ECORE_RL),
- (u16)RESC_NUM(p_hwfn, ECORE_VPORT));
+ RESC_NUM(p_hwfn, ECORE_VPORT));
/* make sure after we reserve the default and VF rls we'll have
* something left
qm_info->vport_wfq_en = 1;
/* TC config is different for AH 4 port */
- four_port = p_hwfn->p_dev->num_ports_in_engines == MAX_NUM_PORTS_K2;
+ four_port = p_hwfn->p_dev->num_ports_in_engine == MAX_NUM_PORTS_K2;
/* in AH 4 port we have fewer TCs per port */
qm_info->max_phys_tcs_per_port = four_port ? NUM_PHYS_TCS_4PORT_K2 :
static void ecore_init_qm_port_params(struct ecore_hwfn *p_hwfn)
{
/* Initialize qm port parameters */
- u8 i, active_phys_tcs, num_ports = p_hwfn->p_dev->num_ports_in_engines;
+ u8 i, active_phys_tcs, num_ports = p_hwfn->p_dev->num_ports_in_engine;
/* indicate how ooo and high pri traffic is dealt with */
active_phys_tcs = num_ports == MAX_NUM_PORTS_K2 ?
p_qm_port->active = 1;
p_qm_port->active_phys_tcs = active_phys_tcs;
- p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES / num_ports;
+ p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES_E4 / num_ports;
p_qm_port->num_btb_blocks = BTB_MAX_BLOCKS / num_ports;
}
}
"pq overflow! pq %d, max pq %d\n", pq_idx, max_pq);
/* init pq params */
+ qm_info->qm_pq_params[pq_idx].port_id = p_hwfn->port_id;
qm_info->qm_pq_params[pq_idx].vport_id = qm_info->start_vport +
qm_info->num_vports;
qm_info->qm_pq_params[pq_idx].tc_id = tc;
if (tc > max_tc)
DP_ERR(p_hwfn, "tc %d must be smaller than %d\n", tc, max_tc);
- return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_MCOS) + tc;
+ return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_MCOS) + (tc % max_tc);
}
u16 ecore_get_cm_pq_idx_vf(struct ecore_hwfn *p_hwfn, u16 vf)
if (vf > max_vf)
DP_ERR(p_hwfn, "vf %d must be smaller than %d\n", vf, max_vf);
- return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_VFS) + vf;
+ return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_VFS) + (vf % max_vf);
}
-u16 ecore_get_cm_pq_idx_rl(struct ecore_hwfn *p_hwfn, u8 rl)
+u16 ecore_get_cm_pq_idx_rl(struct ecore_hwfn *p_hwfn, u16 rl)
{
u16 max_rl = ecore_init_qm_get_num_pf_rls(p_hwfn);
- if (rl > max_rl)
- DP_ERR(p_hwfn, "rl %d must be smaller than %d\n", rl, max_rl);
+ /* for rate limiters, it is okay to use the modulo behavior - no
+ * DP_ERR
+ */
+ return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_RLS) + (rl % max_rl);
+}
+
+u16 ecore_get_qm_vport_idx_rl(struct ecore_hwfn *p_hwfn, u16 rl)
+{
+ u16 start_pq, pq, qm_pq_idx;
+
+ pq = ecore_get_cm_pq_idx_rl(p_hwfn, rl);
+ start_pq = p_hwfn->qm_info.start_pq;
+ qm_pq_idx = pq - start_pq - CM_TX_PQ_BASE;
- return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_RLS) + rl;
+ if (qm_pq_idx >= p_hwfn->qm_info.num_pqs) {
+ DP_ERR(p_hwfn,
+ "qm_pq_idx %d must be smaller than %d\n",
+ qm_pq_idx, p_hwfn->qm_info.num_pqs);
+ }
+
+ return p_hwfn->qm_info.qm_pq_params[qm_pq_idx].vport_id;
}
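+
+/* Worked example (illustrative): with max_rl == 4, rate limiter ids 0..3 map
+ * to PQs base+0..base+3, and rl == 6 wraps to base + (6 % 4) == base + 2, so
+ * out-of-range ids reuse existing rate-limiter PQs instead of overflowing.
+ */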
/* Functions for creating specific types of pqs */
qm_info->num_pf_rls, ecore_get_pq_flags(p_hwfn));
/* port table */
- for (i = 0; i < p_hwfn->p_dev->num_ports_in_engines; i++) {
+ for (i = 0; i < p_hwfn->p_dev->num_ports_in_engine; i++) {
port = &qm_info->qm_port_params[i];
DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
"port idx %d, active %d, active_phys_tcs %d,"
for (i = 0; i < qm_info->num_pqs; i++) {
pq = &qm_info->qm_pq_params[i];
DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
- "pq idx %d, vport_id %d, tc %d, wrr_grp %d,"
- " rl_valid %d\n",
- qm_info->start_pq + i, pq->vport_id, pq->tc_id,
- pq->wrr_group, pq->rl_valid);
+ "pq idx %d, port %d, vport_id %d, tc %d, wrr_grp %d, rl_valid %d\n",
+ qm_info->start_pq + i, pq->port_id, pq->vport_id,
+ pq->tc_id, pq->wrr_group, pq->rl_valid);
}
}
ecore_init_clear_rt_data(p_hwfn);
/* prepare QM portion of runtime array */
- ecore_qm_init_pf(p_hwfn);
+ ecore_qm_init_pf(p_hwfn, p_ptt, false);
/* activate init tool on runtime array */
rc = ecore_init_run(p_hwfn, p_ptt, PHASE_QM_PF, p_hwfn->rel_pf_id,
qm_info->qm_port_params = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
sizeof(struct init_qm_port_params) *
- p_hwfn->p_dev->num_ports_in_engines);
+ p_hwfn->p_dev->num_ports_in_engine);
if (!qm_info->qm_port_params)
goto alloc_err;
struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
u32 n_eqes, num_cons;
+ /* initialize the doorbell recovery mechanism */
+ rc = ecore_db_recovery_setup(p_hwfn);
+ if (rc)
+ goto alloc_err;
+
/* First allocate the context manager structure */
rc = ecore_cxt_mngr_alloc(p_hwfn);
if (rc)
goto alloc_err;
- /* Set the HW cid/tid numbers (in the contest manager)
+ /* Set the HW cid/tid numbers (in the context manager)
* Must be done prior to any further computations.
*/
rc = ecore_cxt_set_pf_params(p_hwfn);
/* DMA info initialization */
rc = ecore_dmae_info_alloc(p_hwfn);
if (rc) {
- DP_NOTICE(p_hwfn, true,
- "Failed to allocate memory for dmae_info"
- " structure\n");
+ DP_NOTICE(p_hwfn, false, "Failed to allocate memory for dmae_info structure\n");
goto alloc_err;
}
/* DCBX initialization */
rc = ecore_dcbx_info_alloc(p_hwfn);
if (rc) {
- DP_NOTICE(p_hwfn, true,
+ DP_NOTICE(p_hwfn, false,
"Failed to allocate memory for dcbx structure\n");
goto alloc_err;
}
- }
+
+ rc = OSAL_DBG_ALLOC_USER_DATA(p_hwfn, &p_hwfn->dbg_user_info);
+ if (rc) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed to allocate dbg user info structure\n");
+ goto alloc_err;
+ }
+ } /* hwfn loop */
p_dev->reset_stats = OSAL_ZALLOC(p_dev, GFP_KERNEL,
sizeof(*p_dev->reset_stats));
if (!p_dev->reset_stats) {
- DP_NOTICE(p_dev, true, "Failed to allocate reset statistics\n");
+ DP_NOTICE(p_dev, false, "Failed to allocate reset statistics\n");
goto alloc_no_mem;
}
ecore_int_setup(p_hwfn, p_hwfn->p_main_ptt);
ecore_l2_setup(p_hwfn);
- ecore_iov_setup(p_hwfn, p_hwfn->p_main_ptt);
+ ecore_iov_setup(p_hwfn);
}
}
}
/* Ports per engine is based on the values in CNIG_REG_NW_PORT_MODE */
- switch (p_hwfn->p_dev->num_ports_in_engines) {
+ switch (p_hwfn->p_dev->num_ports_in_engine) {
case 1:
hw_mode |= 1 << MODE_PORTS_PER_ENG_1;
break;
default:
DP_NOTICE(p_hwfn, true,
"num_ports_in_engine = %d not supported\n",
- p_hwfn->p_dev->num_ports_in_engines);
+ p_hwfn->p_dev->num_ports_in_engine);
return ECORE_INVAL;
}
- switch (p_hwfn->p_dev->mf_mode) {
- case ECORE_MF_DEFAULT:
- case ECORE_MF_NPAR:
- hw_mode |= 1 << MODE_MF_SI;
- break;
- case ECORE_MF_OVLAN:
+ if (OSAL_TEST_BIT(ECORE_MF_OVLAN_CLSS,
+ &p_hwfn->p_dev->mf_bits))
hw_mode |= 1 << MODE_MF_SD;
- break;
- default:
- DP_NOTICE(p_hwfn, true,
- "Unsupported MF mode, init as DEFAULT\n");
+ else
hw_mode |= 1 << MODE_MF_SI;
- }
#ifndef ASIC_ONLY
if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
#endif
hw_mode |= 1 << MODE_ASIC;
- if (p_hwfn->p_dev->num_hwfns > 1)
+ if (ECORE_IS_CMT(p_hwfn->p_dev))
hw_mode |= 1 << MODE_100G;
p_hwfn->hw_info.hw_mode = hw_mode;
if (ECORE_IS_AH(p_dev)) {
/* 2 for 4-port, 1 for 2-port, 0 for 1-port */
ecore_wr(p_hwfn, p_ptt, MISC_REG_PORT_MODE,
- (p_dev->num_ports_in_engines >> 1));
+ (p_dev->num_ports_in_engine >> 1));
ecore_wr(p_hwfn, p_ptt, MISC_REG_BLOCK_256B_EN,
- p_dev->num_ports_in_engines == 4 ? 0 : 3);
+ p_dev->num_ports_in_engine == 4 ? 0 : 3);
}
}
}
}
+static void ecore_init_cache_line_size(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 val, wr_mbs, cache_line_size;
+
+ val = ecore_rd(p_hwfn, p_ptt, PSWRQ2_REG_WR_MBS0);
+ switch (val) {
+ case 0:
+ wr_mbs = 128;
+ break;
+ case 1:
+ wr_mbs = 256;
+ break;
+ case 2:
+ wr_mbs = 512;
+ break;
+ default:
+ DP_INFO(p_hwfn,
+ "Unexpected value of PSWRQ2_REG_WR_MBS0 [0x%x]. Avoid configuring PGLUE_B_REG_CACHE_LINE_SIZE.\n",
+ val);
+ return;
+ }
+
+ cache_line_size = OSAL_MIN_T(u32, OSAL_CACHE_LINE_SIZE, wr_mbs);
+ switch (cache_line_size) {
+ case 32:
+ val = 0;
+ break;
+ case 64:
+ val = 1;
+ break;
+ case 128:
+ val = 2;
+ break;
+ case 256:
+ val = 3;
+ break;
+ default:
+ DP_INFO(p_hwfn,
+ "Unexpected value of cache line size [0x%x]. Avoid configuring PGLUE_B_REG_CACHE_LINE_SIZE.\n",
+ cache_line_size);
+ }
+
+ if (wr_mbs < OSAL_CACHE_LINE_SIZE)
+ DP_INFO(p_hwfn,
+ "The cache line size for padding is suboptimal for performance [OS cache line size 0x%x, wr mbs 0x%x]\n",
+ OSAL_CACHE_LINE_SIZE, wr_mbs);
+
+ STORE_RT_REG(p_hwfn, PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET, val);
+ if (val > 0) {
+ STORE_RT_REG(p_hwfn, PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET, val);
+ STORE_RT_REG(p_hwfn, PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET, val);
+ }
+}
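+
+/* Worked example (illustrative): with a 64B OS cache line and a
+ * PSWRQ2_REG_WR_MBS0 reading of 0 (wr_mbs == 128), cache_line_size =
+ * min(64, 128) = 64, which encodes to val == 1, so the PGLUE cache-line
+ * register and both DRAM alignment registers are programmed with 1.
+ */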
+
static enum _ecore_status_t ecore_hw_init_common(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
int hw_mode)
ecore_init_cau_rt_data(p_dev);
/* Program GTT windows */
- ecore_gtt_init(p_hwfn);
+ ecore_gtt_init(p_hwfn, p_ptt);
#ifndef ASIC_ONLY
if (CHIP_REV_IS_EMUL(p_dev)) {
- rc = ecore_hw_init_chip(p_hwfn, p_hwfn->p_main_ptt);
+ rc = ecore_hw_init_chip(p_hwfn, p_ptt);
if (rc != ECORE_SUCCESS)
return rc;
}
}
ecore_qm_common_rt_init(p_hwfn,
- p_dev->num_ports_in_engines,
+ p_dev->num_ports_in_engine,
qm_info->max_phys_tcs_per_port,
qm_info->pf_rl_en, qm_info->pf_wfq_en,
qm_info->vport_rl_en, qm_info->vport_wfq_en,
ecore_cxt_hw_init_common(p_hwfn);
- rc = ecore_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode);
+ ecore_init_cache_line_size(p_hwfn, p_ptt);
+
+ rc = ecore_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ECORE_PATH_ID(p_hwfn),
+ hw_mode);
if (rc != ECORE_SUCCESS)
return rc;
ecore_hw_init_dpi_size(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u32 pwm_region_size, u32 n_cpus)
{
- u32 dpi_page_size_1, dpi_page_size_2, dpi_page_size;
- u32 dpi_bit_shift, dpi_count;
+ u32 dpi_bit_shift, dpi_count, dpi_page_size;
u32 min_dpis;
+ u32 n_wids;
/* Calculate DPI size
* ------------------
* 0 is 4kB, 1 is 8kB and etc. Hence the minimum size is 4,096
* containing 4 WIDs.
*/
- dpi_page_size_1 = ECORE_WID_SIZE * n_cpus;
- dpi_page_size_2 = OSAL_MAX_T(u32, ECORE_WID_SIZE, OSAL_PAGE_SIZE);
- dpi_page_size = OSAL_MAX_T(u32, dpi_page_size_1, dpi_page_size_2);
- dpi_page_size = OSAL_ROUNDUP_POW_OF_TWO(dpi_page_size);
+ n_wids = OSAL_MAX_T(u32, ECORE_MIN_WIDS, n_cpus);
+ dpi_page_size = ECORE_WID_SIZE * OSAL_ROUNDUP_POW_OF_TWO(n_wids);
+ dpi_page_size = (dpi_page_size + OSAL_PAGE_SIZE - 1) &
+ ~(OSAL_PAGE_SIZE - 1);
dpi_bit_shift = OSAL_LOG2(dpi_page_size / 4096);
-
dpi_count = pwm_region_size / dpi_page_size;
min_dpis = p_hwfn->pf_params.rdma_pf_params.min_dpis;
ECORE_ROCE_EDPM_MODE_DISABLE = 2,
};
+bool ecore_edpm_enabled(struct ecore_hwfn *p_hwfn)
+{
+ if (p_hwfn->dcbx_no_edpm || p_hwfn->db_bar_no_edpm)
+ return false;
+
+ return true;
+}
+
static enum _ecore_status_t
ecore_hw_init_pf_doorbell_bar(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
enum _ecore_status_t rc = ECORE_SUCCESS;
u8 cond;
- db_bar_size = ecore_hw_bar_size(p_hwfn, BAR_ID_1);
- if (p_hwfn->p_dev->num_hwfns > 1)
+ db_bar_size = ecore_hw_bar_size(p_hwfn, p_ptt, BAR_ID_1);
+ if (ECORE_IS_CMT(p_hwfn->p_dev))
db_bar_size /= 2;
/* Calculate doorbell regions
ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_CORE,
OSAL_NULL) +
ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, OSAL_NULL);
- norm_regsize = ROUNDUP(ECORE_PF_DEMS_SIZE * non_pwm_conn, 4096);
+ norm_regsize = ROUNDUP(ECORE_PF_DEMS_SIZE * non_pwm_conn,
+ OSAL_PAGE_SIZE);
min_addr_reg1 = norm_regsize / 4096;
pwm_regsize = db_bar_size - norm_regsize;
/* Either EDPM is mandatory, or we are attempting to allocate a
* WID per CPU.
*/
- n_cpus = OSAL_NUM_ACTIVE_CPU();
+ n_cpus = OSAL_NUM_CPUS();
rc = ecore_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus);
}
DP_INFO(p_hwfn,
" dpi_size=%d, dpi_count=%d, roce_edpm=%s\n",
p_hwfn->dpi_size, p_hwfn->dpi_count,
- ((p_hwfn->dcbx_no_edpm) || (p_hwfn->db_bar_no_edpm)) ?
+ (!ecore_edpm_enabled(p_hwfn)) ?
"disabled" : "enabled");
/* Check return codes from above calls */
struct ecore_ptt *p_ptt,
int hw_mode)
{
+ u32 ppf_to_eng_sel[NIG_REG_PPF_TO_ENGINE_SEL_RT_SIZE];
+ u32 val;
enum _ecore_status_t rc = ECORE_SUCCESS;
+ u8 i;
+
+ /* In CMT for non-RoCE packets - use connection based classification */
+ val = ECORE_IS_CMT(p_hwfn->p_dev) ? 0x8 : 0x0;
+ for (i = 0; i < NIG_REG_PPF_TO_ENGINE_SEL_RT_SIZE; i++)
+ ppf_to_eng_sel[i] = val;
+ STORE_RT_REG_AGG(p_hwfn, NIG_REG_PPF_TO_ENGINE_SEL_RT_OFFSET,
+ ppf_to_eng_sel);
+
+ /* In CMT the gate should be cleared by the 2nd hwfn */
+ if (!ECORE_IS_CMT(p_hwfn->p_dev) || !IS_LEAD_HWFN(p_hwfn))
+ STORE_RT_REG(p_hwfn, NIG_REG_BRB_GATE_DNTFWD_PORT_RT_OFFSET, 0);
rc = ecore_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id,
hw_mode);
if (rc != ECORE_SUCCESS)
return rc;
+
+ ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_MASTER_WRITE_PAD_ENABLE, 0);
+
#ifndef ASIC_ONLY
if (CHIP_REV_IS_ASIC(p_hwfn->p_dev))
return ECORE_SUCCESS;
else if (ECORE_IS_BB(p_hwfn->p_dev))
ecore_link_init_bb(p_hwfn, p_ptt, p_hwfn->port_id);
} else if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
- if (p_hwfn->p_dev->num_hwfns > 1) {
+ if (ECORE_IS_CMT(p_hwfn->p_dev)) {
/* Activate OPTE in CMT */
u32 val;
/* Update rate limit once we'll actually have a link */
p_hwfn->qm_info.pf_rl = 100000;
}
- ecore_cxt_hw_init_pf(p_hwfn);
+ ecore_cxt_hw_init_pf(p_hwfn, p_ptt);
ecore_int_igu_init_rt(p_hwfn);
STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET, 1);
STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET,
p_hwfn->hw_info.ovlan);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "Configuring LLH_FUNC_FILTER_HDR_SEL\n");
+ STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET,
+ 1);
}
/* Enable classification by MAC if needed */
/* perform debug configuration when chip is out of reset */
OSAL_BEFORE_PF_START((void *)p_hwfn->p_dev, p_hwfn->my_id);
+ /* Sanity check before the PF init sequence that uses DMAE */
+ rc = ecore_dmae_sanity(p_hwfn, p_ptt, "pf_phase");
+ if (rc)
+ return rc;
+
/* PF Init sequence */
rc = ecore_init_run(p_hwfn, p_ptt, PHASE_PF, rel_pf_id, hw_mode);
if (rc)
return rc;
/* send function start command */
- rc = ecore_sp_pf_start(p_hwfn, p_tunn, p_hwfn->p_dev->mf_mode,
+ rc = ecore_sp_pf_start(p_hwfn, p_ptt, p_tunn,
allow_npar_tx_switch);
if (rc) {
DP_NOTICE(p_hwfn, true,
"Function start ramrod failed\n");
} else {
- prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1);
- DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
- "PRS_REG_SEARCH_TAG1: %x\n", prs_reg);
-
- if (p_hwfn->hw_info.personality == ECORE_PCI_FCOE) {
- ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1,
- (1 << 2));
- ecore_wr(p_hwfn, p_ptt,
- PRS_REG_PKT_LEN_STAT_TAGS_NOT_COUNTED_FIRST,
- 0x100);
- }
- DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
- "PRS_REG_SEARCH registers after start PFn\n");
- prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP);
- DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
- "PRS_REG_SEARCH_TCP: %x\n", prs_reg);
- prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP);
- DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
- "PRS_REG_SEARCH_UDP: %x\n", prs_reg);
- prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE);
- DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
- "PRS_REG_SEARCH_FCOE: %x\n", prs_reg);
- prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE);
- DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
- "PRS_REG_SEARCH_ROCE: %x\n", prs_reg);
- prs_reg = ecore_rd(p_hwfn, p_ptt,
- PRS_REG_SEARCH_TCP_FIRST_FRAG);
- DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
- "PRS_REG_SEARCH_TCP_FIRST_FRAG: %x\n",
- prs_reg);
- prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1);
- DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
- "PRS_REG_SEARCH_TAG1: %x\n", prs_reg);
+ return rc;
}
+ prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
+ "PRS_REG_SEARCH_TAG1: %x\n", prs_reg);
+
+ if (p_hwfn->hw_info.personality == ECORE_PCI_FCOE) {
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1,
+ (1 << 2));
+ ecore_wr(p_hwfn, p_ptt,
+ PRS_REG_PKT_LEN_STAT_TAGS_NOT_COUNTED_FIRST,
+ 0x100);
+ }
+ DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
+ "PRS_REG_SEARCH registers after start PFn\n");
+ prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
+ "PRS_REG_SEARCH_TCP: %x\n", prs_reg);
+ prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
+ "PRS_REG_SEARCH_UDP: %x\n", prs_reg);
+ prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
+ "PRS_REG_SEARCH_FCOE: %x\n", prs_reg);
+ prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
+ "PRS_REG_SEARCH_ROCE: %x\n", prs_reg);
+ prs_reg = ecore_rd(p_hwfn, p_ptt,
+ PRS_REG_SEARCH_TCP_FIRST_FRAG);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
+ "PRS_REG_SEARCH_TCP_FIRST_FRAG: %x\n",
+ prs_reg);
+ prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
+ "PRS_REG_SEARCH_TAG1: %x\n", prs_reg);
}
- return rc;
+ return ECORE_SUCCESS;
}
enum _ecore_status_t ecore_pglueb_set_pfid_enable(struct ecore_hwfn *p_hwfn,
p_hwfn->mcp_info->mfw_mb_length);
}
+static void ecore_pglueb_clear_err(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR,
+ 1 << p_hwfn->abs_pf_id);
+}
+
+static enum _ecore_status_t
+ecore_fill_load_req_params(struct ecore_hwfn *p_hwfn,
+ struct ecore_load_req_params *p_load_req,
+ struct ecore_drv_load_params *p_drv_load)
+{
+ /* Make sure that if ecore-client didn't provide inputs, all the
+ * expected defaults are indeed zero.
+ */
+ OSAL_BUILD_BUG_ON(ECORE_DRV_ROLE_OS != 0);
+ OSAL_BUILD_BUG_ON(ECORE_LOAD_REQ_LOCK_TO_DEFAULT != 0);
+ OSAL_BUILD_BUG_ON(ECORE_OVERRIDE_FORCE_LOAD_NONE != 0);
+
+ OSAL_MEM_ZERO(p_load_req, sizeof(*p_load_req));
+
+ if (p_drv_load == OSAL_NULL)
+ goto out;
+
+ p_load_req->drv_role = p_drv_load->is_crash_kernel ?
+ ECORE_DRV_ROLE_KDUMP :
+ ECORE_DRV_ROLE_OS;
+ p_load_req->avoid_eng_reset = p_drv_load->avoid_eng_reset;
+ p_load_req->override_force_load = p_drv_load->override_force_load;
+
+ /* Old MFW versions don't support timeout values other than default and
+ * none, so these values are replaced according to the fall-back action.
+ */
+
+ if (p_drv_load->mfw_timeout_val == ECORE_LOAD_REQ_LOCK_TO_DEFAULT ||
+ p_drv_load->mfw_timeout_val == ECORE_LOAD_REQ_LOCK_TO_NONE ||
+ (p_hwfn->mcp_info->capabilities &
+ FW_MB_PARAM_FEATURE_SUPPORT_DRV_LOAD_TO)) {
+ p_load_req->timeout_val = p_drv_load->mfw_timeout_val;
+ goto out;
+ }
+
+ switch (p_drv_load->mfw_timeout_fallback) {
+ case ECORE_TO_FALLBACK_TO_NONE:
+ p_load_req->timeout_val = ECORE_LOAD_REQ_LOCK_TO_NONE;
+ break;
+ case ECORE_TO_FALLBACK_TO_DEFAULT:
+ p_load_req->timeout_val = ECORE_LOAD_REQ_LOCK_TO_DEFAULT;
+ break;
+ case ECORE_TO_FALLBACK_FAIL_LOAD:
+ DP_NOTICE(p_hwfn, false,
+ "Received %d as a value for MFW timeout while the MFW supports only default [%d] or none [%d]. Abort.\n",
+ p_drv_load->mfw_timeout_val,
+ ECORE_LOAD_REQ_LOCK_TO_DEFAULT,
+ ECORE_LOAD_REQ_LOCK_TO_NONE);
+ return ECORE_ABORTED;
+ }
+
+ DP_INFO(p_hwfn,
+ "Modified the MFW timeout value from %d to %s [%d] due to lack of MFW support\n",
+ p_drv_load->mfw_timeout_val,
+ (p_load_req->timeout_val == ECORE_LOAD_REQ_LOCK_TO_DEFAULT) ?
+ "default" : "none",
+ p_load_req->timeout_val);
+out:
+ return ECORE_SUCCESS;
+}
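+
+/* Illustrative sketch (not part of this patch): an ecore client could request
+ * a non-default MFW timeout and fall back to the default on old MFWs:
+ *
+ *	struct ecore_drv_load_params drv_load = { 0 };
+ *
+ *	drv_load.mfw_timeout_val = 100;	 (hypothetical value)
+ *	drv_load.mfw_timeout_fallback = ECORE_TO_FALLBACK_TO_DEFAULT;
+ */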
+
enum _ecore_status_t ecore_vf_start(struct ecore_hwfn *p_hwfn,
struct ecore_hw_init_params *p_params)
{
return ECORE_SUCCESS;
}
-static void ecore_pglueb_clear_err(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt)
-{
- ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR,
- 1 << p_hwfn->abs_pf_id);
-}
-
enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
struct ecore_hw_init_params *p_params)
{
bool b_default_mtu = true;
struct ecore_hwfn *p_hwfn;
enum _ecore_status_t rc = ECORE_SUCCESS;
+ u16 ether_type;
int i;
- if ((p_params->int_mode == ECORE_INT_MODE_MSI) &&
- (p_dev->num_hwfns > 1)) {
+ if ((p_params->int_mode == ECORE_INT_MODE_MSI) && ECORE_IS_CMT(p_dev)) {
DP_NOTICE(p_dev, false,
"MSI mode is not supported for CMT devices\n");
return ECORE_INVAL;
if (rc != ECORE_SUCCESS)
return rc;
- OSAL_MEM_ZERO(&load_req_params, sizeof(load_req_params));
- load_req_params.drv_role = p_params->is_crash_kernel ?
- ECORE_DRV_ROLE_KDUMP :
- ECORE_DRV_ROLE_OS;
- load_req_params.timeout_val = p_params->mfw_timeout_val;
- load_req_params.avoid_eng_reset = p_params->avoid_eng_reset;
+ if (IS_PF(p_dev) && (OSAL_TEST_BIT(ECORE_MF_8021Q_TAGGING,
+ &p_dev->mf_bits) ||
+ OSAL_TEST_BIT(ECORE_MF_8021AD_TAGGING,
+ &p_dev->mf_bits))) {
+ if (OSAL_TEST_BIT(ECORE_MF_8021Q_TAGGING,
+ &p_dev->mf_bits))
+ ether_type = ETHER_TYPE_VLAN;
+ else
+ ether_type = ETHER_TYPE_QINQ;
+ STORE_RT_REG(p_hwfn, PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET,
+ ether_type);
+ STORE_RT_REG(p_hwfn, NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET,
+ ether_type);
+ STORE_RT_REG(p_hwfn, PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET,
+ ether_type);
+ STORE_RT_REG(p_hwfn, DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET,
+ ether_type);
+ }
+
+ ecore_set_spq_block_timeout(p_hwfn, p_params->spq_timeout_ms);
+
+ rc = ecore_fill_load_req_params(p_hwfn, &load_req_params,
+ p_params->p_drv_load_params);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
rc = ecore_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt,
&load_req_params);
if (rc != ECORE_SUCCESS) {
- DP_NOTICE(p_hwfn, true,
+ DP_NOTICE(p_hwfn, false,
"Failed sending a LOAD_REQ command\n");
return rc;
}
"Load request was sent. Load code: 0x%x\n",
load_code);
+ ecore_mcp_set_capabilities(p_hwfn, p_hwfn->p_main_ptt);
+
/* CQ75580:
* When coming back from hibernate state, the registers from
* which shadow is read initially are not initialized. It turns
p_hwfn->first_on_engine = (load_code ==
FW_MSG_CODE_DRV_LOAD_ENGINE);
- if (!qm_lock_init) {
+ if (!qm_lock_ref_cnt) {
+#ifdef CONFIG_ECORE_LOCK_ALLOC
+ rc = OSAL_SPIN_LOCK_ALLOC(p_hwfn, &qm_lock);
+ if (rc) {
+ DP_ERR(p_hwfn, "qm_lock allocation failed\n");
+ goto qm_lock_fail;
+ }
+#endif
OSAL_SPIN_LOCK_INIT(&qm_lock);
- qm_lock_init = true;
}
+ ++qm_lock_ref_cnt;
/* Clean up chip from previous driver if such remains exist.
* This is not needed when the PF is the first one on the
}
}
- /* Log and clean previous pglue_b errors if such exist */
- ecore_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_main_ptt);
- ecore_pglueb_clear_err(p_hwfn, p_hwfn->p_main_ptt);
+ /* Log and clear previous pglue_b errors if such exist */
+ ecore_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_main_ptt, true);
/* Enable the PF's internal FID_enable in the PXP */
rc = ecore_pglueb_set_pfid_enable(p_hwfn, p_hwfn->p_main_ptt,
if (rc != ECORE_SUCCESS)
goto load_err;
+ /* Clear the pglue_b was_error indication.
+ * In E4 it must be done after the BME and the internal
+ * FID_enable for the PF are set, since VDMs may cause the
+ * indication to be set again.
+ */
+ ecore_pglueb_clear_err(p_hwfn, p_hwfn->p_main_ptt);
+
switch (load_code) {
case FW_MSG_CODE_DRV_LOAD_ENGINE:
rc = ecore_hw_init_common(p_hwfn, p_hwfn->p_main_ptt,
}
if (rc != ECORE_SUCCESS) {
- DP_NOTICE(p_hwfn, true,
+ DP_NOTICE(p_hwfn, false,
"init phase failed for loadcode 0x%x (rc %d)\n",
load_code, rc);
goto load_err;
}
rc = ecore_mcp_load_done(p_hwfn, p_hwfn->p_main_ptt);
- if (rc != ECORE_SUCCESS)
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, false,
+ "Sending load done failed, rc = %d\n", rc);
+ if (rc == ECORE_NOMEM) {
+ DP_NOTICE(p_hwfn, false,
+ "Sending load done was failed due to memory allocation failure\n");
+ goto load_err;
+ }
return rc;
+ }
/* send DCBX attention request command */
DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
"sending phony dcbx set command to trigger DCBx attention handling\n");
rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
DRV_MSG_CODE_SET_DCBX,
- 1 << DRV_MB_PARAM_DCBX_NOTIFY_SHIFT, &resp,
+ 1 << DRV_MB_PARAM_DCBX_NOTIFY_OFFSET, &resp,
¶m);
if (rc != ECORE_SUCCESS) {
- DP_NOTICE(p_hwfn, true,
+ DP_NOTICE(p_hwfn, false,
"Failed to send DCBX attention request\n");
return rc;
}
p_hwfn->hw_init_done = true;
}
+ if (IS_PF(p_dev)) {
+ /* Get pre-negotiated values for stag, bandwidth etc. */
+ p_hwfn = ECORE_LEADING_HWFN(p_dev);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
+ "Sending GET_OEM_UPDATES command to trigger stag/bandwidth attention handling\n");
+ rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
+ DRV_MSG_CODE_GET_OEM_UPDATES,
+ 1 << DRV_MB_PARAM_DUMMY_OEM_UPDATES_OFFSET,
+ &resp, ¶m);
+ if (rc != ECORE_SUCCESS)
+ DP_NOTICE(p_hwfn, false,
+ "Failed to send GET_OEM_UPDATES attention request\n");
+ }
+
if (IS_PF(p_dev)) {
p_hwfn = ECORE_LEADING_HWFN(p_dev);
drv_mb_param = STORM_FW_VERSION;
if (rc != ECORE_SUCCESS)
DP_INFO(p_hwfn, "Failed to update firmware version\n");
- if (!b_default_mtu)
+ if (!b_default_mtu) {
rc = ecore_mcp_ov_update_mtu(p_hwfn, p_hwfn->p_main_ptt,
p_hwfn->hw_info.mtu);
- if (rc != ECORE_SUCCESS)
- DP_INFO(p_hwfn, "Failed to update default mtu\n");
+ if (rc != ECORE_SUCCESS)
+ DP_INFO(p_hwfn, "Failed to update default mtu\n");
+ }
rc = ecore_mcp_ov_update_driver_state(p_hwfn,
p_hwfn->p_main_ptt,
ECORE_OV_DRIVER_STATE_DISABLED);
if (rc != ECORE_SUCCESS)
DP_INFO(p_hwfn, "Failed to update driver state\n");
+
+ rc = ecore_mcp_ov_update_eswitch(p_hwfn, p_hwfn->p_main_ptt,
+ ECORE_OV_ESWITCH_NONE);
+ if (rc != ECORE_SUCCESS)
+ DP_INFO(p_hwfn, "Failed to update eswitch mode\n");
}
return rc;
load_err:
+ --qm_lock_ref_cnt;
+#ifdef CONFIG_ECORE_LOCK_ALLOC
+ if (!qm_lock_ref_cnt)
+ OSAL_SPIN_LOCK_DEALLOC(&qm_lock);
+qm_lock_fail:
+#endif
/* The MFW load lock should be released regardless of success or failure
* of initialization.
* TODO: replace this with an attempt to send cancel_load.
if (i < ECORE_HW_STOP_RETRY_LIMIT)
return;
- DP_NOTICE(p_hwfn, true, "Timers linear scans are not over"
- " [Connection %02x Tasks %02x]\n",
+ DP_NOTICE(p_hwfn, false,
+ "Timers linear scans are not over [Connection %02x Tasks %02x]\n",
(u8)ecore_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_CONN),
(u8)ecore_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK));
}
if (!p_dev->recov_in_prog) {
rc = ecore_mcp_unload_req(p_hwfn, p_ptt);
if (rc != ECORE_SUCCESS) {
- DP_NOTICE(p_hwfn, true,
+ DP_NOTICE(p_hwfn, false,
"Failed sending a UNLOAD_REQ command. rc = %d.\n",
rc);
rc2 = ECORE_UNKNOWN_ERROR;
rc = ecore_sp_pf_stop(p_hwfn);
if (rc != ECORE_SUCCESS) {
- DP_NOTICE(p_hwfn, true,
+ DP_NOTICE(p_hwfn, false,
"Failed to close PF against FW [rc = %d]. Continue to stop HW to prevent illegal host access by the device.\n",
rc);
rc2 = ECORE_UNKNOWN_ERROR;
}
+ OSAL_DPC_SYNC(p_hwfn);
+
+ /* After this point we don't expect the FW to send us async
+ * events
+ */
+
/* perform debug action after PF stop was sent */
OSAL_AFTER_PF_STOP((void *)p_dev, p_hwfn->my_id);
ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_DB_ENABLE, 0);
ecore_wr(p_hwfn, p_ptt, QM_REG_PF_EN, 0);
+ --qm_lock_ref_cnt;
+#ifdef CONFIG_ECORE_LOCK_ALLOC
+ if (!qm_lock_ref_cnt)
+ OSAL_SPIN_LOCK_DEALLOC(&qm_lock);
+#endif
+
if (!p_dev->recov_in_prog) {
- ecore_mcp_unload_done(p_hwfn, p_ptt);
+ rc = ecore_mcp_unload_done(p_hwfn, p_ptt);
+ if (rc == ECORE_NOMEM) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed sending an UNLOAD_DONE command due to a memory allocation failure. Resending.\n");
+ rc = ecore_mcp_unload_done(p_hwfn, p_ptt);
+ }
if (rc != ECORE_SUCCESS) {
- DP_NOTICE(p_hwfn, true,
+ DP_NOTICE(p_hwfn, false,
"Failed sending a UNLOAD_DONE command. rc = %d.\n",
rc);
rc2 = ECORE_UNKNOWN_ERROR;
return rc2;
}
-void ecore_hw_stop_fastpath(struct ecore_dev *p_dev)
+enum _ecore_status_t ecore_hw_stop_fastpath(struct ecore_dev *p_dev)
{
int j;
for_each_hwfn(p_dev, j) {
struct ecore_hwfn *p_hwfn = &p_dev->hwfns[j];
- struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt;
+ struct ecore_ptt *p_ptt;
if (IS_VF(p_dev)) {
ecore_vf_pf_int_cleanup(p_hwfn);
continue;
}
+ p_ptt = ecore_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return ECORE_AGAIN;
DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN,
"Shutting down the fastpath\n");
ecore_int_igu_init_pure_rt(p_hwfn, p_ptt, false, false);
/* Need to wait 1ms to guarantee SBs are cleared */
OSAL_MSLEEP(1);
+ ecore_ptt_release(p_hwfn, p_ptt);
}
+
+ return ECORE_SUCCESS;
}
-void ecore_hw_start_fastpath(struct ecore_hwfn *p_hwfn)
+enum _ecore_status_t ecore_hw_start_fastpath(struct ecore_hwfn *p_hwfn)
{
- struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt;
+ struct ecore_ptt *p_ptt;
if (IS_VF(p_hwfn->p_dev))
- return;
+ return ECORE_SUCCESS;
+
+ p_ptt = ecore_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return ECORE_AGAIN;
/* If roce info is allocated it means roce is initialized and should
* be enabled in searcher.
}
/* Re-open incoming traffic */
- ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
+ ecore_wr(p_hwfn, p_ptt,
NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);
+ ecore_ptt_release(p_hwfn, p_ptt);
+
+ return ECORE_SUCCESS;
}
/* Free hwfn memory and resources acquired in hw_hwfn_prepare */
FEAT_NUM(p_hwfn, ECORE_VF_L2_QUE));
}
- feat_num[ECORE_FCOE_CQ] = OSAL_MIN_T(u32, sb_cnt.cnt,
- RESC_NUM(p_hwfn,
- ECORE_CMDQS_CQS));
- feat_num[ECORE_ISCSI_CQ] = OSAL_MIN_T(u32, sb_cnt.cnt,
- RESC_NUM(p_hwfn,
- ECORE_CMDQS_CQS));
+ if (ECORE_IS_FCOE_PERSONALITY(p_hwfn) ||
+ ECORE_IS_ISCSI_PERSONALITY(p_hwfn)) {
+ u32 *p_storage_feat = ECORE_IS_FCOE_PERSONALITY(p_hwfn) ?
+ &feat_num[ECORE_FCOE_CQ] :
+ &feat_num[ECORE_ISCSI_CQ];
+ u32 limit = sb_cnt.cnt;
+
+ /* The number of queues should not exceed the number of FP SBs.
+ * In a storage target, the queues are divided into pairs of a CQ
+ * and a CmdQ, and each pair uses a single SB. The limit in
+ * this case should allow a max ratio of 2:1 instead of 1:1.
+ */
+ if (p_hwfn->p_dev->b_is_target)
+ limit *= 2;
+ *p_storage_feat = OSAL_MIN_T(u32, limit,
+ RESC_NUM(p_hwfn, ECORE_CMDQS_CQS));
+
+ /* @DPDK */
+ /* The size of "cq_cmdq_sb_num_arr" in the fcoe/iscsi init
+ * ramrod is limited to "NUM_OF_GLOBAL_QUEUES / 2".
+ */
+ *p_storage_feat = OSAL_MIN_T(u32, *p_storage_feat,
+ (NUM_OF_GLOBAL_QUEUES / 2));
+ }
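+
+ /* Worked example (illustrative): on a storage target with 16 fastpath
+ * SBs, limit becomes 32 (a CQ/CmdQ pair per SB at a 2:1 ratio), and
+ * the feature count is min(32, RESC_NUM(CMDQS_CQS),
+ * NUM_OF_GLOBAL_QUEUES / 2).
+ */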
DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE,
"#PF_L2_QUEUE=%d VF_L2_QUEUES=%d #ROCE_CNQ=%d #FCOE_CQ=%d #ISCSI_CQ=%d #SB=%d\n",
static enum _ecore_status_t
__ecore_hw_set_soft_resc_size(struct ecore_hwfn *p_hwfn,
- enum ecore_resources res_id, u32 resc_max_val,
+ struct ecore_ptt *p_ptt,
+ enum ecore_resources res_id,
+ u32 resc_max_val,
u32 *p_mcp_resp)
{
enum _ecore_status_t rc;
- rc = ecore_mcp_set_resc_max_val(p_hwfn, p_hwfn->p_main_ptt, res_id,
+ rc = ecore_mcp_set_resc_max_val(p_hwfn, p_ptt, res_id,
resc_max_val, p_mcp_resp);
if (rc != ECORE_SUCCESS) {
- DP_NOTICE(p_hwfn, true,
+ DP_NOTICE(p_hwfn, false,
"MFW response failure for a max value setting of resource %d [%s]\n",
res_id, ecore_hw_get_resc_name(res_id));
return rc;
}
static enum _ecore_status_t
-ecore_hw_set_soft_resc_size(struct ecore_hwfn *p_hwfn)
+ecore_hw_set_soft_resc_size(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
{
bool b_ah = ECORE_IS_AH(p_hwfn->p_dev);
u32 resc_max_val, mcp_resp;
continue;
}
- rc = __ecore_hw_set_soft_resc_size(p_hwfn, res_id,
+ rc = __ecore_hw_set_soft_resc_size(p_hwfn, p_ptt, res_id,
resc_max_val, &mcp_resp);
if (rc != ECORE_SUCCESS)
return rc;
return ECORE_SUCCESS;
}
-#define ECORE_RESC_ALLOC_LOCK_RETRY_CNT 10
-#define ECORE_RESC_ALLOC_LOCK_RETRY_INTVL_US 10000 /* 10 msec */
-
static enum _ecore_status_t ecore_hw_get_resc(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
bool drv_resc_alloc)
{
struct ecore_resc_unlock_params resc_unlock_params;
* resources allocation queries should be atomic. Since several PFs can
* run in parallel - a resource lock is needed.
* If either the resource lock or resource set value commands are not
- * supported - skip the the max values setting, release the lock if
+ * supported - skip the max values setting, release the lock if
* needed, and proceed to the queries. Other failures, including a
* failure to acquire the lock, will cause this function to fail.
* Old drivers that don't acquire the lock can run in parallel, and
* their allocation values won't be affected by the updated max values.
*/
- OSAL_MEM_ZERO(&resc_lock_params, sizeof(resc_lock_params));
- resc_lock_params.resource = ECORE_RESC_LOCK_RESC_ALLOC;
- resc_lock_params.retry_num = ECORE_RESC_ALLOC_LOCK_RETRY_CNT;
- resc_lock_params.retry_interval = ECORE_RESC_ALLOC_LOCK_RETRY_INTVL_US;
- resc_lock_params.sleep_b4_retry = true;
- OSAL_MEM_ZERO(&resc_unlock_params, sizeof(resc_unlock_params));
- resc_unlock_params.resource = ECORE_RESC_LOCK_RESC_ALLOC;
-
- rc = ecore_mcp_resc_lock(p_hwfn, p_hwfn->p_main_ptt, &resc_lock_params);
+ ecore_mcp_resc_lock_default_init(&resc_lock_params, &resc_unlock_params,
+ ECORE_RESC_LOCK_RESC_ALLOC, false);
+
+ rc = ecore_mcp_resc_lock(p_hwfn, p_ptt, &resc_lock_params);
if (rc != ECORE_SUCCESS && rc != ECORE_NOTIMPL) {
return rc;
} else if (rc == ECORE_NOTIMPL) {
rc = ECORE_BUSY;
goto unlock_and_exit;
} else {
- rc = ecore_hw_set_soft_resc_size(p_hwfn);
+ rc = ecore_hw_set_soft_resc_size(p_hwfn, p_ptt);
if (rc != ECORE_SUCCESS && rc != ECORE_NOTIMPL) {
DP_NOTICE(p_hwfn, false,
"Failed to set the max values of the soft resources\n");
} else if (rc == ECORE_NOTIMPL) {
DP_INFO(p_hwfn,
"Skip the max values setting of the soft resources since it is not supported by the MFW\n");
- rc = ecore_mcp_resc_unlock(p_hwfn, p_hwfn->p_main_ptt,
+ rc = ecore_mcp_resc_unlock(p_hwfn, p_ptt,
&resc_unlock_params);
if (rc != ECORE_SUCCESS)
DP_INFO(p_hwfn,
goto unlock_and_exit;
if (resc_lock_params.b_granted && !resc_unlock_params.b_released) {
- rc = ecore_mcp_resc_unlock(p_hwfn, p_hwfn->p_main_ptt,
+ rc = ecore_mcp_resc_unlock(p_hwfn, p_ptt,
&resc_unlock_params);
if (rc != ECORE_SUCCESS)
DP_INFO(p_hwfn,
}
/* This will also learn the number of SBs from MFW */
- if (ecore_int_igu_reset_cam(p_hwfn, p_hwfn->p_main_ptt))
+ if (ecore_int_igu_reset_cam(p_hwfn, p_ptt))
return ECORE_INVAL;
ecore_hw_set_feat(p_hwfn);
return ECORE_SUCCESS;
unlock_and_exit:
- ecore_mcp_resc_unlock(p_hwfn, p_hwfn->p_main_ptt, &resc_unlock_params);
+ if (resc_lock_params.b_granted && !resc_unlock_params.b_released)
+ ecore_mcp_resc_unlock(p_hwfn, p_ptt,
+ &resc_unlock_params);
return rc;
}
{
u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg, dcbx_mode;
u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities;
+ struct ecore_mcp_link_capabilities *p_caps;
struct ecore_mcp_link_params *link;
enum _ecore_status_t rc;
nvm_cfg1_offset = ecore_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);
addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
- OFFSETOF(struct nvm_cfg1, glob) + OFFSETOF(struct nvm_cfg1_glob,
- core_cfg);
+ OFFSETOF(struct nvm_cfg1, glob) +
+ OFFSETOF(struct nvm_cfg1_glob, core_cfg);
core_cfg = ecore_rd(p_hwfn, p_ptt, addr);
/* Read default link configuration */
link = &p_hwfn->mcp_info->link_input;
+ p_caps = &p_hwfn->mcp_info->link_capabilities;
port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
OFFSETOF(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
link_temp = ecore_rd(p_hwfn, p_ptt,
OFFSETOF(struct nvm_cfg1_port, speed_cap_mask));
link_temp &= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK;
link->speed.advertised_speeds = link_temp;
-
- link_temp = link->speed.advertised_speeds;
- p_hwfn->mcp_info->link_capabilities.speed_capabilities = link_temp;
+ p_caps->speed_capabilities = link->speed.advertised_speeds;
link_temp = ecore_rd(p_hwfn, p_ptt,
- port_cfg_addr +
- OFFSETOF(struct nvm_cfg1_port, link_settings));
+ port_cfg_addr +
+ OFFSETOF(struct nvm_cfg1_port, link_settings));
switch ((link_temp & NVM_CFG1_PORT_DRV_LINK_SPEED_MASK) >>
NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET) {
case NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG:
DP_NOTICE(p_hwfn, true, "Unknown Speed in 0x%08x\n", link_temp);
}
- p_hwfn->mcp_info->link_capabilities.default_speed =
- link->speed.forced_speed;
- p_hwfn->mcp_info->link_capabilities.default_speed_autoneg =
- link->speed.autoneg;
+ p_caps->default_speed = link->speed.forced_speed;
+ p_caps->default_speed_autoneg = link->speed.autoneg;
link_temp &= NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK;
link_temp >>= NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET;
NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX);
link->loopback_mode = 0;
+ if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE) {
+ link_temp = ecore_rd(p_hwfn, p_ptt, port_cfg_addr +
+ OFFSETOF(struct nvm_cfg1_port, ext_phy));
+ link_temp &= NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_MASK;
+ link_temp >>= NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_OFFSET;
+ p_caps->default_eee = ECORE_MCP_EEE_ENABLED;
+ link->eee.enable = true;
+ switch (link_temp) {
+ case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_DISABLED:
+ p_caps->default_eee = ECORE_MCP_EEE_DISABLED;
+ link->eee.enable = false;
+ break;
+ case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_BALANCED:
+ p_caps->eee_lpi_timer = EEE_TX_TIMER_USEC_BALANCED_TIME;
+ break;
+ case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_AGGRESSIVE:
+ p_caps->eee_lpi_timer =
+ EEE_TX_TIMER_USEC_AGGRESSIVE_TIME;
+ break;
+ case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_LOW_LATENCY:
+ p_caps->eee_lpi_timer = EEE_TX_TIMER_USEC_LATENCY_TIME;
+ break;
+ }
+
+ link->eee.tx_lpi_timer = p_caps->eee_lpi_timer;
+ link->eee.tx_lpi_enable = link->eee.enable;
+ link->eee.adv_caps = ECORE_EEE_1G_ADV | ECORE_EEE_10G_ADV;
+ } else {
+ p_caps->default_eee = ECORE_MCP_EEE_UNSUPPORTED;
+ }
+
DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
- "Read default link: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x\n",
+ "Read default link: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x\n EEE: %02x [%08x usec]",
link->speed.forced_speed, link->speed.advertised_speeds,
- link->speed.autoneg, link->pause.autoneg);
+ link->speed.autoneg, link->pause.autoneg,
+ p_caps->default_eee, p_caps->eee_lpi_timer);
/* Read Multi-function information from shmem */
addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
- OFFSETOF(struct nvm_cfg1, glob) +
- OFFSETOF(struct nvm_cfg1_glob, generic_cont0);
+ OFFSETOF(struct nvm_cfg1, glob) +
+ OFFSETOF(struct nvm_cfg1_glob, generic_cont0);
generic_cont0 = ecore_rd(p_hwfn, p_ptt, addr);
switch (mf_mode) {
case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED:
+ p_hwfn->p_dev->mf_bits = 1 << ECORE_MF_OVLAN_CLSS;
+ break;
+ case NVM_CFG1_GLOB_MF_MODE_UFP:
+ p_hwfn->p_dev->mf_bits = 1 << ECORE_MF_OVLAN_CLSS |
+ 1 << ECORE_MF_UFP_SPECIFIC |
+ 1 << ECORE_MF_8021Q_TAGGING;
+ break;
+ case NVM_CFG1_GLOB_MF_MODE_BD:
+ p_hwfn->p_dev->mf_bits = 1 << ECORE_MF_OVLAN_CLSS |
+ 1 << ECORE_MF_LLH_PROTO_CLSS |
+ 1 << ECORE_MF_8021AD_TAGGING |
+ 1 << ECORE_MF_FIP_SPECIAL;
+ break;
+ case NVM_CFG1_GLOB_MF_MODE_NPAR1_0:
+ p_hwfn->p_dev->mf_bits = 1 << ECORE_MF_LLH_MAC_CLSS |
+ 1 << ECORE_MF_LLH_PROTO_CLSS |
+ 1 << ECORE_MF_LL2_NON_UNICAST |
+ 1 << ECORE_MF_INTER_PF_SWITCH |
+ 1 << ECORE_MF_DISABLE_ARFS;
+ break;
+ case NVM_CFG1_GLOB_MF_MODE_DEFAULT:
+ p_hwfn->p_dev->mf_bits = 1 << ECORE_MF_LLH_MAC_CLSS |
+ 1 << ECORE_MF_LLH_PROTO_CLSS |
+ 1 << ECORE_MF_LL2_NON_UNICAST;
+ if (ECORE_IS_BB(p_hwfn->p_dev))
+ p_hwfn->p_dev->mf_bits |= 1 << ECORE_MF_NEED_DEF_PF;
+ break;
+ }
+ DP_INFO(p_hwfn, "Multi function mode is 0x%lx\n",
+ p_hwfn->p_dev->mf_bits);
+
+ if (ECORE_IS_CMT(p_hwfn->p_dev))
+ p_hwfn->p_dev->mf_bits |= (1 << ECORE_MF_DISABLE_ARFS);
+
+ /* This switch is redundant with the one above, but it is easier to
+ * drop in Linux this way. Long term, it might be better to have getters
+ * for the needed ECORE_MF_* fields, convert the client code, and
+ * eliminate this.
+ */
+ switch (mf_mode) {
+ case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED:
+ case NVM_CFG1_GLOB_MF_MODE_BD:
p_hwfn->p_dev->mf_mode = ECORE_MF_OVLAN;
break;
case NVM_CFG1_GLOB_MF_MODE_NPAR1_0:
case NVM_CFG1_GLOB_MF_MODE_DEFAULT:
p_hwfn->p_dev->mf_mode = ECORE_MF_DEFAULT;
break;
+ case NVM_CFG1_GLOB_MF_MODE_UFP:
+ p_hwfn->p_dev->mf_mode = ECORE_MF_UFP;
+ break;
}
- DP_INFO(p_hwfn, "Multi function mode is %08x\n",
- p_hwfn->p_dev->mf_mode);
/* Read Multi-function information from shmem */
addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
- OFFSETOF(struct nvm_cfg1, glob) +
- OFFSETOF(struct nvm_cfg1_glob, device_capabilities);
+ OFFSETOF(struct nvm_cfg1, glob) +
+ OFFSETOF(struct nvm_cfg1_glob, device_capabilities);
device_capabilities = ecore_rd(p_hwfn, p_ptt, addr);
if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET)
OSAL_SET_BIT(ECORE_DEV_CAP_ETH,
- &p_hwfn->hw_info.device_capabilities);
+ &p_hwfn->hw_info.device_capabilities);
if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_FCOE)
OSAL_SET_BIT(ECORE_DEV_CAP_FCOE,
- &p_hwfn->hw_info.device_capabilities);
+ &p_hwfn->hw_info.device_capabilities);
if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ISCSI)
OSAL_SET_BIT(ECORE_DEV_CAP_ISCSI,
- &p_hwfn->hw_info.device_capabilities);
+ &p_hwfn->hw_info.device_capabilities);
if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ROCE)
OSAL_SET_BIT(ECORE_DEV_CAP_ROCE,
- &p_hwfn->hw_info.device_capabilities);
+ &p_hwfn->hw_info.device_capabilities);
if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_IWARP)
OSAL_SET_BIT(ECORE_DEV_CAP_IWARP,
- &p_hwfn->hw_info.device_capabilities);
+ &p_hwfn->hw_info.device_capabilities);
rc = ecore_mcp_fill_shmem_func_info(p_hwfn, p_ptt);
if (rc != ECORE_SUCCESS && p_params->b_relaxed_probe) {
if (reg_function_hide & 0x1) {
if (ECORE_IS_BB(p_dev)) {
- if (ECORE_PATH_ID(p_hwfn) && p_dev->num_hwfns == 1) {
+ if (ECORE_PATH_ID(p_hwfn) && !ECORE_IS_CMT(p_dev)) {
num_funcs = 0;
eng_mask = 0xaaaa;
} else {
static void ecore_hw_info_port_num_bb(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
+ struct ecore_dev *p_dev = p_hwfn->p_dev;
u32 port_mode;
#ifndef ASIC_ONLY
/* Read the port mode */
- if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
+ if (CHIP_REV_IS_FPGA(p_dev))
port_mode = 4;
- else if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) &&
- (p_hwfn->p_dev->num_hwfns > 1))
+ else if (CHIP_REV_IS_EMUL(p_dev) && ECORE_IS_CMT(p_dev))
/* In CMT on emulation, assume 1 port */
port_mode = 1;
else
port_mode = ecore_rd(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB);
if (port_mode < 3) {
- p_hwfn->p_dev->num_ports_in_engines = 1;
+ p_dev->num_ports_in_engine = 1;
} else if (port_mode <= 5) {
- p_hwfn->p_dev->num_ports_in_engines = 2;
+ p_dev->num_ports_in_engine = 2;
} else {
DP_NOTICE(p_hwfn, true, "PORT MODE: %d not supported\n",
- p_hwfn->p_dev->num_ports_in_engines);
+ port_mode);
- /* Default num_ports_in_engines to something */
- p_hwfn->p_dev->num_ports_in_engines = 1;
+ /* Default num_ports_in_engine to something */
+ p_dev->num_ports_in_engine = 1;
}
}
static void ecore_hw_info_port_num_ah_e5(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
+ struct ecore_dev *p_dev = p_hwfn->p_dev;
u32 port;
int i;
- p_hwfn->p_dev->num_ports_in_engines = 0;
+ p_dev->num_ports_in_engine = 0;
#ifndef ASIC_ONLY
- if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
+ if (CHIP_REV_IS_EMUL(p_dev)) {
port = ecore_rd(p_hwfn, p_ptt, MISCS_REG_ECO_RESERVED);
switch ((port & 0xf000) >> 12) {
case 1:
- p_hwfn->p_dev->num_ports_in_engines = 1;
+ p_dev->num_ports_in_engine = 1;
break;
case 3:
- p_hwfn->p_dev->num_ports_in_engines = 2;
+ p_dev->num_ports_in_engine = 2;
break;
case 0xf:
- p_hwfn->p_dev->num_ports_in_engines = 4;
+ p_dev->num_ports_in_engine = 4;
break;
default:
DP_NOTICE(p_hwfn, false,
CNIG_REG_NIG_PORT0_CONF_K2_E5 +
(i * 4));
if (port & 1)
- p_hwfn->p_dev->num_ports_in_engines++;
+ p_dev->num_ports_in_engine++;
}
+
+ if (!p_dev->num_ports_in_engine) {
+ DP_NOTICE(p_hwfn, true, "All NIG ports are inactive\n");
+
+ /* Default num_ports_in_engine to something */
+ p_dev->num_ports_in_engine = 1;
+ }
}
static void ecore_hw_info_port_num(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
- if (ECORE_IS_BB(p_hwfn->p_dev))
+ struct ecore_dev *p_dev = p_hwfn->p_dev;
+
+ /* Determine the number of ports per engine */
+ if (ECORE_IS_BB(p_dev))
ecore_hw_info_port_num_bb(p_hwfn, p_ptt);
else
ecore_hw_info_port_num_ah_e5(p_hwfn, p_ptt);
+
+ /* Get the total number of ports of the device */
+ if (ECORE_IS_CMT(p_dev)) {
+ /* In CMT there is always only one port */
+ p_dev->num_ports = 1;
+#ifndef ASIC_ONLY
+ } else if (CHIP_REV_IS_EMUL(p_dev) || CHIP_REV_IS_TEDIBEAR(p_dev)) {
+ p_dev->num_ports = p_dev->num_ports_in_engine *
+ ecore_device_num_engines(p_dev);
+#endif
+ } else {
+ u32 addr, global_offsize, global_addr;
+
+ addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+ PUBLIC_GLOBAL);
+ global_offsize = ecore_rd(p_hwfn, p_ptt, addr);
+ global_addr = SECTION_ADDR(global_offsize, 0);
+ addr = global_addr + OFFSETOF(struct public_global, max_ports);
+ p_dev->num_ports = (u8)ecore_rd(p_hwfn, p_ptt, addr);
+ }
+}
+
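Worked example for the branches above: in CMT the answer is fixed at one port; on emulation/TEDIBEAR it is a product, e.g. 2 ports per engine times 2 engines (assuming ecore_device_num_engines() reports 2 for a BB part) gives num_ports = 4; on ASIC the value is read from the MFW's public_global max_ports field rather than derived.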
+static void ecore_mcp_get_eee_caps(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ struct ecore_mcp_link_capabilities *p_caps;
+ u32 eee_status;
+
+ p_caps = &p_hwfn->mcp_info->link_capabilities;
+ if (p_caps->default_eee == ECORE_MCP_EEE_UNSUPPORTED)
+ return;
+
+ p_caps->eee_speed_caps = 0;
+ eee_status = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
+ OFFSETOF(struct public_port, eee_status));
+ eee_status = (eee_status & EEE_SUPPORTED_SPEED_MASK) >>
+ EEE_SUPPORTED_SPEED_OFFSET;
+ if (eee_status & EEE_1G_SUPPORTED)
+ p_caps->eee_speed_caps |= ECORE_EEE_1G_ADV;
+ if (eee_status & EEE_10G_ADV)
+ p_caps->eee_speed_caps |= ECORE_EEE_10G_ADV;
}
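The eee_status decode above is a plain mask-and-shift followed by bit tests. A self-contained sketch of the same pattern; the mask, offset and bit values below are invented stand-ins for the real mcp_public.h constants:

	#include <stdint.h>
	#include <stdio.h>

	#define EEE_SUPPORTED_SPEED_MASK   0x00000c00 /* invented for the demo */
	#define EEE_SUPPORTED_SPEED_OFFSET 10
	#define EEE_1G_SUPPORTED           (1 << 0)
	#define EEE_10G_ADV                (1 << 1)

	int main(void)
	{
		uint32_t eee_status = 0x00000c00; /* pretend register readout */
		uint32_t speeds;

		/* isolate the supported-speeds field, as the patch does */
		speeds = (eee_status & EEE_SUPPORTED_SPEED_MASK) >>
			 EEE_SUPPORTED_SPEED_OFFSET;

		if (speeds & EEE_1G_SUPPORTED)
			printf("advertise EEE at 1G\n");
		if (speeds & EEE_10G_ADV)
			printf("advertise EEE at 10G\n");
		return 0;
	}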
static enum _ecore_status_t
bool drv_resc_alloc = p_params->drv_resc_alloc;
enum _ecore_status_t rc;
+ if (IS_ECORE_PACING(p_hwfn)) {
+ DP_VERBOSE(p_hwfn->p_dev, ECORE_MSG_IOV,
+ "Skipping IOV as packet pacing is requested\n");
+ }
+
	/* Since all information is common, only the first hwfn should do this */
- if (IS_LEAD_HWFN(p_hwfn)) {
+ if (IS_LEAD_HWFN(p_hwfn) && !IS_ECORE_PACING(p_hwfn)) {
rc = ecore_iov_hw_info(p_hwfn);
if (rc != ECORE_SUCCESS) {
if (p_params->b_relaxed_probe)
}
}
- /* TODO In get_hw_info, amoungst others:
- * Get MCP FW revision and determine according to it the supported
- * featrues (e.g. DCB)
- * Get boot mode
- * ecore_get_pcie_width_speed, WOL capability.
- * Number of global CQ-s (for storage
- */
- ecore_hw_info_port_num(p_hwfn, p_ptt);
+ if (IS_LEAD_HWFN(p_hwfn))
+ ecore_hw_info_port_num(p_hwfn, p_ptt);
+
+ ecore_mcp_get_capabilities(p_hwfn, p_ptt);
#ifndef ASIC_ONLY
if (CHIP_REV_IS_ASIC(p_hwfn->p_dev)) {
p_hwfn->mcp_info->func_info.ovlan;
ecore_mcp_cmd_port_init(p_hwfn, p_ptt);
+
+ ecore_mcp_get_eee_caps(p_hwfn, p_ptt);
+
+ ecore_mcp_read_ufp_config(p_hwfn, p_ptt);
}
if (personality != ECORE_PCI_DEFAULT) {
* that can result in performance penalty in some cases. 4
* represents a good tradeoff between performance and flexibility.
*/
- p_hwfn->hw_info.num_hw_tc = NUM_PHYS_TCS_4PORT_K2;
+ if (IS_ECORE_PACING(p_hwfn))
+ p_hwfn->hw_info.num_hw_tc = 1;
+ else
+ p_hwfn->hw_info.num_hw_tc = NUM_PHYS_TCS_4PORT_K2;
/* start out with a single active tc. This can be increased either
* by dcbx negotiation or by upper layer driver
* the resources/features depends on them.
* This order is not harmful if not forcing.
*/
- rc = ecore_hw_get_resc(p_hwfn, drv_resc_alloc);
+ rc = ecore_hw_get_resc(p_hwfn, p_ptt, drv_resc_alloc);
if (rc != ECORE_SUCCESS && p_params->b_relaxed_probe) {
rc = ECORE_SUCCESS;
p_params->p_relaxed_res = ECORE_HW_PREPARE_BAD_MCP;
return rc;
}
-static enum _ecore_status_t ecore_get_dev_info(struct ecore_dev *p_dev)
+static enum _ecore_status_t ecore_get_dev_info(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
{
- struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
+ struct ecore_dev *p_dev = p_hwfn->p_dev;
u16 device_id_mask;
u32 tmp;
return ECORE_ABORTED;
}
- p_dev->chip_num = (u16)ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
- MISCS_REG_CHIP_NUM);
- p_dev->chip_rev = (u16)ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
- MISCS_REG_CHIP_REV);
-
- MASK_FIELD(CHIP_REV, p_dev->chip_rev);
+ tmp = ecore_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_NUM);
+ p_dev->chip_num = (u16)GET_FIELD(tmp, CHIP_NUM);
+ tmp = ecore_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_REV);
+ p_dev->chip_rev = (u8)GET_FIELD(tmp, CHIP_REV);
/* Learn number of HW-functions */
- tmp = ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
- MISCS_REG_CMT_ENABLED_FOR_PAIR);
+ tmp = ecore_rd(p_hwfn, p_ptt, MISCS_REG_CMT_ENABLED_FOR_PAIR);
if (tmp & (1 << p_hwfn->rel_pf_id)) {
DP_NOTICE(p_dev->hwfns, false, "device in CMT mode\n");
}
#endif
- p_dev->chip_bond_id = ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
- MISCS_REG_CHIP_TEST_REG) >> 4;
- MASK_FIELD(CHIP_BOND_ID, p_dev->chip_bond_id);
- p_dev->chip_metal = (u16)ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
- MISCS_REG_CHIP_METAL);
- MASK_FIELD(CHIP_METAL, p_dev->chip_metal);
+ tmp = ecore_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_TEST_REG);
+ p_dev->chip_bond_id = (u8)GET_FIELD(tmp, CHIP_BOND_ID);
+ tmp = ecore_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_METAL);
+ p_dev->chip_metal = (u8)GET_FIELD(tmp, CHIP_METAL);
+
DP_INFO(p_dev->hwfns,
- "Chip details - %s %c%d, Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n",
+ "Chip details - %s %c%d, Num: %04x Rev: %02x Bond id: %02x Metal: %02x\n",
ECORE_IS_BB(p_dev) ? "BB" : "AH",
'A' + p_dev->chip_rev, (int)p_dev->chip_metal,
p_dev->chip_num, p_dev->chip_rev, p_dev->chip_bond_id,
p_dev->chip_metal);
- if (ECORE_IS_BB(p_dev) && CHIP_REV_IS_A0(p_dev)) {
+ if (ECORE_IS_BB_A0(p_dev)) {
DP_NOTICE(p_dev->hwfns, false,
"The chip type/rev (BB A0) is not supported!\n");
return ECORE_ABORTED;
}
#ifndef ASIC_ONLY
if (CHIP_REV_IS_EMUL(p_dev) && ECORE_IS_AH(p_dev))
- ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
- MISCS_REG_PLL_MAIN_CTRL_4, 0x1);
+ ecore_wr(p_hwfn, p_ptt, MISCS_REG_PLL_MAIN_CTRL_4, 0x1);
if (CHIP_REV_IS_EMUL(p_dev)) {
- tmp = ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
- MISCS_REG_ECO_RESERVED);
+ tmp = ecore_rd(p_hwfn, p_ptt, MISCS_REG_ECO_RESERVED);
if (tmp & (1 << 29)) {
DP_NOTICE(p_hwfn, false,
"Emulation: Running on a FULL build\n");
"Mark hw/fw uninitialized\n");
p_hwfn->hw_init_done = false;
- p_hwfn->first_on_engine = false;
ecore_ptt_invalidate(p_hwfn);
}
void OSAL_IOMEM * p_doorbells,
struct ecore_hw_prepare_params *p_params)
{
+ struct ecore_mdump_retain_data mdump_retain;
struct ecore_dev *p_dev = p_hwfn->p_dev;
struct ecore_mdump_info mdump_info;
enum _ecore_status_t rc = ECORE_SUCCESS;
/* Allocate PTT pool */
rc = ecore_ptt_pool_alloc(p_hwfn);
if (rc) {
- DP_NOTICE(p_hwfn, true, "Failed to prepare hwfn's hw\n");
+ DP_NOTICE(p_hwfn, false, "Failed to prepare hwfn's hw\n");
if (p_params->b_relaxed_probe)
p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_MEM;
goto err0;
/* First hwfn learns basic information, e.g., number of hwfns */
if (!p_hwfn->my_id) {
- rc = ecore_get_dev_info(p_dev);
+ rc = ecore_get_dev_info(p_hwfn, p_hwfn->p_main_ptt);
if (rc != ECORE_SUCCESS) {
if (p_params->b_relaxed_probe)
p_params->p_relaxed_res =
/* Initialize MCP structure */
rc = ecore_mcp_cmd_init(p_hwfn, p_hwfn->p_main_ptt);
if (rc) {
- DP_NOTICE(p_hwfn, true, "Failed initializing mcp command\n");
+ DP_NOTICE(p_hwfn, false, "Failed initializing mcp command\n");
if (p_params->b_relaxed_probe)
p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_MEM;
goto err1;
rc = ecore_get_hw_info(p_hwfn, p_hwfn->p_main_ptt,
p_params->personality, p_params);
if (rc) {
- DP_NOTICE(p_hwfn, true, "Failed to get HW information\n");
+ DP_NOTICE(p_hwfn, false, "Failed to get HW information\n");
goto err2;
}
/* Sending a mailbox to the MFW should be after ecore_get_hw_info() is
* called, since among others it sets the ports number in an engine.
*/
- if (p_params->initiate_pf_flr && p_hwfn == ECORE_LEADING_HWFN(p_dev) &&
+ if (p_params->initiate_pf_flr && IS_LEAD_HWFN(p_hwfn) &&
!p_dev->recov_in_prog) {
rc = ecore_mcp_initiate_pf_flr(p_hwfn, p_hwfn->p_main_ptt);
if (rc != ECORE_SUCCESS)
DP_NOTICE(p_hwfn, false, "Failed to initiate PF FLR\n");
+
+ /* Workaround for MFW issue where PF FLR does not clean up
+ * the IGU block
+ */
+ if (!(p_hwfn->mcp_info->capabilities &
+ FW_MB_PARAM_FEATURE_SUPPORT_IGU_CLEANUP))
+ ecore_pf_flr_igu_cleanup(p_hwfn);
}
- /* Check if mdump logs are present and update the epoch value */
- if (p_hwfn == ECORE_LEADING_HWFN(p_hwfn->p_dev)) {
+ /* Check if mdump logs/data are present and update the epoch value */
+ if (IS_LEAD_HWFN(p_hwfn)) {
+#ifndef ASIC_ONLY
+ if (!CHIP_REV_IS_EMUL(p_dev)) {
+#endif
rc = ecore_mcp_mdump_get_info(p_hwfn, p_hwfn->p_main_ptt,
&mdump_info);
- if (rc == ECORE_SUCCESS && mdump_info.num_of_logs > 0) {
+ if (rc == ECORE_SUCCESS && mdump_info.num_of_logs)
DP_NOTICE(p_hwfn, false,
"* * * IMPORTANT - HW ERROR register dump captured by device * * *\n");
- }
+
+ rc = ecore_mcp_mdump_get_retain(p_hwfn, p_hwfn->p_main_ptt,
+ &mdump_retain);
+ if (rc == ECORE_SUCCESS && mdump_retain.valid)
+ DP_NOTICE(p_hwfn, false,
+ "mdump retained data: epoch 0x%08x, pf 0x%x, status 0x%08x\n",
+ mdump_retain.epoch, mdump_retain.pf,
+ mdump_retain.status);
ecore_mcp_mdump_set_values(p_hwfn, p_hwfn->p_main_ptt,
p_params->epoch);
+#ifndef ASIC_ONLY
+ }
+#endif
}
/* Allocate the init RT array and initialize the init-ops engine */
rc = ecore_init_alloc(p_hwfn);
if (rc) {
- DP_NOTICE(p_hwfn, true, "Failed to allocate the init array\n");
+ DP_NOTICE(p_hwfn, false, "Failed to allocate the init array\n");
if (p_params->b_relaxed_probe)
p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_MEM;
goto err2;
p_dev->chk_reg_fifo = p_params->chk_reg_fifo;
p_dev->allow_mdump = p_params->allow_mdump;
+ p_hwfn->b_en_pacing = p_params->b_en_pacing;
+ p_dev->b_is_target = p_params->b_is_target;
if (p_params->b_relaxed_probe)
p_params->p_relaxed_res = ECORE_HW_PREPARE_SUCCESS;
p_params->personality = p_hwfn->hw_info.personality;
	/* initialize 2nd hwfn if necessary */
- if (p_dev->num_hwfns > 1) {
+ if (ECORE_IS_CMT(p_dev)) {
void OSAL_IOMEM *p_regview, *p_doorbell;
u8 OSAL_IOMEM *addr;
/* adjust bar offset for second engine */
addr = (u8 OSAL_IOMEM *)p_dev->regview +
- ecore_hw_bar_size(p_hwfn, BAR_ID_0) / 2;
+ ecore_hw_bar_size(p_hwfn,
+ p_hwfn->p_main_ptt,
+ BAR_ID_0) / 2;
p_regview = (void OSAL_IOMEM *)addr;
addr = (u8 OSAL_IOMEM *)p_dev->doorbells +
- ecore_hw_bar_size(p_hwfn, BAR_ID_1) / 2;
+ ecore_hw_bar_size(p_hwfn,
+ p_hwfn->p_main_ptt,
+ BAR_ID_1) / 2;
p_doorbell = (void OSAL_IOMEM *)addr;
+ p_dev->hwfns[1].b_en_pacing = p_params->b_en_pacing;
/* prepare second hw function */
rc = ecore_hw_prepare_single(&p_dev->hwfns[1], p_regview,
p_doorbell, p_params);
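The bar-size halving above gives engine 1 the upper half of each BAR. A minimal sketch of the offset arithmetic, with an invented BAR size:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint8_t bar0[0x1000];                 /* stand-in for a mapped BAR */
		uint32_t bar_size = sizeof(bar0);
		uint8_t *engine1_view = bar0 + bar_size / 2;

		printf("engine 1 view starts at offset 0x%x\n",
		       (unsigned int)(engine1_view - bar0)); /* prints 0x800 */
		return 0;
	}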
ecore_mcp_free(p_hwfn);
ecore_hw_hwfn_free(p_hwfn);
} else {
- DP_NOTICE(p_dev, true,
- "What do we need to free when VF hwfn1 init fails\n");
+ DP_NOTICE(p_dev, false, "What do we need to free when VF hwfn1 init fails\n");
}
return rc;
}
ecore_hw_hwfn_free(p_hwfn);
ecore_mcp_free(p_hwfn);
- OSAL_MUTEX_DEALLOC(&p_hwfn->dmae_info.mutex);
+#ifdef CONFIG_ECORE_LOCK_ALLOC
+ OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->dmae_info.lock);
+#endif
}
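The DEALLOC added above pairs with an allocation on the init path that is guarded by the same option; roughly, following the ecore OSAL convention (a sketch, not quoted from this patch):

	#ifdef CONFIG_ECORE_LOCK_ALLOC
		if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_hwfn->dmae_info.lock))
			return ECORE_NOMEM;
	#endif
		OSAL_SPIN_LOCK_INIT(&p_hwfn->dmae_info.lock);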
ecore_iov_free_hw_info(p_dev);
p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys,
ECORE_CHAIN_PAGE_SIZE);
if (!p_virt) {
- DP_NOTICE(p_dev, true,
+ DP_NOTICE(p_dev, false,
"Failed to allocate chain memory\n");
return ECORE_NOMEM;
}
p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys, ECORE_CHAIN_PAGE_SIZE);
if (!p_virt) {
- DP_NOTICE(p_dev, true, "Failed to allocate chain memory\n");
+ DP_NOTICE(p_dev, false, "Failed to allocate chain memory\n");
return ECORE_NOMEM;
}
struct ecore_chain *p_chain,
struct ecore_chain_ext_pbl *ext_pbl)
{
- void *p_virt = OSAL_NULL;
- u8 *p_pbl_virt = OSAL_NULL;
- void **pp_virt_addr_tbl = OSAL_NULL;
- dma_addr_t p_phys = 0, p_pbl_phys = 0;
u32 page_cnt = p_chain->page_cnt, size, i;
+ dma_addr_t p_phys = 0, p_pbl_phys = 0;
+ void **pp_virt_addr_tbl = OSAL_NULL;
+ u8 *p_pbl_virt = OSAL_NULL;
+ void *p_virt = OSAL_NULL;
size = page_cnt * sizeof(*pp_virt_addr_tbl);
pp_virt_addr_tbl = (void **)OSAL_VZALLOC(p_dev, size);
if (!pp_virt_addr_tbl) {
- DP_NOTICE(p_dev, true,
+ DP_NOTICE(p_dev, false,
"Failed to allocate memory for the chain virtual addresses table\n");
return ECORE_NOMEM;
}
ecore_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys,
pp_virt_addr_tbl);
if (!p_pbl_virt) {
- DP_NOTICE(p_dev, true, "Failed to allocate chain pbl memory\n");
+ DP_NOTICE(p_dev, false, "Failed to allocate chain pbl memory\n");
return ECORE_NOMEM;
}
p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys,
ECORE_CHAIN_PAGE_SIZE);
if (!p_virt) {
- DP_NOTICE(p_dev, true,
+ DP_NOTICE(p_dev, false,
"Failed to allocate chain memory\n");
return ECORE_NOMEM;
}
rc = ecore_chain_alloc_sanity_check(p_dev, cnt_type, elem_size,
page_cnt);
if (rc) {
- DP_NOTICE(p_dev, true,
+ DP_NOTICE(p_dev, false,
"Cannot allocate a chain with the given arguments:\n"
"[use_mode %d, mode %d, cnt_type %d, num_elems %d, elem_size %zu]\n",
intended_use, mode, cnt_type, num_elems, elem_size);
struct ecore_ptt *p_ptt, u8 *p_filter)
{
u32 high, low, entry_num;
- enum _ecore_status_t rc;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
- if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
+ if (!OSAL_TEST_BIT(ECORE_MF_LLH_MAC_CLSS,
+ &p_hwfn->p_dev->mf_bits))
return ECORE_SUCCESS;
high = p_filter[1] | (p_filter[0] << 8);
p_filter[0], p_filter[1], p_filter[2], p_filter[3],
p_filter[4], p_filter[5], entry_num);
- return ECORE_SUCCESS;
+ return rc;
}
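The same OSAL_TEST_BIT guard recurs in each LLH filter routine below; a hypothetical wrapper (not part of the patch) would make the intent read directly:

	static bool ecore_mf_llh_mac_clss(struct ecore_hwfn *p_hwfn)
	{
		return !!OSAL_TEST_BIT(ECORE_MF_LLH_MAC_CLSS,
				       &p_hwfn->p_dev->mf_bits);
	}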
static enum _ecore_status_t
struct ecore_ptt *p_ptt, u8 *p_filter)
{
u32 high, low, entry_num;
- enum _ecore_status_t rc;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
- if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
+ if (!OSAL_TEST_BIT(ECORE_MF_LLH_MAC_CLSS,
+ &p_hwfn->p_dev->mf_bits))
return;
high = p_filter[1] | (p_filter[0] << 8);
enum ecore_llh_port_filter_type_t type)
{
u32 high, low, entry_num;
- enum _ecore_status_t rc;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
- if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
- return ECORE_SUCCESS;
+ if (!OSAL_TEST_BIT(ECORE_MF_LLH_PROTO_CLSS,
+ &p_hwfn->p_dev->mf_bits))
+ return rc;
high = 0;
low = 0;
break;
}
- return ECORE_SUCCESS;
+ return rc;
}
static enum _ecore_status_t
enum ecore_llh_port_filter_type_t type)
{
u32 high, low, entry_num;
- enum _ecore_status_t rc;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
- if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
+ if (!OSAL_TEST_BIT(ECORE_MF_LLH_PROTO_CLSS,
+ &p_hwfn->p_dev->mf_bits))
return;
high = 0;
void ecore_llh_clear_all_filters(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
- if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
+ if (!OSAL_TEST_BIT(ECORE_MF_LLH_PROTO_CLSS,
+ &p_hwfn->p_dev->mf_bits) &&
+ !OSAL_TEST_BIT(ECORE_MF_LLH_MAC_CLSS,
+ &p_hwfn->p_dev->mf_bits))
return;
if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev))
ecore_llh_set_function_as_default(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
- if (IS_MF_DEFAULT(p_hwfn) && ECORE_IS_BB(p_hwfn->p_dev)) {
+ if (OSAL_TEST_BIT(ECORE_MF_NEED_DEF_PF, &p_hwfn->p_dev->mf_bits)) {
ecore_wr(p_hwfn, p_ptt,
NIG_REG_LLH_TAGMAC_DEF_PF_VECTOR,
1 << p_hwfn->abs_pf_id / 2);
}
}
-static void
-ecore_init_wfq_default_param(struct ecore_hwfn *p_hwfn, u32 min_pf_rate)
+static void ecore_init_wfq_default_param(struct ecore_hwfn *p_hwfn)
{
int i;
}
static void ecore_disable_wfq_for_all_vports(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u32 min_pf_rate)
+ struct ecore_ptt *p_ptt)
{
struct init_qm_vport_params *vport_params;
int i;
vport_params = p_hwfn->qm_info.qm_vport_params;
for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
- ecore_init_wfq_default_param(p_hwfn, min_pf_rate);
+ ecore_init_wfq_default_param(p_hwfn);
ecore_init_vport_wfq(p_hwfn, p_ptt,
vport_params[i].first_tx_pq_id,
vport_params[i].vport_wfq);
non_requested_count = num_vports - req_count;
/* validate possible error cases */
- if (req_rate > min_pf_rate) {
- DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
- "Vport [%d] - Requested rate[%d Mbps] is greater than configured PF min rate[%d Mbps]\n",
- vport_id, req_rate, min_pf_rate);
- return ECORE_INVAL;
- }
-
if (req_rate < min_pf_rate / ECORE_WFQ_UNIT) {
DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
"Vport [%d] - Requested rate[%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n",
if (rc == ECORE_SUCCESS && use_wfq)
ecore_configure_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate);
else
- ecore_disable_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate);
+ ecore_disable_wfq_for_all_vports(p_hwfn, p_ptt);
return rc;
}
int i, rc = ECORE_INVAL;
/* TBD - for multiple hardware functions - that is 100 gig */
- if (p_dev->num_hwfns > 1) {
+ if (ECORE_IS_CMT(p_dev)) {
DP_NOTICE(p_dev, false,
"WFQ configuration is not supported for this device\n");
return rc;
/* API to configure WFQ from mcp link change */
void ecore_configure_vp_wfq_on_link_change(struct ecore_dev *p_dev,
+ struct ecore_ptt *p_ptt,
u32 min_pf_rate)
{
int i;
/* TBD - for multiple hardware functions - that is 100 gig */
- if (p_dev->num_hwfns > 1) {
+ if (ECORE_IS_CMT(p_dev)) {
DP_VERBOSE(p_dev, ECORE_MSG_LINK,
"WFQ configuration is not supported for this device\n");
return;
for_each_hwfn(p_dev, i) {
struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
- __ecore_configure_vp_wfq_on_link_change(p_hwfn,
- p_hwfn->p_dpc_ptt,
+ __ecore_configure_vp_wfq_on_link_change(p_hwfn, p_ptt,
min_pf_rate);
}
}
p_link = &p_hwfn->mcp_info->link_output;
if (p_link->min_pf_rate)
- ecore_disable_wfq_for_all_vports(p_hwfn, p_ptt,
- p_link->min_pf_rate);
+ ecore_disable_wfq_for_all_vports(p_hwfn, p_ptt);
OSAL_MEMSET(p_hwfn->qm_info.wfq_data, 0,
sizeof(*p_hwfn->qm_info.wfq_data) *
int ecore_device_num_ports(struct ecore_dev *p_dev)
{
- /* in CMT always only one port */
- if (p_dev->num_hwfns > 1)
- return 1;
-
- return p_dev->num_ports_in_engines * ecore_device_num_engines(p_dev);
+ return p_dev->num_ports;
}
void ecore_set_fw_mac_addr(__le16 *fw_msb,
((u8 *)fw_lsb)[0] = mac[5];
((u8 *)fw_lsb)[1] = mac[4];
}
+
+bool ecore_is_mf_fip_special(struct ecore_dev *p_dev)
+{
+ return !!OSAL_TEST_BIT(ECORE_MF_FIP_SPECIAL, &p_dev->mf_bits);
+}
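The double negation folds any set bit into a strict boolean before returning. A one-line illustration with invented values:

	unsigned long bits = 0x10;          /* pretend mf_bits, bit 4 set */
	bool special = !!(bits & (1 << 4)); /* nonzero collapses to true */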