/* True when this is the leading HW function of the device (my_id == 0). */
#define IS_LEAD_HWFN(edev) (!((edev)->my_id))
u8 rel_pf_id; /* Relative to engine*/
u8 abs_pf_id;
- #define ECORE_PATH_ID(_p_hwfn) \
- (ECORE_IS_K2((_p_hwfn)->p_dev) ? 0 : ((_p_hwfn)->abs_pf_id & 1))
+#define ECORE_PATH_ID(_p_hwfn) \
+ (ECORE_IS_BB((_p_hwfn)->p_dev) ? ((_p_hwfn)->abs_pf_id & 1) : 0)
u8 port_id;
bool b_active;
/* HW functions */
u8 num_hwfns;
struct ecore_hwfn hwfns[MAX_HWFNS_PER_DEVICE];
+#define ECORE_IS_CMT(dev) ((dev)->num_hwfns > 1)
/* SRIOV */
struct ecore_hw_sriov_info *p_iov_info;
/* In CMT doorbell bar is split down the middle between engine 0 and
 * engine 1
*/
- if (p_dev->num_hwfns > 1)
+ if (ECORE_IS_CMT(p_dev))
p_hwfn = db_addr < p_dev->hwfns[1].doorbells ?
&p_dev->hwfns[0] : &p_dev->hwfns[1];
else
* they were found to be useful MFW started updating them from 8.7.7.0.
* In older MFW versions they are set to 0 which means disabled.
*/
- if (p_hwfn->p_dev->num_hwfns > 1) {
+ if (ECORE_IS_CMT(p_hwfn->p_dev)) {
DP_INFO(p_hwfn,
"BAR size not configured. Assuming BAR size of 256kB for GRC and 512kB for DB\n");
val = BAR_ID_0 ? 256 * 1024 : 512 * 1024;
#endif
hw_mode |= 1 << MODE_ASIC;
- if (p_hwfn->p_dev->num_hwfns > 1)
+ if (ECORE_IS_CMT(p_hwfn->p_dev))
hw_mode |= 1 << MODE_100G;
p_hwfn->hw_info.hw_mode = hw_mode;
u8 cond;
db_bar_size = ecore_hw_bar_size(p_hwfn, p_ptt, BAR_ID_1);
- if (p_hwfn->p_dev->num_hwfns > 1)
+ if (ECORE_IS_CMT(p_hwfn->p_dev))
db_bar_size /= 2;
/* Calculate doorbell regions
else if (ECORE_IS_BB(p_hwfn->p_dev))
ecore_link_init_bb(p_hwfn, p_ptt, p_hwfn->port_id);
} else if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
- if (p_hwfn->p_dev->num_hwfns > 1) {
+ if (ECORE_IS_CMT(p_hwfn->p_dev)) {
/* Activate OPTE in CMT */
u32 val;
enum _ecore_status_t rc = ECORE_SUCCESS;
int i;
- if ((p_params->int_mode == ECORE_INT_MODE_MSI) &&
- (p_dev->num_hwfns > 1)) {
+ if ((p_params->int_mode == ECORE_INT_MODE_MSI) && ECORE_IS_CMT(p_dev)) {
DP_NOTICE(p_dev, false,
"MSI mode is not supported for CMT devices\n");
return ECORE_INVAL;
if (reg_function_hide & 0x1) {
if (ECORE_IS_BB(p_dev)) {
- if (ECORE_PATH_ID(p_hwfn) && p_dev->num_hwfns == 1) {
+ if (ECORE_PATH_ID(p_hwfn) && !ECORE_IS_CMT(p_dev)) {
num_funcs = 0;
eng_mask = 0xaaaa;
} else {
/* Read the port mode */
if (CHIP_REV_IS_FPGA(p_dev))
port_mode = 4;
- else if (CHIP_REV_IS_EMUL(p_dev) && p_dev->num_hwfns > 1)
+ else if (CHIP_REV_IS_EMUL(p_dev) && ECORE_IS_CMT(p_dev))
/* In CMT on emulation, assume 1 port */
port_mode = 1;
else
ecore_hw_info_port_num_ah_e5(p_hwfn, p_ptt);
/* Get the total number of ports of the device */
- if (p_dev->num_hwfns > 1) {
+ if (ECORE_IS_CMT(p_dev)) {
/* In CMT there is always only one port */
p_dev->num_ports = 1;
#ifndef ASIC_ONLY
p_params->personality = p_hwfn->hw_info.personality;
/* initilalize 2nd hwfn if necessary */
- if (p_dev->num_hwfns > 1) {
+ if (ECORE_IS_CMT(p_dev)) {
void OSAL_IOMEM *p_regview, *p_doorbell;
u8 OSAL_IOMEM *addr;
int i, rc = ECORE_INVAL;
/* TBD - for multiple hardware functions - that is 100 gig */
- if (p_dev->num_hwfns > 1) {
+ if (ECORE_IS_CMT(p_dev)) {
DP_NOTICE(p_dev, false,
"WFQ configuration is not supported for this device\n");
return rc;
int i;
/* TBD - for multiple hardware functions - that is 100 gig */
- if (p_dev->num_hwfns > 1) {
+ if (ECORE_IS_CMT(p_dev)) {
DP_VERBOSE(p_dev, ECORE_MSG_LINK,
"WFQ configuration is not supported for this device\n");
return;
load_phase = FW_MSG_CODE_DRV_LOAD_FUNCTION;
/* On CMT, always tell that it's engine */
- if (p_hwfn->p_dev->num_hwfns > 1)
+ if (ECORE_IS_CMT(p_hwfn->p_dev))
load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
*p_load_code = load_phase;
}
/* On 100g PFs, prevent old VFs from loading */
- if ((p_hwfn->p_dev->num_hwfns > 1) &&
+ if (ECORE_IS_CMT(p_hwfn->p_dev) &&
!(req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_100G)) {
DP_INFO(p_hwfn,
"VF[%d] is running an old driver that doesn't support"
pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED |
PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE;
- if (p_hwfn->p_dev->num_hwfns > 1)
+ if (ECORE_IS_CMT(p_hwfn->p_dev))
pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G;
/* Share our ability to use multiple queue-ids only with VFs
PMD_INIT_FUNC_TRACE(edev);
/* Check requirements for 100G mode */
- if (edev->num_hwfns > 1) {
+ if (ECORE_IS_CMT(edev)) {
if (eth_dev->data->nb_rx_queues < 2 ||
eth_dev->data->nb_tx_queues < 2) {
DP_ERR(edev, "100G mode needs min. 2 RX/TX queues\n");
rte_intr_disable(&pci_dev->intr_handle);
rte_intr_callback_unregister(&pci_dev->intr_handle,
qede_interrupt_handler, (void *)eth_dev);
- if (edev->num_hwfns > 1)
+ if (ECORE_IS_CMT(edev))
rte_eal_alarm_cancel(qede_poll_sp_sb_cb, (void *)eth_dev);
}
params->update_rss_config = 1;
/* Fix up RETA for CMT mode device */
- if (edev->num_hwfns > 1)
+ if (ECORE_IS_CMT(edev))
qdev->rss_enable = qede_update_rss_parm_cmt(edev,
params);
vport_update_params.vport_id = 0;
* This is required since uio device uses only one MSI-x
* interrupt vector but we need one for each engine.
*/
- if (edev->num_hwfns > 1 && IS_PF(edev)) {
+ if (ECORE_IS_CMT(edev) && IS_PF(edev)) {
rc = rte_eal_alarm_set(timer_period * US_PER_S,
qede_poll_sp_sb_cb,
(void *)eth_dev);
DP_INFO(edev, "flowdir is disabled\n");
break;
case RTE_FDIR_MODE_PERFECT:
- if (edev->num_hwfns > 1) {
+ if (ECORE_IS_CMT(edev)) {
DP_ERR(edev, "flowdir is not supported in 100G mode\n");
qdev->fdir_info.arfs.arfs_enable = false;
return -ENOTSUP;
switch (filter_op) {
case RTE_ETH_FILTER_NOP:
/* Typically used to query flowdir support */
- if (edev->num_hwfns > 1) {
+ if (ECORE_IS_CMT(edev)) {
DP_ERR(edev, "flowdir is not supported in 100G mode\n");
return -ENOTSUP;
}
switch (filter_op) {
case RTE_ETH_FILTER_NOP:
/* Typically used to query fdir support */
- if (edev->num_hwfns > 1) {
+ if (ECORE_IS_CMT(edev)) {
DP_ERR(edev, "flowdir is not supported in 100G mode\n");
return -ENOTSUP;
}
} else {
ecore_vf_get_num_rxqs(ECORE_LEADING_HWFN(edev),
&info->num_queues);
- if (edev->num_hwfns > 1) {
+ if (ECORE_IS_CMT(edev)) {
ecore_vf_get_num_rxqs(&edev->hwfns[1], &queues);
info->num_queues += queues;
}