Addressed issues reported by coverity: NULL pointer dereferencing
issues, unchecked return value, uninitialized scalar value,
probable dead code cases, unintended sign extension, bad bit
shift operation, wrong sizeof argument (SIZEOF_MISMATCH)
Coverity issue: 343396, 345028, 344977, 345015, 345025, 344969
Coverity issue: 345014, 344966, 343437, 344993, 345007, 344988
Coverity issue: 343405, 344999, 345003
Fixes:
58f6f93c34c1 ("net/octeontx2: add module EEPROM dump")
Fixes:
38f566280abb ("net/octeontx2: add link stats operations")
Fixes:
b5dc3140448e ("net/octeontx2: support base PTP")
Fixes:
ba1b3b081edf ("net/octeontx2: support VLAN offloads")
Fixes:
092b38341859 ("net/octeontx2: add flow init and fini")
Fixes:
3da1b85b6d06 ("common/octeontx2: add FLR IRQ handler")
Fixes:
2548ab774f92 ("mempool/octeontx2: add context dump support")
Fixes:
2b71657c8660 ("common/octeontx2: add mbox request and response definition")
Signed-off-by: Harman Kalra <hkalra@marvell.com>
Acked-by: Jerin Jacob <jerinj@marvell.com>
if (!(intr & (1ULL << vf)))
continue;
- vf = 64 * i + vf;
otx2_base_dbg("FLR: i :%d intr: 0x%" PRIx64 ", vf-%d",
- i, intr, vf);
+ i, intr, (64 * i + vf));
/* Clear interrupt */
otx2_write64(BIT_ULL(vf), bar2 + RVU_PF_VFFLR_INTX(i));
/* Disable the interrupt */
#include <otx2_common.h>
-#define SZ_64K (64 * 1024)
-#define SZ_1K (1 * 1024)
+#define SZ_64K (64ULL * 1024ULL)
+#define SZ_1K (1ULL * 1024ULL)
#define MBOX_SIZE SZ_64K
/* AF/PF: PF initiated, PF/VF VF initiated */
struct npa_aq_enq_req *aq;
struct npa_aq_enq_rsp *rsp;
uint32_t q;
- int rc;
+ int rc = 0;
for (q = 0; q < lf->nr_pools; q++) {
/* Skip disabled POOL */
while (count) {
void *next_sqb;
- next_sqb = *(void **)((uintptr_t)sqb_buf + ((sqes_per_sqb - 1) *
+ next_sqb = *(void **)((uintptr_t)sqb_buf + (uint32_t)
+ ((sqes_per_sqb - 1) *
nix_sq_max_sqe_sz(txq)));
npa_lf_aura_op_free(txq->sqb_pool->pool_id, 1,
(uint64_t)sqb_buf);
{
struct otx2_mbox *mbox = dev->mbox;
struct cgx_fw_data *rsp = NULL;
+ int rc;
otx2_mbox_alloc_msg_cgx_get_aux_link_info(mbox);
- otx2_mbox_process_msg(mbox, (void *)&rsp);
+ rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+ if (rc) {
+ otx2_err("Failed to get fw data: %d", rc);
+ return NULL;
+ }
return rsp;
}
}
npc->free_entries = rte_zmalloc(NULL, npc->flow_max_priority
- * sizeof(struct rte_bitmap),
+ * sizeof(struct rte_bitmap *),
0);
if (npc->free_entries == NULL) {
otx2_err("free_entries alloc failed");
}
npc->free_entries_rev = rte_zmalloc(NULL, npc->flow_max_priority
- * sizeof(struct rte_bitmap),
+ * sizeof(struct rte_bitmap *),
0);
if (npc->free_entries_rev == NULL) {
otx2_err("free_entries_rev alloc failed");
}
npc->live_entries = rte_zmalloc(NULL, npc->flow_max_priority
- * sizeof(struct rte_bitmap),
+ * sizeof(struct rte_bitmap *),
0);
if (npc->live_entries == NULL) {
otx2_err("live_entries alloc failed");
}
npc->live_entries_rev = rte_zmalloc(NULL, npc->flow_max_priority
- * sizeof(struct rte_bitmap),
+ * sizeof(struct rte_bitmap *),
0);
if (npc->live_entries_rev == NULL) {
otx2_err("live_entries_rev alloc failed");
rte_free(npc->flow_entry_info);
if (npc_mem)
rte_free(npc_mem);
- if (nix_mem)
- rte_free(nix_mem);
return rc;
}
struct cgx_link_user_info *link)
{
struct otx2_eth_dev *otx2_dev = (struct otx2_eth_dev *)dev;
- struct rte_eth_dev *eth_dev = otx2_dev->eth_dev;
struct rte_eth_link eth_link;
+ struct rte_eth_dev *eth_dev;
- if (!link || !dev || !eth_dev->data->dev_conf.intr_conf.lsc)
+ if (!link || !dev)
+ return;
+
+ eth_dev = otx2_dev->eth_dev;
+ if (!eth_dev || !eth_dev->data->dev_conf.intr_conf.lsc)
return;
if (nix_wait_for_link_cfg(otx2_dev)) {
{
struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
struct otx2_mbox *mbox = dev->mbox;
- uint8_t rc = 0;
+ uint8_t rc = -EINVAL;
if (otx2_dev_is_vf(dev))
return rc;
otx2_eth_dev_ptp_info_update(struct otx2_dev *dev, bool ptp_en)
{
struct otx2_eth_dev *otx2_dev = (struct otx2_eth_dev *)dev;
- struct rte_eth_dev *eth_dev = otx2_dev->eth_dev;
+ struct rte_eth_dev *eth_dev;
int i;
+ if (!dev)
+ return -EINVAL;
+
+ eth_dev = otx2_dev->eth_dev;
+ if (!eth_dev)
+ return -EINVAL;
+
otx2_dev->ptp_en = ptp_en;
for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
struct otx2_eth_rxq *rxq = eth_dev->data->rx_queues[i];
ts = rte_eth_dma_zone_reserve(eth_dev, "otx2_ts",
0, OTX2_ALIGN, OTX2_ALIGN,
dev->node);
- if (ts == NULL)
+ if (ts == NULL) {
otx2_err("Failed to allocate mem for tx tstamp addr");
+ return -ENOMEM;
+ }
dev->tstamp.tx_tstamp_iova = ts->iova;
dev->tstamp.tx_tstamp = ts->addr;
TAILQ_HEAD(otx2_nix_tm_shaper_profile_list, otx2_nix_tm_shaper_profile);
#define MAX_SCHED_WEIGHT ((uint8_t)~0)
-#define NIX_TM_RR_QUANTUM_MAX ((1 << 24) - 1)
+#define NIX_TM_RR_QUANTUM_MAX (BIT_ULL(24) - 1)
/* DEFAULT_RR_WEIGHT * NIX_TM_RR_QUANTUM_MAX / MAX_SCHED_WEIGHT */
/* = NIX_MAX_HW_MTU */
entry.kw[kwi] |= NPC_LT_LB_CTAG << mkex->lb_lt_offset;
entry.kw_mask[kwi] |= 0xFULL << mkex->lb_lt_offset;
- mcam_data = (vlan_id << 16);
+ mcam_data = ((uint32_t)vlan_id << 16);
mcam_mask = (BIT_ULL(16) - 1) << 16;
otx2_mbox_memcpy(key_data + mkex->lb_xtract.key_off,
&mcam_data, mkex->lb_xtract.len + 1);
} else {
TAILQ_FOREACH(entry, &vlan->fltr_tbl, next) {
if (entry->vlan_id == vlan_id) {
- nix_vlan_mcam_free(dev, entry->mcam_idx);
+ rc = nix_vlan_mcam_free(dev, entry->mcam_idx);
+ if (rc)
+ return rc;
TAILQ_REMOVE(&vlan->fltr_tbl, entry, next);
rte_free(entry);
break;
struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
uint64_t offloads = dev->rx_offloads;
struct rte_eth_rxmode *rxmode;
- int rc;
+ int rc = 0;
rxmode = ð_dev->data->dev_conf.rxmode;
vtag_cfg->vtag_size = NIX_VTAGSIZE_T4;
if (vlan->outer_vlan_tpid)
- vtag_cfg->tx.vtag0 =
- (vlan->outer_vlan_tpid << 16) | vlan_id;
+ vtag_cfg->tx.vtag0 = ((uint32_t)vlan->outer_vlan_tpid
+ << 16) | vlan_id;
else
vtag_cfg->tx.vtag0 =
((RTE_ETHER_TYPE_VLAN << 16) | vlan_id);