if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS) {
int j, nr_ctxs = bnxt_rss_ctxts(bp);
+ /* The RSS table size on Thor (P5) devices is 512 entries.
+ * Cap the maximum number of Rx rings to the same value.
+ */
if (bp->rx_nr_rings > BNXT_RSS_TBL_SIZE_P5) {
PMD_DRV_LOG(ERR, "RxQ cnt %d > reta_size %d\n",
bp->rx_nr_rings, BNXT_RSS_TBL_SIZE_P5);
- PMD_DRV_LOG(ERR,
- "Only queues 0-%d will be in RSS table\n",
- BNXT_RSS_TBL_SIZE_P5 - 1);
+ goto err_out;
}
rc = 0;
if (rc)
return rc;
- if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp)) {
- PMD_DRV_LOG(ERR, "Flow Control Settings cannot be modified\n");
+ if (!BNXT_SINGLE_PF(bp)) {
+ PMD_DRV_LOG(ERR,
+ "Flow Control Settings cannot be modified on VF or on shared PF\n");
return -ENOTSUP;
}
ret = snprintf(fw_version, fw_size, "%d.%d.%d.%d",
fw_major, fw_minor, fw_updt, fw_rsvd);
+ if (ret < 0)
+ return -EINVAL;
ret += 1; /* add the size of '\0' */
- if (fw_size < (uint32_t)ret)
+ if (fw_size < (size_t)ret)
return ret;
else
return 0;
if (rc)
return rc;
- if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp)) {
- PMD_DRV_LOG(ERR,
- "PVID cannot be modified for this function\n");
+ if (!BNXT_SINGLE_PF(bp)) {
+ PMD_DRV_LOG(ERR, "PVID cannot be modified on VF or on shared PF\n");
return -ENOTSUP;
}
bp->vlan = on ? pvid : 0;
return 0;
}
+/* Drain pending Rx PTP timestamps from the hardware FIFO.
+ *
+ * Repeatedly pops entries for this port from the Rx timestamp FIFO,
+ * recording the most recently popped 64-bit timestamp in *last_ts,
+ * until the FIFO no longer reports a pending entry or the
+ * BNXT_PTP_RX_PND_CNT retry budget is exhausted.
+ *
+ * Returns 0 on success (*last_ts holds the latest drained timestamp),
+ * -EINVAL if PTP is not configured or the device is a P5/Thor chip
+ * (the register-based FIFO access below is valid only on pre-P5 HW),
+ * or -EBUSY if entries are still pending after the retry budget.
+ */
+static int bnxt_clr_rx_ts(struct bnxt *bp, uint64_t *last_ts)
+{
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ struct bnxt_pf_info *pf = bp->pf;
+ uint16_t port_id;
+ int i = 0;
+ uint32_t fifo;
+
+ if (!ptp || (bp->flags & BNXT_FLAG_CHIP_P5))
+ return -EINVAL;
+
+ port_id = pf->port_id;
+ /* Read the FIFO status, then pop-and-reread until empty or budget spent. */
+ fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
+ ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO]));
+ while ((fifo & BNXT_PTP_RX_FIFO_PENDING) && (i < BNXT_PTP_RX_PND_CNT)) {
+ /* Advance (pop) the FIFO for this port before re-reading status. */
+ rte_write32(1 << port_id, (uint8_t *)bp->bar0 +
+ ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO_ADV]);
+ fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
+ ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO]));
+ /* Assemble the 64-bit timestamp from the low/high 32-bit registers. */
+ *last_ts = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
+ ptp->rx_mapped_regs[BNXT_PTP_RX_TS_L]));
+ *last_ts |= (uint64_t)rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
+ ptp->rx_mapped_regs[BNXT_PTP_RX_TS_H])) << 32;
+ i++;
+ }
+
+ if (i >= BNXT_PTP_RX_PND_CNT)
+ return -EBUSY;
+
+ return 0;
+}
+
static int bnxt_get_rx_ts(struct bnxt *bp, uint64_t *ts)
{
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO]));
- if (fifo & BNXT_PTP_RX_FIFO_PENDING) {
-/* bnxt_clr_rx_ts(bp); TBD */
- return -EBUSY;
- }
+ if (fifo & BNXT_PTP_RX_FIFO_PENDING)
+ return bnxt_clr_rx_ts(bp, ts);
*ts = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
ptp->rx_mapped_regs[BNXT_PTP_RX_TS_L]));
struct rte_ether_addr *addr;
uint64_t pool_mask;
uint32_t pool = 0;
- uint16_t i;
+ uint32_t i;
int rc;
if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp))
struct bnxt_error_recovery_info *info = bp->recovery_info;
uint32_t reg = info->status_regs[index];
uint32_t type, offset, val = 0;
+ int ret = 0;
type = BNXT_FW_STATUS_REG_TYPE(reg);
offset = BNXT_FW_STATUS_REG_OFF(reg);
switch (type) {
case BNXT_FW_STATUS_REG_TYPE_CFG:
- rte_pci_read_config(bp->pdev, &val, sizeof(val), offset);
+ ret = rte_pci_read_config(bp->pdev, &val, sizeof(val), offset);
+ if (ret < 0)
+ PMD_DRV_LOG(ERR, "Failed to read PCI offset %#x",
+ offset);
break;
case BNXT_FW_STATUS_REG_TYPE_GRC:
offset = info->mapped_status_regs[index];
bp->flags |= BNXT_FLAG_FATAL_ERROR;
bp->flags |= BNXT_FLAG_FW_RESET;
+ bnxt_stop_rxtx(bp);
+
PMD_DRV_LOG(ERR, "Detected FW dead condition\n");
if (bnxt_is_master_func(bp))
+/* Cancel any scheduled FW health-check alarm and clear the scheduled flag. */
static void bnxt_cancel_fw_health_check(struct bnxt *bp)
{
- if (!bnxt_is_recovery_enabled(bp))
- return;
-
+ /* Cancel unconditionally: both calls below are harmless no-ops when no
+ * health check was scheduled, while the removed recovery-enabled guard
+ * could skip the cancel and leave a stale alarm pending if the recovery
+ * capability changed after the check was scheduled.
+ */
rte_eal_alarm_cancel(bnxt_check_fw_health, (void *)bp);
bp->flags &= ~BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED;
}