/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2014-2018 Broadcom
+ * Copyright(c) 2014-2021 Broadcom
* All rights reserved.
*/
* VNIC Functions
*/
-static void prandom_bytes(void *dest_ptr, size_t len)
+void prandom_bytes(void *dest_ptr, size_t len)
{
char *dest = (char *)dest_ptr;
uint64_t rb;
}
}
-void bnxt_init_vnics(struct bnxt *bp)
+static void bnxt_init_vnics(struct bnxt *bp)
{
struct bnxt_vnic_info *vnic;
uint16_t max_vnics;
vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
vnic->hash_mode =
HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_DEFAULT;
+ vnic->rx_queue_cnt = 0;
- prandom_bytes(vnic->rss_hash_key, HW_HASH_KEY_SIZE);
STAILQ_INIT(&vnic->filter);
STAILQ_INIT(&vnic->flow_list);
STAILQ_INSERT_TAIL(&bp->free_vnic_list, vnic, next);
+/*
+ * Return every VNIC to the driver's free list.
+ *
+ * Walks the whole max_vnics array (not just the in-use count) and also
+ * resets each VNIC's RX queue count so entries are clean for reuse.
+ */
 void bnxt_free_all_vnics(struct bnxt *bp)
 {
-	struct bnxt_vnic_info *temp;
+	struct bnxt_vnic_info *vnic;
 	unsigned int i;
-	for (i = 0; i < bp->nr_vnics; i++) {
-		temp = &bp->vnic_info[i];
-		STAILQ_INSERT_TAIL(&bp->free_vnic_list, temp, next);
+	/* Nothing to do if the VNIC array was never allocated. */
+	if (bp->vnic_info == NULL)
+		return;
+
+	for (i = 0; i < bp->max_vnics; i++) {
+		vnic = &bp->vnic_info[i];
+		STAILQ_INSERT_TAIL(&bp->free_vnic_list, vnic, next);
+		vnic->rx_queue_cnt = 0;
 	}
 }
entry_length = HW_HASH_KEY_SIZE +
BNXT_MAX_MC_ADDRS * RTE_ETHER_ADDR_LEN;
- if (BNXT_CHIP_THOR(bp))
- rss_table_size = BNXT_RSS_TBL_SIZE_THOR *
+ if (BNXT_CHIP_P5(bp))
+ rss_table_size = BNXT_RSS_TBL_SIZE_P5 *
2 * sizeof(*vnic->rss_table);
else
rss_table_size = HW_HASH_INDEX_SIZE * sizeof(*vnic->rss_table);
max_vnics = bp->max_vnics;
snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
- "bnxt_%04x:%02x:%02x:%02x_vnicattr", pdev->addr.domain,
+ "bnxt_" PCI_PRI_FMT "_vnicattr", pdev->addr.domain,
pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
mz = rte_memzone_lookup(mz_name);
return -ENOMEM;
}
mz_phys_addr = mz->iova;
- if ((unsigned long)mz->addr == mz_phys_addr) {
- PMD_DRV_LOG(WARNING,
- "Memzone physical address same as virtual.\n");
- PMD_DRV_LOG(WARNING,
- "Using rte_mem_virt2iova()\n");
- mz_phys_addr = rte_mem_virt2iova(mz->addr);
- if (mz_phys_addr == RTE_BAD_IOVA) {
- PMD_DRV_LOG(ERR,
- "unable to map to physical memory\n");
- return -ENOMEM;
- }
- }
for (i = 0; i < max_vnics; i++) {
vnic = &bp->vnic_info[i];
HW_HASH_KEY_SIZE);
vnic->mc_list_dma_addr = vnic->rss_hash_key_dma_addr +
HW_HASH_KEY_SIZE;
+ prandom_bytes(vnic->rss_hash_key, HW_HASH_KEY_SIZE);
}
return 0;
return -ENOMEM;
}
bp->vnic_info = vnic_mem;
+ bnxt_init_vnics(bp);
return 0;
}
+
+/*
+ * Allocate the VNIC's table of firmware ring group ids, one entry per
+ * ring group the device supports (bp->max_ring_grps).
+ *
+ * Each entry is filled with 0xff bytes so an unassigned slot reads as an
+ * all-ones id (presumably the invalid-ring-group sentinel — confirm
+ * against the HWRM definitions).
+ *
+ * Returns 0 on success or -ENOMEM if the allocation fails.
+ */
+int bnxt_vnic_grp_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+{
+	uint32_t size = sizeof(*vnic->fw_grp_ids) * bp->max_ring_grps;
+
+	vnic->fw_grp_ids = rte_zmalloc("vnic_fw_grp_ids", size, 0);
+	if (!vnic->fw_grp_ids) {
+		PMD_DRV_LOG(ERR,
+			    "Failed to alloc %d bytes for group ids\n",
+			    size);
+		return -ENOMEM;
+	}
+	/* Mark every slot as "no ring group assigned yet". */
+	memset(vnic->fw_grp_ids, -1, size);
+
+	return 0;
+}
+
+/*
+ * Translate rte_ethdev RSS hash-function flags (ETH_RSS_*) into the
+ * equivalent HWRM VNIC RSS-config hash-type bitmask for firmware.
+ *
+ * Only the IPv4/IPv6 L3 and TCP/UDP L4 flags listed below are mapped;
+ * any other ETH_RSS_* bits in rte_type are silently ignored.
+ */
+uint16_t bnxt_rte_to_hwrm_hash_types(uint64_t rte_type)
+{
+	uint16_t hwrm_type = 0;
+
+	if (rte_type & ETH_RSS_IPV4)
+		hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
+	if (rte_type & ETH_RSS_NONFRAG_IPV4_TCP)
+		hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
+	if (rte_type & ETH_RSS_NONFRAG_IPV4_UDP)
+		hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4;
+	if (rte_type & ETH_RSS_IPV6)
+		hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
+	if (rte_type & ETH_RSS_NONFRAG_IPV6_TCP)
+		hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6;
+	if (rte_type & ETH_RSS_NONFRAG_IPV6_UDP)
+		hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
+
+	return hwrm_type;
+}
+
+/*
+ * Map a requested RSS hash level (lvl, a BNXT_RSS_LEVEL_* value) plus the
+ * set of enabled hash functions (hash_f, ETH_RSS_* flags) to the HWRM
+ * VNIC RSS hash-mode flag to program into firmware.
+ *
+ * The "_4" (L3+L4) mode is chosen whenever any L4 (TCP/UDP) hash flag is
+ * set; the "_2" (L3-only) mode when only L3 flags are set.  Unknown lvl
+ * values fall back to the default mode.
+ *
+ * Returns the HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_* value (the
+ * default mode when the firmware lacks outer-RSS capability).
+ */
+int bnxt_rte_to_hwrm_hash_level(struct bnxt *bp, uint64_t hash_f, uint32_t lvl)
+{
+	uint32_t mode = HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_DEFAULT;
+	bool l3 = (hash_f & (ETH_RSS_IPV4 | ETH_RSS_IPV6));
+	bool l4 = (hash_f & (ETH_RSS_NONFRAG_IPV4_UDP |
+			     ETH_RSS_NONFRAG_IPV6_UDP |
+			     ETH_RSS_NONFRAG_IPV4_TCP |
+			     ETH_RSS_NONFRAG_IPV6_TCP));
+	bool l3_only = l3 && !l4;
+	bool l3_and_l4 = l3 && l4;
+
+	/* If FW has not advertised capability to configure outer/inner
+	 * RSS hashing , just log a message. HW will work in default RSS mode.
+	 * NOTE(review): the message is logged at ERR level although the
+	 * situation is handled gracefully — consider INFO/DEBUG.
+	 */
+	if (!(bp->vnic_cap_flags & BNXT_VNIC_CAP_OUTER_RSS)) {
+		PMD_DRV_LOG(ERR, "RSS hash level cannot be configured\n");
+		return mode;
+	}
+
+	switch (lvl) {
+	case BNXT_RSS_LEVEL_INNERMOST:
+		if (l3_and_l4 || l4)
+			mode =
+			HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_INNERMOST_4;
+		else if (l3_only)
+			mode =
+			HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_INNERMOST_2;
+		break;
+	case BNXT_RSS_LEVEL_OUTERMOST:
+		if (l3_and_l4 || l4)
+			mode =
+			HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_OUTERMOST_4;
+		else if (l3_only)
+			mode =
+			HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_OUTERMOST_2;
+		break;
+	default:
+		mode = HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_DEFAULT;
+		break;
+	}
+
+	return mode;
+}
+
+/*
+ * Inverse of bnxt_rte_to_hwrm_hash_level(): convert the HWRM hash-mode
+ * flag read back from firmware into the rte_ethdev ETH_RSS_LEVEL_* value
+ * reported to the application.
+ *
+ * Both the "_2" and "_4" variants of a mode map to the same rte level;
+ * anything else (including the default mode) reports the PMD default.
+ */
+uint64_t bnxt_hwrm_to_rte_rss_level(struct bnxt *bp, uint32_t mode)
+{
+	uint64_t rss_level = 0;
+
+	/* If FW has not advertised capability to configure inner/outer RSS
+	 * return default hash mode.
+	 */
+	if (!(bp->vnic_cap_flags & BNXT_VNIC_CAP_OUTER_RSS))
+		return ETH_RSS_LEVEL_PMD_DEFAULT;
+
+	if (mode == HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_OUTERMOST_2 ||
+	    mode == HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_OUTERMOST_4)
+		rss_level |= ETH_RSS_LEVEL_OUTERMOST;
+	else if (mode == HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_INNERMOST_2 ||
+		 mode == HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_INNERMOST_4)
+		rss_level |= ETH_RSS_LEVEL_INNERMOST;
+	else
+		rss_level |= ETH_RSS_LEVEL_PMD_DEFAULT;
+
+	return rss_level;
+}