unsigned int rx_nr_rings;
unsigned int rx_cp_nr_rings;
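+ /* Number of Rx queues serviced by each VNIC. */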
+ unsigned int rx_num_qs_per_vnic;
struct bnxt_rx_queue **rx_queues;
const void *rx_mem_zone;
struct rx_port_stats *hw_rx_port_stats;
for (i = 0; i < bp->nr_vnics; i++) {
struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
- uint32_t size = sizeof(*vnic->fw_grp_ids) * bp->max_ring_grps;
- vnic->fw_grp_ids = rte_zmalloc("vnic_fw_grp_ids", size, 0);
- if (!vnic->fw_grp_ids) {
- PMD_DRV_LOG(ERR,
- "Failed to alloc %d bytes for group ids\n",
- size);
- rc = -ENOMEM;
+ rc = bnxt_vnic_grp_alloc(bp, vnic);
+ if (rc)
goto err_out;
- }
- memset(vnic->fw_grp_ids, -1, size);
PMD_DRV_LOG(DEBUG, "vnic[%d] = %p vnic->fw_grp_ids = %p\n",
i, vnic, vnic->fw_grp_ids);
}
- for (j = 0; j < bp->rx_nr_rings; j++) {
+ for (j = 0; j < bp->rx_num_qs_per_vnic; j++) {
rxq = bp->eth_dev->data->rx_queues[j];
PMD_DRV_LOG(DEBUG,
struct bnxt *bp = eth_dev->data->dev_private;
struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
struct bnxt_vnic_info *vnic;
- uint16_t hash_type = 0;
- unsigned int i;
int rc;
rc = is_bnxt_in_error(bp);
bp->flags |= BNXT_FLAG_UPDATE_HASH;
memcpy(&bp->rss_conf, rss_conf, sizeof(*rss_conf));
- if (rss_conf->rss_hf & ETH_RSS_IPV4)
- hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
- if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
- hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
- if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
- hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4;
- if (rss_conf->rss_hf & ETH_RSS_IPV6)
- hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
- if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
- hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6;
- if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
- hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
-
- /* Update the RSS VNIC(s) */
- for (i = 0; i < bp->nr_vnics; i++) {
- vnic = &bp->vnic_info[i];
- vnic->hash_type = hash_type;
+ /* Update the default RSS VNIC */
+ vnic = &bp->vnic_info[0];
+ vnic->hash_type = bnxt_rte_to_hwrm_hash_types(rss_conf->rss_hf);
- /*
- * Use the supplied key if the key length is
- * acceptable and the rss_key is not NULL
- */
- if (rss_conf->rss_key &&
- rss_conf->rss_key_len <= HW_HASH_KEY_SIZE)
- memcpy(vnic->rss_hash_key, rss_conf->rss_key,
- rss_conf->rss_key_len);
+ /*
+ * Use the supplied key if the key length is
+ * acceptable and the rss_key is not NULL
+ */
+ if (rss_conf->rss_key && rss_conf->rss_key_len <= HW_HASH_KEY_SIZE)
+ memcpy(vnic->rss_hash_key,
+ rss_conf->rss_key,
+ rss_conf->rss_key_len);
- bnxt_hwrm_vnic_rss_cfg(bp, vnic);
- }
+ bnxt_hwrm_vnic_rss_cfg(bp, vnic);
return 0;
}
int ret = 0;
uint32_t tun_type;
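+ /* Flow creation is only permitted for the PF or a trusted VF. */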
+ if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to create flow, Not a Trusted VF!");
+ return NULL;
+ }
+
flow = rte_zmalloc("bnxt_flow", sizeof(struct rte_flow), 0);
if (!flow) {
rte_flow_error_set(error, ENOMEM,
struct bnxt_vnic_info *vnic = flow->vnic;
int ret = 0;
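+ /* Bail out if the flow has no filter attached. */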
+ if (!filter) {
+ ret = -EINVAL;
+ goto done;
+ }
+
if (filter->filter_type == HWRM_CFA_TUNNEL_REDIRECT_FILTER &&
filter->enables == filter->tunnel_type) {
ret = bnxt_handle_tunnel_redirect_destroy(bp,
else
rc = bnxt_hwrm_clear_l2_filter(bp, filter);
STAILQ_REMOVE(&vnic->filter, filter, bnxt_filter_info, next);
+ bnxt_free_filter(bp, filter);
//if (rc)
//break;
}
}
}
nb_q_per_grp = bp->rx_cp_nr_rings / pools;
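+ /* Record how many Rx queues each VNIC (pool) will serve. */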
+ bp->rx_num_qs_per_vnic = nb_q_per_grp;
PMD_DRV_LOG(DEBUG, "pools = %u nb_q_per_grp = %u\n",
pools, nb_q_per_grp);
start_grp_id = 0;
out:
if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
struct rte_eth_rss_conf *rss = &dev_conf->rx_adv_conf.rss_conf;
- uint16_t hash_type = 0;
if (bp->flags & BNXT_FLAG_UPDATE_HASH) {
rss = &bp->rss_conf;
bp->flags &= ~BNXT_FLAG_UPDATE_HASH;
}
- if (rss->rss_hf & ETH_RSS_IPV4)
- hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
- if (rss->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
- hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
- if (rss->rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
- hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4;
- if (rss->rss_hf & ETH_RSS_IPV6)
- hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
- if (rss->rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
- hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6;
- if (rss->rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
- hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
-
for (i = 0; i < bp->nr_vnics; i++) {
vnic = &bp->vnic_info[i];
- vnic->hash_type = hash_type;
+ vnic->hash_type =
+ bnxt_rte_to_hwrm_hash_types(rss->rss_hf);
/*
* Use the supplied key if the key length is
vnic, bp->grp_info[rx_queue_id].fw_grp_id);
}
- rc = bnxt_vnic_rss_configure(bp, vnic);
+ PMD_DRV_LOG(DEBUG, "Rx Queue Count %d\n", vnic->rx_queue_cnt);
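+ /* RSS only needs (re)programming when the VNIC has more than one Rx queue. */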
+ if (vnic->rx_queue_cnt > 1)
+ rc = bnxt_vnic_rss_configure(bp, vnic);
}
if (rc == 0)
vnic = rxq->vnic;
if (BNXT_HAS_RING_GRPS(bp))
vnic->fw_grp_ids[rx_queue_id] = INVALID_HW_RING_ID;
- rc = bnxt_vnic_rss_configure(bp, vnic);
+
+ PMD_DRV_LOG(DEBUG, "Rx Queue Count %d\n", vnic->rx_queue_cnt);
+ if (vnic->rx_queue_cnt > 1)
+ rc = bnxt_vnic_rss_configure(bp, vnic);
}
if (rc == 0)
bp->vnic_info = vnic_mem;
return 0;
}
+
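+/* Allocate the table of firmware ring group IDs for a VNIC and mark
+ * every entry invalid by filling it with 0xff bytes.
+ */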
+int bnxt_vnic_grp_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+{
+ uint32_t size = sizeof(*vnic->fw_grp_ids) * bp->max_ring_grps;
+
+ vnic->fw_grp_ids = rte_zmalloc("vnic_fw_grp_ids", size, 0);
+ if (!vnic->fw_grp_ids) {
+ PMD_DRV_LOG(ERR,
+ "Failed to alloc %d bytes for group ids\n",
+ size);
+ return -ENOMEM;
+ }
+ memset(vnic->fw_grp_ids, -1, size);
+
+ return 0;
+}
+
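+/* Translate rte_eth RSS offload flags (ETH_RSS_*) into the matching
+ * HWRM VNIC RSS hash type bits.
+ */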
+uint16_t bnxt_rte_to_hwrm_hash_types(uint64_t rte_type)
+{
+ uint16_t hwrm_type = 0;
+
+ if (rte_type & ETH_RSS_IPV4)
+ hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
+ if (rte_type & ETH_RSS_NONFRAG_IPV4_TCP)
+ hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
+ if (rte_type & ETH_RSS_NONFRAG_IPV4_UDP)
+ hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4;
+ if (rte_type & ETH_RSS_IPV6)
+ hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
+ if (rte_type & ETH_RSS_NONFRAG_IPV6_TCP)
+ hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6;
+ if (rte_type & ETH_RSS_NONFRAG_IPV6_UDP)
+ hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
+
+ return hwrm_type;
+}
uint16_t cos_rule;
uint16_t lb_rule;
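+ /* Number of Rx queues associated with this VNIC; RSS is configured only when this exceeds one. */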
+ uint16_t rx_queue_cnt;
bool vlan_strip;
bool func_default;
bool bd_stall;
int bnxt_alloc_vnic_attributes(struct bnxt *bp);
void bnxt_free_vnic_mem(struct bnxt *bp);
int bnxt_alloc_vnic_mem(struct bnxt *bp);
+int bnxt_vnic_grp_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic);
+uint16_t bnxt_rte_to_hwrm_hash_types(uint64_t rte_type);
#endif