This patch adds support for configuring the VF L2 Rx settings.
The per-VF setting is maintained in bnxt_child_vf_info.l2_rx_mask.
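For illustration only, here is a minimal application-level sketch of how
the new rte_pmd_bnxt_set_vf_rxmode() API is expected to be called; the
wrapper function name and the port/VF ids are made up for the example:

    #include <rte_ethdev.h>
    #include <rte_pmd_bnxt.h>

    /* Example only: accept broadcast and all-multicast traffic on VF 1 of
     * port 0. Pass 0 as the last argument to turn the modes off again.
     */
    static int example_enable_vf_bcast_mcast(void)
    {
            uint16_t mask = ETH_VMDQ_ACCEPT_BROADCAST |
                            ETH_VMDQ_ACCEPT_MULTICAST;

            return rte_pmd_bnxt_set_vf_rxmode(0 /* port */, 1 /* vf */,
                                              mask, 1 /* on */);
    }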
Signed-off-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
__attribute__((unused)) struct cmdline *cl,
__attribute__((unused)) void *data)
{
- int ret;
+ int ret = -ENOTSUP;
uint16_t rx_mode = 0;
struct cmd_set_vf_rxmode *res = parsed_result;
rx_mode |= ETH_VMDQ_ACCEPT_MULTICAST;
}
- ret = rte_pmd_ixgbe_set_vf_rxmode(res->port_id, res->vf_id, rx_mode, (uint8_t)is_on);
+#ifdef RTE_LIBRTE_IXGBE_PMD
+ if (ret == -ENOTSUP)
+ ret = rte_pmd_ixgbe_set_vf_rxmode(res->port_id, res->vf_id,
+ rx_mode, (uint8_t)is_on);
+#endif
+#ifdef RTE_LIBRTE_BNXT_PMD
+ if (ret == -ENOTSUP)
+ ret = rte_pmd_bnxt_set_vf_rxmode(res->port_id, res->vf_id,
+ rx_mode, (uint8_t)is_on);
+#endif
if (ret < 0)
printf("bad VF receive mode parameter, return code = %d \n",
ret);
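With this change the rxmode command in testpmd is no longer tied to ixgbe:
each PMD-specific helper is tried in turn and execution falls through while
the return code is still -ENOTSUP (and, further below, the command
registration moves outside the ixgbe-only #ifdef). Assuming the command
syntax already defined by cmd_set_vf_rxmode, a bnxt port can then be
exercised with, for example, "set port 0 vf 1 rxmode BAM on" (BAM maps to
ETH_VMDQ_ACCEPT_BROADCAST); the port and VF ids here are arbitrary.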
(cmdline_parse_inst_t *)&cmd_set_macsec_offload_off,
(cmdline_parse_inst_t *)&cmd_set_macsec_sc,
(cmdline_parse_inst_t *)&cmd_set_macsec_sa,
- (cmdline_parse_inst_t *)&cmd_set_vf_rxmode,
(cmdline_parse_inst_t *)&cmd_set_vf_traffic,
#endif
+ (cmdline_parse_inst_t *)&cmd_set_vf_rxmode,
(cmdline_parse_inst_t *)&cmd_vf_rate_limit,
(cmdline_parse_inst_t *)&cmd_vf_rxvlan_filter,
(cmdline_parse_inst_t *)&cmd_set_vf_mac_addr,
/* FIXME: add the multicast flag once multicast adding options are
* supported by ethtool.
*/
+ if (vnic->flags & BNXT_VNIC_INFO_BCAST)
+ mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST;
+ if (vnic->flags & BNXT_VNIC_INFO_UNTAGGED)
+ mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN;
if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
- mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
+ mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
+ if (vnic->flags & BNXT_VNIC_INFO_MCAST)
+ mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
if (vnic->mc_addr_cnt) {
mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
req.num_mc_entries = rte_cpu_to_le_32(vnic->mc_addr_cnt);
req.mc_tbl_addr = rte_cpu_to_le_64(vnic->mc_list_dma_addr);
}
- req.mask = rte_cpu_to_le_32(HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST |
- mask);
if (vlan_count && vlan_table) {
mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
req.vlan_tag_tbl_addr = rte_cpu_to_le_64(
rte_mem_virt2phy(vlan_table));
req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count);
}
+ req.mask = rte_cpu_to_le_32(HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST |
+ mask);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
return rc;
}
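The req.mask assignment is moved below the VLAN-table handling because that
block ORs the VLANONLY bit into mask; snapshotting the mask first would
silently drop it. A standalone toy illustration of the ordering issue (the
two defines are placeholders, not the real HWRM constants):

    #include <stdint.h>
    #include <stdio.h>

    #define MASK_BCAST    0x1   /* placeholder, not the HWRM value */
    #define MASK_VLANONLY 0x2   /* placeholder, not the HWRM value */

    int main(void)
    {
            uint32_t mask = 0, req_mask;

            /* Old ordering: snapshot taken before the VLAN block runs. */
            req_mask = MASK_BCAST | mask;
            mask |= MASK_VLANONLY;              /* a VLAN table was supplied */
            printf("old order: 0x%x (VLANONLY lost)\n", req_mask);

            /* New ordering: snapshot taken after every bit is accumulated. */
            req_mask = MASK_BCAST | mask;
            printf("new order: 0x%x\n", req_mask);
            return 0;
    }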
+void vf_vnic_set_rxmask_cb(struct bnxt_vnic_info *vnic, void *flagp)
+{
+ uint32_t *flag = flagp;
+
+ vnic->flags = *flag;
+}
+
+int bnxt_set_rx_mask_no_vlan(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+{
+ return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
+}
+
int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
{
int rc = 0;
int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on);
int bnxt_hwrm_port_led_qcaps(struct bnxt *bp);
int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf);
+void vf_vnic_set_rxmask_cb(struct bnxt_vnic_info *vnic, void *flagp);
+int bnxt_set_rx_mask_no_vlan(struct bnxt *bp, struct bnxt_vnic_info *vnic);
int bnxt_vf_default_vnic_count(struct bnxt *bp, uint16_t vf);
int bnxt_hwrm_func_vf_vnic_query_and_config(struct bnxt *bp, uint16_t vf,
void (*vnic_cb)(struct bnxt_vnic_info *, void *), void *cbdata,
rc = -ENOMEM;
goto err_out;
}
+ vnic->flags |= BNXT_VNIC_INFO_BCAST;
STAILQ_INSERT_TAIL(&bp->ff_pool[0], vnic, next);
bp->nr_vnics++;
}
/* For each pool, allocate MACVLAN CFA rule & VNIC */
if (!pools) {
+ pools = RTE_MIN(bp->max_vnics,
+ RTE_MIN(bp->max_l2_ctx,
+ RTE_MIN(bp->max_rsscos_ctx, ETH_64_POOLS)));
- RTE_LOG(ERR, PMD,
- "VMDq pool not set, defaulted to 64\n");
- pools = ETH_64_POOLS;
+ RTE_LOG(ERR, PMD,
+ "VMDq pool not set, defaulted to %d\n", pools);
rc = -ENOMEM;
goto err_out;
}
+ vnic->flags |= BNXT_VNIC_INFO_BCAST;
STAILQ_INSERT_TAIL(&bp->ff_pool[i], vnic, next);
bp->nr_vnics++;
rc = -ENOMEM;
goto err_out;
}
+ vnic->flags |= BNXT_VNIC_INFO_BCAST;
/* Partition the rx queues for the single pool */
for (i = 0; i < bp->rx_cp_nr_rings; i++) {
rxq = bp->eth_dev->data->rx_queues[i];
int rc = 0;
if (!nb_desc || nb_desc > MAX_RX_DESC_CNT) {
- RTE_LOG(ERR, PMD, "nb_desc %d is invalid", nb_desc);
+ RTE_LOG(ERR, PMD, "nb_desc %d is invalid\n", nb_desc);
rc = -EINVAL;
goto out;
}
rxq = rte_zmalloc_socket("bnxt_rx_queue", sizeof(struct bnxt_rx_queue),
RTE_CACHE_LINE_SIZE, socket_id);
if (!rxq) {
- RTE_LOG(ERR, PMD, "bnxt_rx_queue allocation failed!");
+ RTE_LOG(ERR, PMD, "bnxt_rx_queue allocation failed!\n");
rc = -ENOMEM;
goto out;
}
/* Allocate RX ring hardware descriptors */
if (bnxt_alloc_rings(bp, queue_idx, NULL, rxq->rx_ring, rxq->cp_ring,
"rxr")) {
- RTE_LOG(ERR, PMD, "ring_dma_zone_reserve for rx_ring failed!");
+ RTE_LOG(ERR, PMD,
+ "ring_dma_zone_reserve for rx_ring failed!\n");
bnxt_rx_queue_release_op(rxq);
rc = -ENOMEM;
goto out;
uint32_t flags;
#define BNXT_VNIC_INFO_PROMISC (1 << 0)
#define BNXT_VNIC_INFO_ALLMULTI (1 << 1)
+#define BNXT_VNIC_INFO_BCAST (1 << 2)
+#define BNXT_VNIC_INFO_UCAST (1 << 3)
+#define BNXT_VNIC_INFO_MCAST (1 << 4)
+#define BNXT_VNIC_INFO_TAGGED (1 << 5)
+#define BNXT_VNIC_INFO_UNTAGGED (1 << 6)
uint16_t cos_rule;
uint16_t lb_rule;
return rc;
}
+int rte_pmd_bnxt_set_vf_rxmode(uint8_t port, uint16_t vf,
+ uint16_t rx_mask, uint8_t on)
+{
+ struct rte_eth_dev *dev;
+ struct rte_eth_dev_info dev_info;
+ uint16_t flag = 0;
+ struct bnxt *bp;
+ int rc;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+ if (!is_bnxt_supported(dev))
+ return -ENOTSUP;
+
+ rte_eth_dev_info_get(port, &dev_info);
+ bp = (struct bnxt *)dev->data->dev_private;
+
+ if (!bp->pf.vf_info)
+ return -EINVAL;
+
+ if (vf >= bp->pdev->max_vfs)
+ return -EINVAL;
+
+ if (rx_mask & (ETH_VMDQ_ACCEPT_UNTAG | ETH_VMDQ_ACCEPT_HASH_MC)) {
+ RTE_LOG(ERR, PMD, "Currently cannot toggle this setting\n");
+ return -ENOTSUP;
+ }
+
+ if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC && !on) {
+ RTE_LOG(ERR, PMD, "Currently cannot disable UC Rx\n");
+ return -ENOTSUP;
+ }
+
+ if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
+ flag |= BNXT_VNIC_INFO_BCAST;
+ if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
+ flag |= BNXT_VNIC_INFO_ALLMULTI;
+
+ if (on)
+ bp->pf.vf_info[vf].l2_rx_mask |= flag;
+ else
+ bp->pf.vf_info[vf].l2_rx_mask &= ~flag;
+
+ rc = bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf,
+ vf_vnic_set_rxmask_cb,
+ &bp->pf.vf_info[vf].l2_rx_mask,
+ bnxt_set_rx_mask_no_vlan);
+ if (rc)
+ RTE_LOG(ERR, PMD, "bnxt_hwrm_func_vf_vnic_set_rxmask failed\n");
+
+ return rc;
+}
+
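To make the update path concrete: rte_pmd_bnxt_set_vf_rxmode() above stores
the new flag state in bp->pf.vf_info[vf].l2_rx_mask and then walks every VNIC
owned by the VF, stamping the stored mask into vnic->flags
(vf_vnic_set_rxmask_cb) and pushing it to firmware (bnxt_set_rx_mask_no_vlan).
A standalone numeric sketch of the bookkeeping only, using the flag values
from bnxt_vnic.h and omitting the firmware call:

    #include <stdint.h>
    #include <assert.h>

    #define BNXT_VNIC_INFO_ALLMULTI (1 << 1)    /* as defined in bnxt_vnic.h */
    #define BNXT_VNIC_INFO_BCAST    (1 << 2)

    int main(void)
    {
            uint32_t l2_rx_mask = BNXT_VNIC_INFO_BCAST; /* e.g. initial state */

            /* rx_mask = ETH_VMDQ_ACCEPT_MULTICAST, on = 1: OR in ALLMULTI. */
            l2_rx_mask |= BNXT_VNIC_INFO_ALLMULTI;

            /* rx_mask = ETH_VMDQ_ACCEPT_BROADCAST, on = 0: clear only BCAST. */
            l2_rx_mask &= ~(uint32_t)BNXT_VNIC_INFO_BCAST;

            assert(l2_rx_mask == BNXT_VNIC_INFO_ALLMULTI);
            return 0;
    }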
int rte_pmd_bnxt_set_vf_vlan_filter(uint8_t port, uint16_t vlan,
uint64_t vf_mask, uint8_t vlan_on)
{
*/
int rte_pmd_bnxt_set_vf_vlan_anti_spoof(uint8_t port, uint16_t vf, uint8_t on);
+/**
+ * Set the RX L2 filtering mode of a VF of an Ethernet device.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf
+ * VF id.
+ * @param rx_mask
+ * The RX mode mask, composed of ETH_VMDQ_ACCEPT_* flags.
+ * @param on
+ * 1 - Enable the given RX modes for the VF.
+ * 0 - Disable the given RX modes for the VF.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ * - (-ENOTSUP) if the requested mode cannot be configured.
+ */
+int rte_pmd_bnxt_set_vf_rxmode(uint8_t port, uint16_t vf,
+ uint16_t rx_mask, uint8_t on);
/**
* Returns the number of default RX queues on a VF
rte_pmd_bnxt_set_vf_mac_addr;
rte_pmd_bnxt_set_vf_mac_anti_spoof;
rte_pmd_bnxt_set_vf_rate_limit;
+ rte_pmd_bnxt_set_vf_rxmode;
rte_pmd_bnxt_set_vf_vlan_anti_spoof;
rte_pmd_bnxt_set_vf_vlan_filter;
rte_pmd_bnxt_set_vf_vlan_insert;