net/bnxt: support to set VF rxmode
author    Ajit Khaparde <ajit.khaparde@broadcom.com>
          Thu, 1 Jun 2017 17:07:22 +0000 (12:07 -0500)
committer Ferruh Yigit <ferruh.yigit@intel.com>
          Mon, 12 Jun 2017 09:41:29 +0000 (10:41 +0100)
This patch adds support for configuring the VF L2 Rx settings.
The per-VF setting is maintained in bnxt_child_vf_info.l2_rx_mask.
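
For reference, a minimal sketch of how an application could use the new
call (the wrapper function below is illustrative only; the
rte_pmd_bnxt_set_vf_rxmode() signature and the ETH_VMDQ_ACCEPT_* flags
are taken from the patch that follows):

    /* Illustrative sketch: enable broadcast and all-multicast Rx on a VF.
     * The helper name is hypothetical; error handling is left to the caller.
     */
    #include <rte_ethdev.h>
    #include <rte_pmd_bnxt.h>

    static int vf_enable_bcast_mcast(uint8_t port, uint16_t vf)
    {
            uint16_t rx_mask = ETH_VMDQ_ACCEPT_BROADCAST |
                               ETH_VMDQ_ACCEPT_MULTICAST;

            /* on = 1 sets these bits in the VF's l2_rx_mask; on = 0 clears them */
            return rte_pmd_bnxt_set_vf_rxmode(port, vf, rx_mask, 1);
    }

In testpmd, the existing "set vf rxmode" command reaches the same path:
the handler now falls through to rte_pmd_bnxt_set_vf_rxmode() when the
ixgbe call is unavailable or returns -ENOTSUP (see the cmdline.c hunk
below).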

Signed-off-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
app/test-pmd/cmdline.c
drivers/net/bnxt/bnxt_hwrm.c
drivers/net/bnxt/bnxt_hwrm.h
drivers/net/bnxt/bnxt_rxq.c
drivers/net/bnxt/bnxt_vnic.h
drivers/net/bnxt/rte_pmd_bnxt.c
drivers/net/bnxt/rte_pmd_bnxt.h
drivers/net/bnxt/rte_pmd_bnxt_version.map

diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index 7ccf781..9354270 100644
@@ -6817,7 +6817,7 @@ cmd_set_vf_rxmode_parsed(void *parsed_result,
                       __attribute__((unused)) struct cmdline *cl,
                       __attribute__((unused)) void *data)
 {
-       int ret;
+       int ret = -ENOTSUP;
        uint16_t rx_mode = 0;
        struct cmd_set_vf_rxmode *res = parsed_result;
 
@@ -6833,7 +6833,16 @@ cmd_set_vf_rxmode_parsed(void *parsed_result,
                        rx_mode |= ETH_VMDQ_ACCEPT_MULTICAST;
        }
 
-       ret = rte_pmd_ixgbe_set_vf_rxmode(res->port_id, res->vf_id, rx_mode, (uint8_t)is_on);
+#ifdef RTE_LIBRTE_IXGBE_PMD
+       if (ret == -ENOTSUP)
+               ret = rte_pmd_ixgbe_set_vf_rxmode(res->port_id, res->vf_id,
+                                                 rx_mode, (uint8_t)is_on);
+#endif
+#ifdef RTE_LIBRTE_BNXT_PMD
+       if (ret == -ENOTSUP)
+               ret = rte_pmd_bnxt_set_vf_rxmode(res->port_id, res->vf_id,
+                                                rx_mode, (uint8_t)is_on);
+#endif
        if (ret < 0)
                printf("bad VF receive mode parameter, return code = %d \n",
                ret);
@@ -13799,9 +13808,9 @@ cmdline_parse_ctx_t main_ctx[] = {
        (cmdline_parse_inst_t *)&cmd_set_macsec_offload_off,
        (cmdline_parse_inst_t *)&cmd_set_macsec_sc,
        (cmdline_parse_inst_t *)&cmd_set_macsec_sa,
-       (cmdline_parse_inst_t *)&cmd_set_vf_rxmode,
        (cmdline_parse_inst_t *)&cmd_set_vf_traffic,
 #endif
+       (cmdline_parse_inst_t *)&cmd_set_vf_rxmode,
        (cmdline_parse_inst_t *)&cmd_vf_rate_limit,
        (cmdline_parse_inst_t *)&cmd_vf_rxvlan_filter,
        (cmdline_parse_inst_t *)&cmd_set_vf_mac_addr,
diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index bf0526d..6d98d5a 100644
@@ -229,23 +229,29 @@ int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
        /* FIXME add multicast flag, when multicast adding options is supported
         * by ethtool.
         */
+       if (vnic->flags & BNXT_VNIC_INFO_BCAST)
+               mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST;
+       if (vnic->flags & BNXT_VNIC_INFO_UNTAGGED)
+               mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN;
        if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
-               mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
+               mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
        if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
+       if (vnic->flags & BNXT_VNIC_INFO_MCAST)
+               mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
        if (vnic->mc_addr_cnt) {
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
                req.num_mc_entries = rte_cpu_to_le_32(vnic->mc_addr_cnt);
                req.mc_tbl_addr = rte_cpu_to_le_64(vnic->mc_list_dma_addr);
        }
-       req.mask = rte_cpu_to_le_32(HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST |
-                                   mask);
        if (vlan_count && vlan_table) {
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
                req.vlan_tag_tbl_addr = rte_cpu_to_le_16(
                         rte_mem_virt2phy(vlan_table));
                req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count);
        }
+       req.mask = rte_cpu_to_le_32(HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST |
+                                   mask);
 
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 
@@ -2317,6 +2323,18 @@ int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf)
        return rc;
 }
 
+void vf_vnic_set_rxmask_cb(struct bnxt_vnic_info *vnic, void *flagp)
+{
+       uint32_t *flag = flagp;
+
+       vnic->flags = *flag;
+}
+
+int bnxt_set_rx_mask_no_vlan(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+{
+       return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
+}
+
 int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
 {
        int rc = 0;
diff --git a/drivers/net/bnxt/bnxt_hwrm.h b/drivers/net/bnxt/bnxt_hwrm.h
index 33d2885..a603dda 100644
@@ -143,6 +143,8 @@ int bnxt_hwrm_port_clr_stats(struct bnxt *bp);
 int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on);
 int bnxt_hwrm_port_led_qcaps(struct bnxt *bp);
 int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf);
+void vf_vnic_set_rxmask_cb(struct bnxt_vnic_info *vnic, void *flagp);
+int bnxt_set_rx_mask_no_vlan(struct bnxt *bp, struct bnxt_vnic_info *vnic);
 int bnxt_vf_default_vnic_count(struct bnxt *bp, uint16_t vf);
 int bnxt_hwrm_func_vf_vnic_query_and_config(struct bnxt *bp, uint16_t vf,
        void (*vnic_cb)(struct bnxt_vnic_info *, void *), void *cbdata,
diff --git a/drivers/net/bnxt/bnxt_rxq.c b/drivers/net/bnxt/bnxt_rxq.c
index d8b970e..0793820 100644
@@ -76,6 +76,7 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
                        rc = -ENOMEM;
                        goto err_out;
                }
+               vnic->flags |= BNXT_VNIC_INFO_BCAST;
                STAILQ_INSERT_TAIL(&bp->ff_pool[0], vnic, next);
                bp->nr_vnics++;
 
@@ -120,6 +121,9 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
                }
                /* For each pool, allocate MACVLAN CFA rule & VNIC */
                if (!pools) {
+                       pools = RTE_MIN(bp->max_vnics,
+                           RTE_MIN(bp->max_l2_ctx,
+                            RTE_MIN(bp->max_rsscos_ctx, ETH_64_POOLS)));
                        RTE_LOG(ERR, PMD,
                                "VMDq pool not set, defaulted to 64\n");
                        pools = ETH_64_POOLS;
@@ -137,6 +141,7 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
                                rc = -ENOMEM;
                                goto err_out;
                        }
+                       vnic->flags |= BNXT_VNIC_INFO_BCAST;
                        STAILQ_INSERT_TAIL(&bp->ff_pool[i], vnic, next);
                        bp->nr_vnics++;
 
@@ -177,6 +182,7 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
                rc = -ENOMEM;
                goto err_out;
        }
+       vnic->flags |= BNXT_VNIC_INFO_BCAST;
        /* Partition the rx queues for the single pool */
        for (i = 0; i < bp->rx_cp_nr_rings; i++) {
                rxq = bp->eth_dev->data->rx_queues[i];
@@ -295,7 +301,7 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
        int rc = 0;
 
        if (!nb_desc || nb_desc > MAX_RX_DESC_CNT) {
-               RTE_LOG(ERR, PMD, "nb_desc %d is invalid", nb_desc);
+               RTE_LOG(ERR, PMD, "nb_desc %d is invalid\n", nb_desc);
                rc = -EINVAL;
                goto out;
        }
@@ -308,7 +314,7 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
        rxq = rte_zmalloc_socket("bnxt_rx_queue", sizeof(struct bnxt_rx_queue),
                                 RTE_CACHE_LINE_SIZE, socket_id);
        if (!rxq) {
-               RTE_LOG(ERR, PMD, "bnxt_rx_queue allocation failed!");
+               RTE_LOG(ERR, PMD, "bnxt_rx_queue allocation failed!\n");
                rc = -ENOMEM;
                goto out;
        }
@@ -333,7 +339,8 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
        /* Allocate RX ring hardware descriptors */
        if (bnxt_alloc_rings(bp, queue_idx, NULL, rxq->rx_ring, rxq->cp_ring,
                        "rxr")) {
-               RTE_LOG(ERR, PMD, "ring_dma_zone_reserve for rx_ring failed!");
+               RTE_LOG(ERR, PMD,
+                       "ring_dma_zone_reserve for rx_ring failed!\n");
                bnxt_rx_queue_release_op(rxq);
                rc = -ENOMEM;
                goto out;
diff --git a/drivers/net/bnxt/bnxt_vnic.h b/drivers/net/bnxt/bnxt_vnic.h
index 0d50224..993f221 100644
@@ -64,6 +64,11 @@ struct bnxt_vnic_info {
        uint32_t        flags;
 #define BNXT_VNIC_INFO_PROMISC                 (1 << 0)
 #define BNXT_VNIC_INFO_ALLMULTI                        (1 << 1)
+#define BNXT_VNIC_INFO_BCAST                   (1 << 2)
+#define BNXT_VNIC_INFO_UCAST                   (1 << 3)
+#define BNXT_VNIC_INFO_MCAST                   (1 << 4)
+#define BNXT_VNIC_INFO_TAGGED                  (1 << 5)
+#define BNXT_VNIC_INFO_UNTAGGED                        (1 << 6)
 
        uint16_t        cos_rule;
        uint16_t        lb_rule;
diff --git a/drivers/net/bnxt/rte_pmd_bnxt.c b/drivers/net/bnxt/rte_pmd_bnxt.c
index 2d4d6f3..0542034 100644
@@ -377,6 +377,60 @@ rte_pmd_bnxt_set_vf_vlan_stripq(uint8_t port, uint16_t vf, uint8_t on)
        return rc;
 }
 
+int rte_pmd_bnxt_set_vf_rxmode(uint8_t port, uint16_t vf,
+                               uint16_t rx_mask, uint8_t on)
+{
+       struct rte_eth_dev *dev;
+       struct rte_eth_dev_info dev_info;
+       uint16_t flag = 0;
+       struct bnxt *bp;
+       int rc;
+
+       RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+       dev = &rte_eth_devices[port];
+       if (!is_bnxt_supported(dev))
+               return -ENOTSUP;
+
+       rte_eth_dev_info_get(port, &dev_info);
+       bp = (struct bnxt *)dev->data->dev_private;
+
+       if (!bp->pf.vf_info)
+               return -EINVAL;
+
+       if (vf >= bp->pdev->max_vfs)
+               return -EINVAL;
+
+       if (rx_mask & (ETH_VMDQ_ACCEPT_UNTAG | ETH_VMDQ_ACCEPT_HASH_MC)) {
+               RTE_LOG(ERR, PMD, "Currently cannot toggle this setting\n");
+               return -ENOTSUP;
+       }
+
+       if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC && !on) {
+               RTE_LOG(ERR, PMD, "Currently cannot disable UC Rx\n");
+               return -ENOTSUP;
+       }
+
+       if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
+               flag |= BNXT_VNIC_INFO_BCAST;
+       if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
+               flag |= BNXT_VNIC_INFO_ALLMULTI;
+
+       if (on)
+               bp->pf.vf_info[vf].l2_rx_mask |= flag;
+       else
+               bp->pf.vf_info[vf].l2_rx_mask &= ~flag;
+
+       rc = bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf,
+                                       vf_vnic_set_rxmask_cb,
+                                       &bp->pf.vf_info[vf].l2_rx_mask,
+                                       bnxt_set_rx_mask_no_vlan);
+       if (rc)
+               RTE_LOG(ERR, PMD, "bnxt_hwrm_func_vf_vnic_set_rxmask failed\n");
+
+       return rc;
+}
+
 int rte_pmd_bnxt_set_vf_vlan_filter(uint8_t port, uint16_t vlan,
                                    uint64_t vf_mask, uint8_t vlan_on)
 {
diff --git a/drivers/net/bnxt/rte_pmd_bnxt.h b/drivers/net/bnxt/rte_pmd_bnxt.h
index 2144601..db9ce77 100644
@@ -237,6 +237,25 @@ int rte_pmd_bnxt_reset_vf_stats(uint8_t port,
  */
 int rte_pmd_bnxt_set_vf_vlan_anti_spoof(uint8_t port, uint16_t vf, uint8_t on);
 
+/**
+ * Set RX L2 Filtering mode of a VF of an Ethernet device.
+ *
+ * @param port
+ *   The port identifier of the Ethernet device.
+ * @param vf
+ *   VF id.
+ * @param rx_mask
+ *    The RX mode mask
+ * @param on
+ *    1 - Enable a VF RX mode.
+ *    0 - Disable a VF RX mode.
+ * @return
+ *   - (0) if successful.
+ *   - (-ENODEV) if *port_id* invalid.
+ *   - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_bnxt_set_vf_rxmode(uint8_t port, uint16_t vf,
+                               uint16_t rx_mask, uint8_t on);
 
 /**
  * Returns the number of default RX queues on a VF
diff --git a/drivers/net/bnxt/rte_pmd_bnxt_version.map b/drivers/net/bnxt/rte_pmd_bnxt_version.map
index cea61d8..974e240 100644
@@ -11,6 +11,7 @@ DPDK_17.08 {
        rte_pmd_bnxt_set_vf_mac_addr;
        rte_pmd_bnxt_set_vf_mac_anti_spoof;
        rte_pmd_bnxt_set_vf_rate_limit;
+       rte_pmd_bnxt_set_vf_rxmode;
        rte_pmd_bnxt_set_vf_vlan_anti_spoof;
        rte_pmd_bnxt_set_vf_vlan_filter;
        rte_pmd_bnxt_set_vf_vlan_insert;