STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
filter->mac_index = index;
memcpy(filter->l2_addr, mac_addr, ETHER_ADDR_LEN);
- return bnxt_hwrm_set_filter(bp, vnic, filter);
+ return bnxt_hwrm_set_filter(bp, vnic->fw_vnic_id, filter);
}
int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete)
filter->l2_addr, ETHER_ADDR_LEN);
/* MAC only filter */
rc = bnxt_hwrm_set_filter(bp,
- vnic,
+ vnic->fw_vnic_id,
new_filter);
if (rc)
goto exit;
new_filter->l2_ovlan = vlan_id;
new_filter->l2_ovlan_mask = 0xF000;
new_filter->enables |= en;
- rc = bnxt_hwrm_set_filter(bp, vnic,
+ rc = bnxt_hwrm_set_filter(bp, vnic->fw_vnic_id,
new_filter);
if (rc)
goto exit;
RTE_LOG(ERR, PMD, "Extend VLAN Not supported\n");
}
+static void
+bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev, struct ether_addr *addr)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ /* Default Filter is tied to VNIC 0 */
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+ struct bnxt_filter_info *filter;
+ int rc;
+
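+ /* The default MAC address can only be overridden on the PF. */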
+ if (BNXT_VF(bp))
+ return;
+
+ memcpy(bp->mac_addr, addr, sizeof(bp->mac_addr));
+ memcpy(&dev->data->mac_addrs[0], bp->mac_addr, ETHER_ADDR_LEN);
+
+ STAILQ_FOREACH(filter, &vnic->filter, next) {
+ /* Default Filter is at Index 0 */
+ if (filter->mac_index != 0)
+ continue;
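+ /* The existing HWRM filter must be cleared before it can be re-created. */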
+ rc = bnxt_hwrm_clear_filter(bp, filter);
+ if (rc)
+ break;
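+ /* Re-create the filter with the new address and an exact-match mask. */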
+ memcpy(filter->l2_addr, bp->mac_addr, ETHER_ADDR_LEN);
+ memset(filter->l2_addr_mask, 0xff, ETHER_ADDR_LEN);
+ filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
+ filter->enables |=
+ HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
+ HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK;
+ rc = bnxt_hwrm_set_filter(bp, vnic->fw_vnic_id, filter);
+ if (rc)
+ break;
+ filter->mac_index = 0;
+ RTE_LOG(DEBUG, PMD, "Set MAC addr\n");
+ }
+}
+
+static int
+bnxt_dev_set_mc_addr_list_op(struct rte_eth_dev *eth_dev,
+ struct ether_addr *mc_addr_set,
+ uint32_t nb_mc_addr)
+{
+ struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ char *mc_addr_list = (char *)mc_addr_set;
+ struct bnxt_vnic_info *vnic;
+ uint32_t off = 0, i = 0;
+
+ vnic = &bp->vnic_info[0];
+
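+ /* Past the HW table limit, fall back to receiving all multicast frames. */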
+ if (nb_mc_addr > BNXT_MAX_MC_ADDRS) {
+ vnic->flags |= BNXT_VNIC_INFO_ALLMULTI;
+ goto allmulti;
+ }
+
+ /* TODO: check for duplicate multicast addresses */
+ vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI;
+ for (i = 0; i < nb_mc_addr; i++) {
+ memcpy(vnic->mc_list + off, mc_addr_list + off, ETHER_ADDR_LEN);
+ off += ETHER_ADDR_LEN;
+ }
+
+ vnic->mc_addr_cnt = i;
+
+allmulti:
+ return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic);
+}
+
/*
* Initialization
*/
.udp_tunnel_port_del = bnxt_udp_tunnel_port_del_op,
.vlan_filter_set = bnxt_vlan_filter_set_op,
.vlan_offload_set = bnxt_vlan_offload_set_op,
+ .mac_addr_set = bnxt_set_default_mac_addr_op,
.xstats_get = bnxt_dev_xstats_get_op,
.xstats_get_names = bnxt_dev_xstats_get_names_op,
.xstats_reset = bnxt_dev_xstats_reset_op,
+ .set_mc_addr_list = bnxt_dev_set_mc_addr_list_op,
};
static bool bnxt_vf_pciid(uint16_t id)
if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
- mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
+ mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
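+ /* Program the table of specific multicast addresses, if any were set. */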
+ if (vnic->mc_addr_cnt) {
+ mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
+ req.num_mc_entries = rte_cpu_to_le_32(vnic->mc_addr_cnt);
+ req.mc_tbl_addr = rte_cpu_to_le_64(vnic->mc_list_dma_addr);
+ }
req.mask = rte_cpu_to_le_32(HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST |
mask);
}
int bnxt_hwrm_set_filter(struct bnxt *bp,
- struct bnxt_vnic_info *vnic,
+ uint16_t dst_id,
struct bnxt_filter_info *filter)
{
int rc = 0;
enables = filter->enables |
HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
- req.dst_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
+ req.dst_id = rte_cpu_to_le_16(dst_id);
if (enables &
HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
int rc = 0;
STAILQ_FOREACH(filter, &vnic->filter, next) {
- rc = bnxt_hwrm_set_filter(bp, vnic, filter);
+ rc = bnxt_hwrm_set_filter(bp, vnic->fw_vnic_id, filter);
if (rc)
break;
}