        return 0;
}
+
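+/*
+ * Translate the ETH_VMDQ_ACCEPT_* bits of an rx_mask into the matching
+ * IXGBE_VMOLR_* register bits and OR them into orig_val.
+ */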
+uint32_t
+ixgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val)
+{
+        uint32_t new_val = orig_val;
+
+        if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG)
+                new_val |= IXGBE_VMOLR_AUPE;
+        if (rx_mask & ETH_VMDQ_ACCEPT_HASH_MC)
+                new_val |= IXGBE_VMOLR_ROMPE;
+        if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC)
+                new_val |= IXGBE_VMOLR_ROPE;
+        if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
+                new_val |= IXGBE_VMOLR_BAM;
+        if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
+                new_val |= IXGBE_VMOLR_MPE;
+
+        return new_val;
+}
+
static int
ixgbe_set_pool_rx_mode(struct rte_eth_dev *dev, uint16_t pool,
                uint16_t rx_mask, uint8_t on)
        if (ixgbe_vmdq_mode_check(hw) < 0)
                return (-ENOTSUP);
-        if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG )
-                val |= IXGBE_VMOLR_AUPE;
-        if (rx_mask & ETH_VMDQ_ACCEPT_HASH_MC )
-                val |= IXGBE_VMOLR_ROMPE;
-        if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC)
-                val |= IXGBE_VMOLR_ROPE;
-        if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
-                val |= IXGBE_VMOLR_BAM;
-        if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
-                val |= IXGBE_VMOLR_MPE;
+        val = ixgbe_convert_vm_rx_mask_to_val(rx_mask, val);
        if (on)
                vmolr |= val;
        struct ixgbe_hw *hw;
        enum rte_eth_nb_pools num_pools;
        uint32_t mrqc, vt_ctl, vlanctrl;
+        uint32_t vmolr = 0;
        int i;
        PMD_INIT_FUNC_TRACE();
        IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);
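+        /* Apply the configured VMDq rx_mode to each pool's VMOLR register */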
+        for (i = 0; i < (int)num_pools; i++) {
+                vmolr = ixgbe_convert_vm_rx_mask_to_val(cfg->rx_mode, vmolr);
+                IXGBE_WRITE_REG(hw, IXGBE_VMOLR(i), vmolr);
+        }
+
        /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
        vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
        vlanctrl |= IXGBE_VLNCTRL_VFE ; /* enable vlan filters */