uint32_t index, uint32_t pool);
static void eth_em_rar_clear(struct rte_eth_dev *dev, uint32_t index);
+static int eth_em_set_mc_addr_list(struct rte_eth_dev *dev,
+                struct ether_addr *mc_addr_set,
+                uint32_t nb_mc_addr);
+
#define EM_FC_PAUSE_TIME 0x0680
#define EM_LINK_UPDATE_CHECK_TIMEOUT 90 /* 9s */
#define EM_LINK_UPDATE_CHECK_INTERVAL 100 /* ms */
.flow_ctrl_set = eth_em_flow_ctrl_set,
.mac_addr_add = eth_em_rar_set,
.mac_addr_remove = eth_em_rar_clear,
+ .set_mc_addr_list = eth_em_set_mc_addr_list,
};
/**
return 0;
}
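+/* Callback for rte_eth_dev_set_mc_addr_list(): pass the application's
+ * multicast address list to the base driver, which programs it into the
+ * device's multicast table array, replacing any previous entries.
+ */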
+static int
+eth_em_set_mc_addr_list(struct rte_eth_dev *dev,
+                struct ether_addr *mc_addr_set,
+                uint32_t nb_mc_addr)
+{
+        struct e1000_hw *hw;
+
+        hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+        e1000_update_mc_addr_list(hw, (u8 *)mc_addr_set, nb_mc_addr);
+        return 0;
+}
+
struct rte_driver em_pmd_drv = {
.type = PMD_PDEV,
.init = rte_em_pmd_init,
enum rte_filter_op filter_op,
void *arg);
+static int eth_igb_set_mc_addr_list(struct rte_eth_dev *dev,
+                struct ether_addr *mc_addr_set,
+                uint32_t nb_mc_addr);
+
/*
* Define VF Stats MACRO for Non "cleared on read" register
*/
.rss_hash_update = eth_igb_rss_hash_update,
.rss_hash_conf_get = eth_igb_rss_hash_conf_get,
.filter_ctrl = eth_igb_filter_ctrl,
+ .set_mc_addr_list = eth_igb_set_mc_addr_list,
};
/*
.rx_queue_release = eth_igb_rx_queue_release,
.tx_queue_setup = eth_igb_tx_queue_setup,
.tx_queue_release = eth_igb_tx_queue_release,
+ .set_mc_addr_list = eth_igb_set_mc_addr_list,
};
/**
return ret;
}
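+/* Callback for rte_eth_dev_set_mc_addr_list(): pass the application's
+ * multicast address list to the base driver, which programs it into the
+ * device's multicast table array, replacing any previous entries.
+ */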
+static int
+eth_igb_set_mc_addr_list(struct rte_eth_dev *dev,
+                struct ether_addr *mc_addr_set,
+                uint32_t nb_mc_addr)
+{
+        struct e1000_hw *hw;
+
+        hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+        e1000_update_mc_addr_list(hw, (u8 *)mc_addr_set, nb_mc_addr);
+        return 0;
+}
+
static struct rte_driver pmd_igb_drv = {
.type = PMD_PDEV,
.init = rte_igb_pmd_init,