static const struct eth_dev_ops ngbe_eth_dev_ops;
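+/* Extended statistics: each entry maps an exported xstat name to the byte
+ * offset of the corresponding counter in struct ngbe_hw_stats. HW_XSTAT()
+ * reuses the member name as the stat name; HW_XSTAT_NAME() exports a member
+ * under an explicit name.
+ */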
+#define HW_XSTAT(m) {#m, offsetof(struct ngbe_hw_stats, m)}
+#define HW_XSTAT_NAME(m, n) {n, offsetof(struct ngbe_hw_stats, m)}
+static const struct rte_ngbe_xstats_name_off rte_ngbe_stats_strings[] = {
+ /* MNG RxTx */
+ HW_XSTAT(mng_bmc2host_packets),
+ HW_XSTAT(mng_host2bmc_packets),
+ /* Basic RxTx */
+ HW_XSTAT(rx_packets),
+ HW_XSTAT(tx_packets),
+ HW_XSTAT(rx_bytes),
+ HW_XSTAT(tx_bytes),
+ HW_XSTAT(rx_total_bytes),
+ HW_XSTAT(rx_total_packets),
+ HW_XSTAT(tx_total_packets),
+ HW_XSTAT(rx_total_missed_packets),
+ HW_XSTAT(rx_broadcast_packets),
+ HW_XSTAT(rx_multicast_packets),
+ HW_XSTAT(rx_management_packets),
+ HW_XSTAT(tx_management_packets),
+ HW_XSTAT(rx_management_dropped),
+
+ /* Basic Error */
+ HW_XSTAT(rx_crc_errors),
+ HW_XSTAT(rx_illegal_byte_errors),
+ HW_XSTAT(rx_error_bytes),
+ HW_XSTAT(rx_mac_short_packet_dropped),
+ HW_XSTAT(rx_length_errors),
+ HW_XSTAT(rx_undersize_errors),
+ HW_XSTAT(rx_fragment_errors),
+ HW_XSTAT(rx_oversize_errors),
+ HW_XSTAT(rx_jabber_errors),
+ HW_XSTAT(rx_l3_l4_xsum_error),
+ HW_XSTAT(mac_local_errors),
+ HW_XSTAT(mac_remote_errors),
+
+ /* MACSEC */
+ HW_XSTAT(tx_macsec_pkts_untagged),
+ HW_XSTAT(tx_macsec_pkts_encrypted),
+ HW_XSTAT(tx_macsec_pkts_protected),
+ HW_XSTAT(tx_macsec_octets_encrypted),
+ HW_XSTAT(tx_macsec_octets_protected),
+ HW_XSTAT(rx_macsec_pkts_untagged),
+ HW_XSTAT(rx_macsec_pkts_badtag),
+ HW_XSTAT(rx_macsec_pkts_nosci),
+ HW_XSTAT(rx_macsec_pkts_unknownsci),
+ HW_XSTAT(rx_macsec_octets_decrypted),
+ HW_XSTAT(rx_macsec_octets_validated),
+ HW_XSTAT(rx_macsec_sc_pkts_unchecked),
+ HW_XSTAT(rx_macsec_sc_pkts_delayed),
+ HW_XSTAT(rx_macsec_sc_pkts_late),
+ HW_XSTAT(rx_macsec_sa_pkts_ok),
+ HW_XSTAT(rx_macsec_sa_pkts_invalid),
+ HW_XSTAT(rx_macsec_sa_pkts_notvalid),
+ HW_XSTAT(rx_macsec_sa_pkts_unusedsa),
+ HW_XSTAT(rx_macsec_sa_pkts_notusingsa),
+
+ /* MAC RxTx */
+ HW_XSTAT(rx_size_64_packets),
+ HW_XSTAT(rx_size_65_to_127_packets),
+ HW_XSTAT(rx_size_128_to_255_packets),
+ HW_XSTAT(rx_size_256_to_511_packets),
+ HW_XSTAT(rx_size_512_to_1023_packets),
+ HW_XSTAT(rx_size_1024_to_max_packets),
+ HW_XSTAT(tx_size_64_packets),
+ HW_XSTAT(tx_size_65_to_127_packets),
+ HW_XSTAT(tx_size_128_to_255_packets),
+ HW_XSTAT(tx_size_256_to_511_packets),
+ HW_XSTAT(tx_size_512_to_1023_packets),
+ HW_XSTAT(tx_size_1024_to_max_packets),
+
+ /* Flow Control */
+ HW_XSTAT(tx_xon_packets),
+ HW_XSTAT(rx_xon_packets),
+ HW_XSTAT(tx_xoff_packets),
+ HW_XSTAT(rx_xoff_packets),
+
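+ /* The same flow control counters, exported again under alternative names */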
+ HW_XSTAT_NAME(tx_xon_packets, "tx_flow_control_xon_packets"),
+ HW_XSTAT_NAME(rx_xon_packets, "rx_flow_control_xon_packets"),
+ HW_XSTAT_NAME(tx_xoff_packets, "tx_flow_control_xoff_packets"),
+ HW_XSTAT_NAME(rx_xoff_packets, "rx_flow_control_xoff_packets"),
+};
+
+#define NGBE_NB_HW_STATS (sizeof(rte_ngbe_stats_strings) / \
+ sizeof(rte_ngbe_stats_strings[0]))
+
+/* Per-queue statistics */
+#define QP_XSTAT(m) {#m, offsetof(struct ngbe_hw_stats, qp[0].m)}
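+/* Offsets are recorded for queue 0; ngbe_get_offset_by_id() rebases them
+ * to the requested queue.
+ */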
+static const struct rte_ngbe_xstats_name_off rte_ngbe_qp_strings[] = {
+ QP_XSTAT(rx_qp_packets),
+ QP_XSTAT(tx_qp_packets),
+ QP_XSTAT(rx_qp_bytes),
+ QP_XSTAT(tx_qp_bytes),
+ QP_XSTAT(rx_qp_mc_packets),
+};
+
+#define NGBE_NB_QP_STATS (sizeof(rte_ngbe_qp_strings) / \
+ sizeof(rte_ngbe_qp_strings[0]))
+
static inline int32_t
ngbe_pf_reset_hw(struct ngbe_hw *hw)
{
goto error;
}
+ /* Skip link setup if loopback mode is enabled. */
+ if (hw->is_pf && dev->data->dev_conf.lpbk_mode)
+ goto skip_link_setup;
+
err = hw->mac.check_link(hw, &speed, &link_up, 0);
if (err != 0)
goto error;
if (err != 0)
goto error;
+skip_link_setup:
+
if (rte_intr_allow_others(intr_handle)) {
ngbe_dev_misc_interrupt_setup(dev);
/* check if lsc interrupt is enabled */
return 0;
}
+/* This function calculates the number of xstats based on the current config */
+static unsigned
+ngbe_xstats_calc_num(struct rte_eth_dev *dev)
+{
+ int nb_queues = max(dev->data->nb_rx_queues, dev->data->nb_tx_queues);
+
+ return NGBE_NB_HW_STATS +
+ NGBE_NB_QP_STATS * nb_queues;
+}
+
+static inline int
+ngbe_get_name_by_id(uint32_t id, char *name, uint32_t size)
+{
+ int nb, st;
+
+ /* Extended stats from ngbe_hw_stats */
+ if (id < NGBE_NB_HW_STATS) {
+ snprintf(name, size, "[hw]%s",
+ rte_ngbe_stats_strings[id].name);
+ return 0;
+ }
+ id -= NGBE_NB_HW_STATS;
+
+ /* Queue Stats */
+ if (id < NGBE_NB_QP_STATS * NGBE_MAX_QP) {
+ nb = id / NGBE_NB_QP_STATS;
+ st = id % NGBE_NB_QP_STATS;
+ snprintf(name, size, "[q%u]%s", nb,
+ rte_ngbe_qp_strings[st].name);
+ return 0;
+ }
+ id -= NGBE_NB_QP_STATS * NGBE_MAX_QP;
+
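+ /* id is beyond the number of supported xstats */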
+ return -(int)(id + 1);
+}
+
+static inline int
+ngbe_get_offset_by_id(uint32_t id, uint32_t *offset)
+{
+ int nb, st;
+
+ /* Extended stats from ngbe_hw_stats */
+ if (id < NGBE_NB_HW_STATS) {
+ *offset = rte_ngbe_stats_strings[id].offset;
+ return 0;
+ }
+ id -= NGBE_NB_HW_STATS;
+
+ /* Queue Stats */
+ if (id < NGBE_NB_QP_STATS * NGBE_MAX_QP) {
+ nb = id / NGBE_NB_QP_STATS;
+ st = id % NGBE_NB_QP_STATS;
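+ /* Each qp[] entry holds NGBE_NB_QP_STATS contiguous 64-bit counters,
+ * so step by that stride to reach queue 'nb'.
+ */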
+ *offset = rte_ngbe_qp_strings[st].offset +
+ nb * (NGBE_NB_QP_STATS * sizeof(uint64_t));
+ return 0;
+ }
+
+ return -1;
+}
+
+static int ngbe_dev_xstats_get_names(struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names, unsigned int limit)
+{
+ unsigned int i, count;
+
+ count = ngbe_xstats_calc_num(dev);
+ if (xstats_names == NULL)
+ return count;
+
+ /* Note: limit >= cnt_stats checked upstream
+ * in rte_eth_xstats_get_names()
+ */
+ limit = min(limit, count);
+
+ /* Extended stats from ngbe_hw_stats */
+ for (i = 0; i < limit; i++) {
+ if (ngbe_get_name_by_id(i, xstats_names[i].name,
+ sizeof(xstats_names[i].name))) {
+ PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
+ break;
+ }
+ }
+
+ return i;
+}
+
+static int ngbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
+ const uint64_t *ids,
+ struct rte_eth_xstat_name *xstats_names,
+ unsigned int limit)
+{
+ unsigned int i;
+
+ if (ids == NULL)
+ return ngbe_dev_xstats_get_names(dev, xstats_names, limit);
+
+ for (i = 0; i < limit; i++) {
+ if (ngbe_get_name_by_id(ids[i], xstats_names[i].name,
+ sizeof(xstats_names[i].name))) {
+ PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
+ return -1;
+ }
+ }
+
+ return i;
+}
+
+static int
+ngbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
+ unsigned int limit)
+{
+ struct ngbe_hw *hw = ngbe_dev_hw(dev);
+ struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
+ unsigned int i, count;
+
+ ngbe_read_stats_registers(hw, hw_stats);
+
+ /* If this is a reset, xstats is NULL and the registers have already
+ * been cleared by reading them above.
+ */
+ count = ngbe_xstats_calc_num(dev);
+ if (xstats == NULL)
+ return count;
+
+ limit = min(limit, ngbe_xstats_calc_num(dev));
+
+ /* Extended stats from ngbe_hw_stats */
+ for (i = 0; i < limit; i++) {
+ uint32_t offset = 0;
+
+ if (ngbe_get_offset_by_id(i, &offset)) {
+ PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
+ break;
+ }
+ xstats[i].value = *(uint64_t *)(((char *)hw_stats) + offset);
+ xstats[i].id = i;
+ }
+
+ return i;
+}
+
+static int
+ngbe_dev_xstats_get_(struct rte_eth_dev *dev, uint64_t *values,
+ unsigned int limit)
+{
+ struct ngbe_hw *hw = ngbe_dev_hw(dev);
+ struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
+ unsigned int i, count;
+
+ ngbe_read_stats_registers(hw, hw_stats);
+
+ /* If this is a reset, values is NULL and the registers have already
+ * been cleared by reading them above.
+ */
+ count = ngbe_xstats_calc_num(dev);
+ if (values == NULL)
+ return count;
+
+ limit = min(limit, ngbe_xstats_calc_num(dev));
+
+ /* Extended stats from ngbe_hw_stats */
+ for (i = 0; i < limit; i++) {
+ uint32_t offset;
+
+ if (ngbe_get_offset_by_id(i, &offset)) {
+ PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
+ break;
+ }
+ values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
+ }
+
+ return i;
+}
+
+static int
+ngbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
+ uint64_t *values, unsigned int limit)
+{
+ struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
+ unsigned int i;
+
+ if (ids == NULL)
+ return ngbe_dev_xstats_get_(dev, values, limit);
+
+ for (i = 0; i < limit; i++) {
+ uint32_t offset;
+
+ if (ngbe_get_offset_by_id(ids[i], &offset)) {
+ PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
+ break;
+ }
+ values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
+ }
+
+ return i;
+}
+
+static int
+ngbe_dev_xstats_reset(struct rte_eth_dev *dev)
+{
+ struct ngbe_hw *hw = ngbe_dev_hw(dev);
+ struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
+
+ /* HW registers are cleared on read */
+ hw->offset_loaded = 0;
+ ngbe_read_stats_registers(hw, hw_stats);
+ hw->offset_loaded = 1;
+
+ /* Reset software totals */
+ memset(hw_stats, 0, sizeof(*hw_stats));
+
+ return 0;
+}
+
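+/* Report the firmware version (the EEPROM image id). Per the ethdev
+ * convention, return 0 on success or, when fw_size is too small, the
+ * buffer size needed including the trailing '\0'.
+ */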
+static int
+ngbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
+{
+ struct ngbe_hw *hw = ngbe_dev_hw(dev);
+ int ret;
+
+ ret = snprintf(fw_version, fw_size, "0x%08x", hw->eeprom_id);
+
+ if (ret < 0)
+ return -EINVAL;
+
+ ret += 1; /* add the size of '\0' */
+ if (fw_size < (size_t)ret)
+ return ret;
+
+ return 0;
+}
+
static int
ngbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct ngbe_hw *hw = ngbe_dev_hw(dev);
dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
dev_info->min_rx_bufsize = 1024;
dev_info->max_rx_pktlen = 15872;
+ dev_info->max_mac_addrs = hw->mac.num_rar_entries;
+ dev_info->max_hash_mac_addrs = NGBE_VMDQ_NUM_UC_MAC;
+ dev_info->max_vfs = pci_dev->max_vfs;
dev_info->rx_queue_offload_capa = ngbe_get_rx_queue_offloads(dev);
dev_info->rx_offload_capa = (ngbe_get_rx_port_offloads(dev) |
dev_info->rx_queue_offload_capa);
return ngbe_dev_link_update_share(dev, wait_to_complete);
}
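+/* Promiscuous and all-multicast modes are controlled through the unicast
+ * (UCP) and multicast (MCP) promiscuous bits of the PSRCTL register.
+ */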
+static int
+ngbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
+{
+ struct ngbe_hw *hw = ngbe_dev_hw(dev);
+ uint32_t fctrl;
+
+ fctrl = rd32(hw, NGBE_PSRCTL);
+ fctrl |= (NGBE_PSRCTL_UCP | NGBE_PSRCTL_MCP);
+ wr32(hw, NGBE_PSRCTL, fctrl);
+
+ return 0;
+}
+
+static int
+ngbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
+{
+ struct ngbe_hw *hw = ngbe_dev_hw(dev);
+ uint32_t fctrl;
+
+ fctrl = rd32(hw, NGBE_PSRCTL);
+ fctrl &= (~NGBE_PSRCTL_UCP);
+ if (dev->data->all_multicast == 1)
+ fctrl |= NGBE_PSRCTL_MCP;
+ else
+ fctrl &= (~NGBE_PSRCTL_MCP);
+ wr32(hw, NGBE_PSRCTL, fctrl);
+
+ return 0;
+}
+
+static int
+ngbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
+{
+ struct ngbe_hw *hw = ngbe_dev_hw(dev);
+ uint32_t fctrl;
+
+ fctrl = rd32(hw, NGBE_PSRCTL);
+ fctrl |= NGBE_PSRCTL_MCP;
+ wr32(hw, NGBE_PSRCTL, fctrl);
+
+ return 0;
+}
+
+static int
+ngbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
+{
+ struct ngbe_hw *hw = ngbe_dev_hw(dev);
+ uint32_t fctrl;
+
+ if (dev->data->promiscuous == 1)
+ return 0; /* must remain in all_multicast mode */
+
+ fctrl = rd32(hw, NGBE_PSRCTL);
+ fctrl &= (~NGBE_PSRCTL_MCP);
+ wr32(hw, NGBE_PSRCTL, fctrl);
+
+ return 0;
+}
+
/**
* It clears the interrupt causes and enables the interrupt.
* It will be called once only during NIC initialized.
ngbe_dev_interrupt_action(dev);
}
+static int
+ngbe_add_rar(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
+ uint32_t index, uint32_t pool)
+{
+ struct ngbe_hw *hw = ngbe_dev_hw(dev);
+ uint32_t enable_addr = 1;
+
+ return ngbe_set_rar(hw, index, mac_addr->addr_bytes,
+ pool, enable_addr);
+}
+
+static void
+ngbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
+{
+ struct ngbe_hw *hw = ngbe_dev_hw(dev);
+
+ ngbe_clear_rar(hw, index);
+}
+
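+/* Replace the default MAC address: clear RAR entry 0 and reprogram it with
+ * the new address, using pool index max_vfs (presumably the PF's pool).
+ */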
+static int
+ngbe_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
+{
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+
+ ngbe_remove_rar(dev, 0);
+ ngbe_add_rar(dev, addr, 0, pci_dev->max_vfs);
+
+ return 0;
+}
+
+static int
+ngbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+{
+ struct ngbe_hw *hw = ngbe_dev_hw(dev);
+ uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + 4;
+ struct rte_eth_dev_data *dev_data = dev->data;
+
+ /* If the device is started, refuse an MTU that requires scattered Rx
+ * support when that feature has not been enabled beforehand.
+ */
+ if (dev_data->dev_started && !dev_data->scattered_rx &&
+ (frame_size + 2 * NGBE_VLAN_TAG_SIZE >
+ dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
+ PMD_INIT_LOG(ERR, "Stop port first.");
+ return -EINVAL;
+ }
+
+ if (hw->mode)
+ wr32m(hw, NGBE_FRMSZ, NGBE_FRMSZ_MAX_MASK,
+ NGBE_FRAME_SIZE_MAX);
+ else
+ wr32m(hw, NGBE_FRMSZ, NGBE_FRMSZ_MAX_MASK,
+ NGBE_FRMSZ_MAX(frame_size));
+
+ return 0;
+}
+
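+/* Hash a unicast address into a 12-bit index for the unicast hash table,
+ * using the address bits selected by mc_filter_type.
+ */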
+static uint32_t
+ngbe_uta_vector(struct ngbe_hw *hw, struct rte_ether_addr *uc_addr)
+{
+ uint32_t vector = 0;
+
+ switch (hw->mac.mc_filter_type) {
+ case 0: /* use bits [47:36] of the address */
+ vector = ((uc_addr->addr_bytes[4] >> 4) |
+ (((uint16_t)uc_addr->addr_bytes[5]) << 4));
+ break;
+ case 1: /* use bits [46:35] of the address */
+ vector = ((uc_addr->addr_bytes[4] >> 3) |
+ (((uint16_t)uc_addr->addr_bytes[5]) << 5));
+ break;
+ case 2: /* use bits [45:34] of the address */
+ vector = ((uc_addr->addr_bytes[4] >> 2) |
+ (((uint16_t)uc_addr->addr_bytes[5]) << 6));
+ break;
+ case 3: /* use bits [43:32] of the address */
+ vector = ((uc_addr->addr_bytes[4]) |
+ (((uint16_t)uc_addr->addr_bytes[5]) << 8));
+ break;
+ default: /* Invalid mc_filter_type */
+ break;
+ }
+
+ /* The hash is only 12 bits wide; mask off the rest to stay within bounds */
+ vector &= 0xFFF;
+ return vector;
+}
+
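+/* Set or clear the hash table bit for one unicast address, enabling unicast
+ * hash filtering whenever at least one entry is in use.
+ */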
+static int
+ngbe_uc_hash_table_set(struct rte_eth_dev *dev,
+ struct rte_ether_addr *mac_addr, uint8_t on)
+{
+ uint32_t vector;
+ uint32_t uta_idx;
+ uint32_t reg_val;
+ uint32_t uta_mask;
+ uint32_t psrctl;
+
+ struct ngbe_hw *hw = ngbe_dev_hw(dev);
+ struct ngbe_uta_info *uta_info = NGBE_DEV_UTA_INFO(dev);
+
+ vector = ngbe_uta_vector(hw, mac_addr);
+ uta_idx = (vector >> 5) & 0x7F;
+ uta_mask = 0x1UL << (vector & 0x1F);
+
+ if (!!on == !!(uta_info->uta_shadow[uta_idx] & uta_mask))
+ return 0;
+
+ reg_val = rd32(hw, NGBE_UCADDRTBL(uta_idx));
+ if (on) {
+ uta_info->uta_in_use++;
+ reg_val |= uta_mask;
+ uta_info->uta_shadow[uta_idx] |= uta_mask;
+ } else {
+ uta_info->uta_in_use--;
+ reg_val &= ~uta_mask;
+ uta_info->uta_shadow[uta_idx] &= ~uta_mask;
+ }
+
+ wr32(hw, NGBE_UCADDRTBL(uta_idx), reg_val);
+
+ psrctl = rd32(hw, NGBE_PSRCTL);
+ if (uta_info->uta_in_use > 0)
+ psrctl |= NGBE_PSRCTL_UCHFENA;
+ else
+ psrctl &= ~NGBE_PSRCTL_UCHFENA;
+
+ psrctl &= ~NGBE_PSRCTL_ADHF12_MASK;
+ psrctl |= NGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
+ wr32(hw, NGBE_PSRCTL, psrctl);
+
+ return 0;
+}
+
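+/* Set or clear every entry of the unicast hash table in one go */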
+static int
+ngbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
+{
+ struct ngbe_hw *hw = ngbe_dev_hw(dev);
+ struct ngbe_uta_info *uta_info = NGBE_DEV_UTA_INFO(dev);
+ uint32_t psrctl;
+ int i;
+
+ if (on) {
+ for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
+ uta_info->uta_shadow[i] = ~0;
+ wr32(hw, NGBE_UCADDRTBL(i), ~0);
+ }
+ } else {
+ for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
+ uta_info->uta_shadow[i] = 0;
+ wr32(hw, NGBE_UCADDRTBL(i), 0);
+ }
+ }
+
+ psrctl = rd32(hw, NGBE_PSRCTL);
+ if (on)
+ psrctl |= NGBE_PSRCTL_UCHFENA;
+ else
+ psrctl &= ~NGBE_PSRCTL_UCHFENA;
+
+ psrctl &= ~NGBE_PSRCTL_ADHF12_MASK;
+ psrctl |= NGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
+ wr32(hw, NGBE_PSRCTL, psrctl);
+
+ return 0;
+}
+
/**
* Set the IVAR registers, mapping interrupt causes to vectors
* @param hw
| NGBE_ITR_WRDSA);
}
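+/* Iterator handed to update_mc_addr_list(): return the current address,
+ * advance the list pointer, and map every address to pool 0.
+ */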
+static u8 *
+ngbe_dev_addr_list_itr(__rte_unused struct ngbe_hw *hw,
+ u8 **mc_addr_ptr, u32 *vmdq)
+{
+ u8 *mc_addr;
+
+ *vmdq = 0;
+ mc_addr = *mc_addr_ptr;
+ *mc_addr_ptr = (mc_addr + sizeof(struct rte_ether_addr));
+ return mc_addr;
+}
+
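+/* Program the complete multicast filter list; the trailing TRUE asks the
+ * base code to clear the previous table before writing the new one.
+ */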
+int
+ngbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
+ struct rte_ether_addr *mc_addr_set,
+ uint32_t nb_mc_addr)
+{
+ struct ngbe_hw *hw = ngbe_dev_hw(dev);
+ u8 *mc_addr_list;
+
+ mc_addr_list = (u8 *)mc_addr_set;
+ return hw->mac.update_mc_addr_list(hw, mc_addr_list, nb_mc_addr,
+ ngbe_dev_addr_list_itr, TRUE);
+}
+
static const struct eth_dev_ops ngbe_eth_dev_ops = {
.dev_configure = ngbe_dev_configure,
.dev_infos_get = ngbe_dev_info_get,
.dev_stop = ngbe_dev_stop,
.dev_close = ngbe_dev_close,
.dev_reset = ngbe_dev_reset,
+ .promiscuous_enable = ngbe_dev_promiscuous_enable,
+ .promiscuous_disable = ngbe_dev_promiscuous_disable,
+ .allmulticast_enable = ngbe_dev_allmulticast_enable,
+ .allmulticast_disable = ngbe_dev_allmulticast_disable,
.link_update = ngbe_dev_link_update,
.stats_get = ngbe_dev_stats_get,
+ .xstats_get = ngbe_dev_xstats_get,
+ .xstats_get_by_id = ngbe_dev_xstats_get_by_id,
.stats_reset = ngbe_dev_stats_reset,
+ .xstats_reset = ngbe_dev_xstats_reset,
+ .xstats_get_names = ngbe_dev_xstats_get_names,
+ .xstats_get_names_by_id = ngbe_dev_xstats_get_names_by_id,
+ .fw_version_get = ngbe_fw_version_get,
.dev_supported_ptypes_get = ngbe_dev_supported_ptypes_get,
+ .mtu_set = ngbe_dev_mtu_set,
.vlan_filter_set = ngbe_vlan_filter_set,
.vlan_tpid_set = ngbe_vlan_tpid_set,
.vlan_offload_set = ngbe_vlan_offload_set,
.rx_queue_release = ngbe_dev_rx_queue_release,
.tx_queue_setup = ngbe_dev_tx_queue_setup,
.tx_queue_release = ngbe_dev_tx_queue_release,
+ .mac_addr_add = ngbe_add_rar,
+ .mac_addr_remove = ngbe_remove_rar,
+ .mac_addr_set = ngbe_set_default_mac_addr,
+ .uc_hash_table_set = ngbe_uc_hash_table_set,
+ .uc_all_hash_table_set = ngbe_uc_all_hash_table_set,
+ .set_mc_addr_list = ngbe_dev_set_mc_addr_list,
.rx_burst_mode_get = ngbe_rx_burst_mode_get,
.tx_burst_mode_get = ngbe_tx_burst_mode_get,
};