diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index bfdf82a96c..e8362c07e0 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -21,6 +21,72 @@
 #include "base/txgbe.h"
 #include "txgbe_ethdev.h"
 #include "txgbe_rxtx.h"
+#include "txgbe_regs_group.h"
+
+static const struct reg_info txgbe_regs_general[] = {
+	{TXGBE_RST, 1, 1, "TXGBE_RST"},
+	{TXGBE_STAT, 1, 1, "TXGBE_STAT"},
+	{TXGBE_PORTCTL, 1, 1, "TXGBE_PORTCTL"},
+	{TXGBE_SDP, 1, 1, "TXGBE_SDP"},
+	{TXGBE_SDPCTL, 1, 1, "TXGBE_SDPCTL"},
+	{TXGBE_LEDCTL, 1, 1, "TXGBE_LEDCTL"},
+	{0, 0, 0, ""}
+};
+
+static const struct reg_info txgbe_regs_nvm[] = {
+	{0, 0, 0, ""}
+};
+
+static const struct reg_info txgbe_regs_interrupt[] = {
+	{0, 0, 0, ""}
+};
+
+static const struct reg_info txgbe_regs_fctl_others[] = {
+	{0, 0, 0, ""}
+};
+
+static const struct reg_info txgbe_regs_rxdma[] = {
+	{0, 0, 0, ""}
+};
+
+static const struct reg_info txgbe_regs_rx[] = {
+	{0, 0, 0, ""}
+};
+
+static const struct reg_info txgbe_regs_tx[] = {
+	{0, 0, 0, ""}
+};
+
+static const struct reg_info txgbe_regs_wakeup[] = {
+	{0, 0, 0, ""}
+};
+
+static const struct reg_info txgbe_regs_dcb[] = {
+	{0, 0, 0, ""}
+};
+
+static const struct reg_info txgbe_regs_mac[] = {
+	{0, 0, 0, ""}
+};
+
+static const struct reg_info txgbe_regs_diagnostic[] = {
+	{0, 0, 0, ""},
+};
+
+/* PF registers */
+static const struct reg_info *txgbe_regs_others[] = {
+	txgbe_regs_general,
+	txgbe_regs_nvm,
+	txgbe_regs_interrupt,
+	txgbe_regs_fctl_others,
+	txgbe_regs_rxdma,
+	txgbe_regs_rx,
+	txgbe_regs_tx,
+	txgbe_regs_wakeup,
+	txgbe_regs_dcb,
+	txgbe_regs_mac,
+	txgbe_regs_diagnostic,
+	NULL};
+
 static int txgbe_dev_set_link_up(struct rte_eth_dev *dev);
 static int txgbe_dev_set_link_down(struct rte_eth_dev *dev);
@@ -43,6 +109,8 @@ static void txgbe_dev_interrupt_handler(void *param);
 static void txgbe_dev_interrupt_delayed_handler(void *param);
 static void txgbe_configure_msix(struct rte_eth_dev *dev);
 
+static int txgbe_filter_restore(struct rte_eth_dev *dev);
+
 #define TXGBE_SET_HWSTRIP(h, q) do {\
 	uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
 	uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
@@ -404,16 +472,20 @@ eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
 	struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(eth_dev);
 	struct txgbe_hwstrip *hwstrip = TXGBE_DEV_HWSTRIP(eth_dev);
 	struct txgbe_dcb_config *dcb_config = TXGBE_DEV_DCB_CONFIG(eth_dev);
+	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(eth_dev);
 	struct txgbe_bw_conf *bw_conf = TXGBE_DEV_BW_CONF(eth_dev);
 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
 	const struct rte_memzone *mz;
 	uint32_t ctrl_ext;
 	uint16_t csum;
-	int err, i;
+	int err, i, ret;
 
 	PMD_INIT_FUNC_TRACE();
 
 	eth_dev->dev_ops = &txgbe_eth_dev_ops;
+	eth_dev->rx_queue_count = txgbe_dev_rx_queue_count;
+	eth_dev->rx_descriptor_status = txgbe_dev_rx_descriptor_status;
+	eth_dev->tx_descriptor_status = txgbe_dev_tx_descriptor_status;
 	eth_dev->rx_pkt_burst = &txgbe_recv_pkts;
 	eth_dev->tx_pkt_burst = &txgbe_xmit_pkts;
 	eth_dev->tx_pkt_prepare = &txgbe_prep_pkts;
@@ -570,7 +642,14 @@ eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
 	memset(hwstrip, 0, sizeof(*hwstrip));
 
 	/* initialize PF if max_vfs not zero */
-	txgbe_pf_host_init(eth_dev);
+	ret = txgbe_pf_host_init(eth_dev);
+	if (ret) {
+		rte_free(eth_dev->data->mac_addrs);
+		eth_dev->data->mac_addrs = NULL;
+		rte_free(eth_dev->data->hash_mac_addrs);
+		eth_dev->data->hash_mac_addrs = NULL;
+		return ret;
+	}
 
 	ctrl_ext = rd32(hw, TXGBE_PORTCTL);
 	/* let hardware know driver is loaded */
@@ -601,6 +680,13 @@ eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
 	/* enable support intr */
 	txgbe_enable_intr(eth_dev);
 
+	/* initialize filter info */
+	memset(filter_info, 0,
+	       sizeof(struct txgbe_filter_info));
+
+	/* initialize 5tuple filter list */
+	TAILQ_INIT(&filter_info->fivetuple_list);
+
 	/* initialize bandwidth configuration info */
 	memset(bw_conf, 0, sizeof(struct txgbe_bw_conf));
 
@@ -620,6 +706,23 @@ eth_txgbe_dev_uninit(struct rte_eth_dev *eth_dev)
 	return 0;
 }
 
+static int txgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev)
+{
+	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(eth_dev);
+	struct txgbe_5tuple_filter *p_5tuple;
+
+	while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list))) {
+		TAILQ_REMOVE(&filter_info->fivetuple_list,
+			     p_5tuple,
+			     entries);
+		rte_free(p_5tuple);
+	}
+	memset(filter_info->fivetuple_mask, 0,
+	       sizeof(uint32_t) * TXGBE_5TUPLE_ARRAY_SIZE);
+
+	return 0;
+}
+
 static int
 eth_txgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 		struct rte_pci_device *pci_dev)
@@ -1510,6 +1613,7 @@ skip_link_setup:
 
 	/* resume enabled intr since hw reset */
 	txgbe_enable_intr(dev);
+	txgbe_filter_restore(dev);
 
 	/*
 	 * Update link status right before return, because it may
@@ -1698,6 +1802,9 @@ txgbe_dev_close(struct rte_eth_dev *dev)
 	rte_free(dev->data->hash_mac_addrs);
 	dev->data->hash_mac_addrs = NULL;
 
+	/* Remove all ntuple filters of the device */
+	txgbe_ntuple_filter_uninit(dev);
+
 	return ret;
 }
 
@@ -2064,9 +2171,8 @@ txgbe_get_offset_by_id(uint32_t id, uint32_t *offset)
 			nb * (TXGBE_NB_QP_STATS * sizeof(uint64_t));
 		return 0;
 	}
-	id -= TXGBE_NB_QP_STATS * TXGBE_MAX_QP;
 
-	return -(int)(id + 1);
+	return -1;
 }
 
 static int txgbe_dev_xstats_get_names(struct rte_eth_dev *dev,
@@ -2223,6 +2329,27 @@ txgbe_dev_xstats_reset(struct rte_eth_dev *dev)
 	return 0;
 }
 
+static int
+txgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
+{
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	u16 eeprom_verh, eeprom_verl;
+	u32 etrack_id;
+	int ret;
+
+	hw->rom.readw_sw(hw, TXGBE_EEPROM_VERSION_H, &eeprom_verh);
+	hw->rom.readw_sw(hw, TXGBE_EEPROM_VERSION_L, &eeprom_verl);
+
+	etrack_id = (eeprom_verh << 16) | eeprom_verl;
+	ret = snprintf(fw_version, fw_size, "0x%08x", etrack_id);
+
+	ret += 1; /* add the size of '\0' */
+	if (fw_size < (u32)ret)
+		return ret;
+	else
+		return 0;
+}
+
 static int
 txgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 {
@@ -2402,6 +2529,65 @@ txgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 	return txgbe_dev_link_update_share(dev, wait_to_complete);
 }
 
+static int
+txgbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
+{
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	uint32_t fctrl;
+
+	fctrl = rd32(hw, TXGBE_PSRCTL);
+	fctrl |= (TXGBE_PSRCTL_UCP | TXGBE_PSRCTL_MCP);
+	wr32(hw, TXGBE_PSRCTL, fctrl);
+
+	return 0;
+}
+
+static int
+txgbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
+{
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	uint32_t fctrl;
+
+	fctrl = rd32(hw, TXGBE_PSRCTL);
+	fctrl &= (~TXGBE_PSRCTL_UCP);
+	if (dev->data->all_multicast == 1)
+		fctrl |= TXGBE_PSRCTL_MCP;
+	else
+		fctrl &= (~TXGBE_PSRCTL_MCP);
+	wr32(hw, TXGBE_PSRCTL, fctrl);
+
+	return 0;
+}
+
+static int
+txgbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
+{
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	uint32_t fctrl;
+
+	fctrl = rd32(hw, TXGBE_PSRCTL);
+	fctrl |= TXGBE_PSRCTL_MCP;
+	wr32(hw, TXGBE_PSRCTL, fctrl);
+
+	return 0;
+}
+
+static int
+txgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
+{
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	uint32_t fctrl;
+
+	if (dev->data->promiscuous == 1)
+		return 0; /* must remain in all_multicast mode */
+
+	fctrl = rd32(hw, TXGBE_PSRCTL);
+	fctrl &= (~TXGBE_PSRCTL_MCP);
+	wr32(hw, TXGBE_PSRCTL, fctrl);
+
+	return 0;
+}
+
 /**
  * It clears the interrupt causes and enables the interrupt.
  * It will be called once only during nic initialized.
@@ -2695,6 +2881,24 @@ txgbe_dev_interrupt_handler(void *param)
 	txgbe_dev_interrupt_action(dev, dev->intr_handle);
 }
 
+static int
+txgbe_dev_led_on(struct rte_eth_dev *dev)
+{
+	struct txgbe_hw *hw;
+
+	hw = TXGBE_DEV_HW(dev);
+	return txgbe_led_on(hw, 4) == 0 ? 0 : -ENOTSUP;
+}
+
+static int
+txgbe_dev_led_off(struct rte_eth_dev *dev)
+{
+	struct txgbe_hw *hw;
+
+	hw = TXGBE_DEV_HW(dev);
+	return txgbe_led_off(hw, 4) == 0 ? 0 : -ENOTSUP;
+}
+
 static int
 txgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 {
@@ -2967,6 +3171,46 @@ txgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
 	return 0;
 }
 
+static int
+txgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+{
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	struct rte_eth_dev_info dev_info;
+	uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
+	struct rte_eth_dev_data *dev_data = dev->data;
+	int ret;
+
+	ret = txgbe_dev_info_get(dev, &dev_info);
+	if (ret != 0)
+		return ret;
+
+	/* check that mtu is within the allowed range */
+	if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen)
+		return -EINVAL;
+
+	/* If device is started, refuse mtu that requires the support of
+	 * scattered packets when this feature has not been enabled before.
+	 */
+	if (dev_data->dev_started && !dev_data->scattered_rx &&
+	    (frame_size + 2 * TXGBE_VLAN_TAG_SIZE >
+	     dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
+		PMD_INIT_LOG(ERR, "Stop port first.");
+		return -EINVAL;
+	}
+
+	/* update max frame size */
+	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
+
+	if (hw->mode)
+		wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK,
+			TXGBE_FRAME_SIZE_MAX);
+	else
+		wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK,
+			TXGBE_FRMSZ_MAX(frame_size));
+
+	return 0;
+}
+
 static uint32_t
 txgbe_uta_vector(struct txgbe_hw *hw, struct rte_ether_addr *uc_addr)
 {
@@ -3267,6 +3511,426 @@ txgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
 	return 0;
 }
 
+int
+txgbe_syn_filter_set(struct rte_eth_dev *dev,
+			struct rte_eth_syn_filter *filter,
+			bool add)
+{
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+	uint32_t syn_info;
+	uint32_t synqf;
+
+	if (filter->queue >= TXGBE_MAX_RX_QUEUE_NUM)
+		return -EINVAL;
+
+	syn_info = filter_info->syn_info;
+
+	if (add) {
+		if (syn_info & TXGBE_SYNCLS_ENA)
+			return -EINVAL;
+		synqf = (uint32_t)TXGBE_SYNCLS_QPID(filter->queue);
+		synqf |= TXGBE_SYNCLS_ENA;
+
+		if (filter->hig_pri)
+			synqf |= TXGBE_SYNCLS_HIPRIO;
+		else
+			synqf &= ~TXGBE_SYNCLS_HIPRIO;
+	} else {
+		synqf = rd32(hw, TXGBE_SYNCLS);
+		if (!(syn_info & TXGBE_SYNCLS_ENA))
+			return -ENOENT;
+		synqf &= ~(TXGBE_SYNCLS_QPID_MASK | TXGBE_SYNCLS_ENA);
+	}
+
+	filter_info->syn_info = synqf;
+	wr32(hw, TXGBE_SYNCLS, synqf);
+	txgbe_flush(hw);
+	return 0;
+}
+
+static inline enum txgbe_5tuple_protocol
+convert_protocol_type(uint8_t protocol_value)
+{
+	if (protocol_value == IPPROTO_TCP)
+		return TXGBE_5TF_PROT_TCP;
+	else if (protocol_value == IPPROTO_UDP)
+		return TXGBE_5TF_PROT_UDP;
+	else if (protocol_value == IPPROTO_SCTP)
+		return TXGBE_5TF_PROT_SCTP;
+	else
+		return TXGBE_5TF_PROT_NONE;
+}
+
+/* inject a 5-tuple filter to HW */
+static inline void
+txgbe_inject_5tuple_filter(struct rte_eth_dev *dev,
+			   struct txgbe_5tuple_filter *filter)
+{
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	int i;
+	uint32_t ftqf, sdpqf;
+	uint32_t l34timir = 0;
+	uint32_t mask = TXGBE_5TFCTL0_MASK;
+
+	i = filter->index;
+	sdpqf = TXGBE_5TFPORT_DST(be_to_le16(filter->filter_info.dst_port));
+	sdpqf |= TXGBE_5TFPORT_SRC(be_to_le16(filter->filter_info.src_port));
+
+	ftqf = TXGBE_5TFCTL0_PROTO(filter->filter_info.proto);
+	ftqf |= TXGBE_5TFCTL0_PRI(filter->filter_info.priority);
+	if (filter->filter_info.src_ip_mask == 0) /* 0 means compare. */
+		mask &= ~TXGBE_5TFCTL0_MSADDR;
+	if (filter->filter_info.dst_ip_mask == 0)
+		mask &= ~TXGBE_5TFCTL0_MDADDR;
+	if (filter->filter_info.src_port_mask == 0)
+		mask &= ~TXGBE_5TFCTL0_MSPORT;
+	if (filter->filter_info.dst_port_mask == 0)
+		mask &= ~TXGBE_5TFCTL0_MDPORT;
+	if (filter->filter_info.proto_mask == 0)
+		mask &= ~TXGBE_5TFCTL0_MPROTO;
+	ftqf |= mask;
+	ftqf |= TXGBE_5TFCTL0_MPOOL;
+	ftqf |= TXGBE_5TFCTL0_ENA;
+
+	wr32(hw, TXGBE_5TFDADDR(i), be_to_le32(filter->filter_info.dst_ip));
+	wr32(hw, TXGBE_5TFSADDR(i), be_to_le32(filter->filter_info.src_ip));
+	wr32(hw, TXGBE_5TFPORT(i), sdpqf);
+	wr32(hw, TXGBE_5TFCTL0(i), ftqf);
+
+	l34timir |= TXGBE_5TFCTL1_QP(filter->queue);
+	wr32(hw, TXGBE_5TFCTL1(i), l34timir);
+}
+
+/*
+ * add a 5tuple filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * index: the index the filter allocates.
+ * filter: pointer to the filter that will be added.
+ * rx_queue: the queue id the filter assigned to.
+ *
+ * @return
+ *    - On success, zero.
+ *    - On failure, a negative value.
+ */
+static int
+txgbe_add_5tuple_filter(struct rte_eth_dev *dev,
+			struct txgbe_5tuple_filter *filter)
+{
+	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+	int i, idx, shift;
+
+	/*
+	 * look for an unused 5tuple filter index,
+	 * and insert the filter into the list.
+	 */
+	for (i = 0; i < TXGBE_MAX_FTQF_FILTERS; i++) {
+		idx = i / (sizeof(uint32_t) * NBBY);
+		shift = i % (sizeof(uint32_t) * NBBY);
+		if (!(filter_info->fivetuple_mask[idx] & (1 << shift))) {
+			filter_info->fivetuple_mask[idx] |= 1 << shift;
+			filter->index = i;
+			TAILQ_INSERT_TAIL(&filter_info->fivetuple_list,
+					  filter,
+					  entries);
+			break;
+		}
+	}
+	if (i >= TXGBE_MAX_FTQF_FILTERS) {
+		PMD_DRV_LOG(ERR, "5tuple filters are full.");
+		return -ENOSYS;
+	}
+
+	txgbe_inject_5tuple_filter(dev, filter);
+
+	return 0;
+}
+
+/*
+ * remove a 5tuple filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * filter: pointer to the filter that will be removed.
+ */
+static void
+txgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
+			   struct txgbe_5tuple_filter *filter)
+{
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+	uint16_t index = filter->index;
+
+	filter_info->fivetuple_mask[index / (sizeof(uint32_t) * NBBY)] &=
+		~(1 << (index % (sizeof(uint32_t) * NBBY)));
+	TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries);
+	rte_free(filter);
+
+	wr32(hw, TXGBE_5TFDADDR(index), 0);
+	wr32(hw, TXGBE_5TFSADDR(index), 0);
+	wr32(hw, TXGBE_5TFPORT(index), 0);
+	wr32(hw, TXGBE_5TFCTL0(index), 0);
+	wr32(hw, TXGBE_5TFCTL1(index), 0);
+}
+
+static inline struct txgbe_5tuple_filter *
+txgbe_5tuple_filter_lookup(struct txgbe_5tuple_filter_list *filter_list,
+			   struct txgbe_5tuple_filter_info *key)
+{
+	struct txgbe_5tuple_filter *it;
+
+	TAILQ_FOREACH(it, filter_list, entries) {
+		if (memcmp(key, &it->filter_info,
+			sizeof(struct txgbe_5tuple_filter_info)) == 0) {
+			return it;
+		}
+	}
+	return NULL;
+}
+
+/* translate elements in struct rte_eth_ntuple_filter
+ * to struct txgbe_5tuple_filter_info
+ */
+static inline int
+ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter *filter,
+			struct txgbe_5tuple_filter_info *filter_info)
+{
+	if (filter->queue >= TXGBE_MAX_RX_QUEUE_NUM ||
+		filter->priority > TXGBE_5TUPLE_MAX_PRI ||
+		filter->priority < TXGBE_5TUPLE_MIN_PRI)
+		return -EINVAL;
+
+	switch (filter->dst_ip_mask) {
+	case UINT32_MAX:
+		filter_info->dst_ip_mask = 0;
+		filter_info->dst_ip = filter->dst_ip;
+		break;
+	case 0:
+		filter_info->dst_ip_mask = 1;
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "invalid dst_ip mask.");
+		return -EINVAL;
+	}
+
+	switch (filter->src_ip_mask) {
+	case UINT32_MAX:
+		filter_info->src_ip_mask = 0;
+		filter_info->src_ip = filter->src_ip;
+		break;
+	case 0:
+		filter_info->src_ip_mask = 1;
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "invalid src_ip mask.");
+		return -EINVAL;
+	}
+
+	switch (filter->dst_port_mask) {
+	case UINT16_MAX:
+		filter_info->dst_port_mask = 0;
+		filter_info->dst_port = filter->dst_port;
+		break;
+	case 0:
+		filter_info->dst_port_mask = 1;
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "invalid dst_port mask.");
+		return -EINVAL;
+	}
+
+	switch (filter->src_port_mask) {
+	case UINT16_MAX:
+		filter_info->src_port_mask = 0;
+		filter_info->src_port = filter->src_port;
+		break;
+	case 0:
+		filter_info->src_port_mask = 1;
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "invalid src_port mask.");
+		return -EINVAL;
+	}
+
+	switch (filter->proto_mask) {
+	case UINT8_MAX:
+		filter_info->proto_mask = 0;
+		filter_info->proto =
+			convert_protocol_type(filter->proto);
+		break;
+	case 0:
+		filter_info->proto_mask = 1;
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "invalid protocol mask.");
+		return -EINVAL;
+	}
+
+	filter_info->priority = (uint8_t)filter->priority;
+	return 0;
+}
+
+/*
+ * add or delete a ntuple filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
+ * add: if true, add filter, if false, remove filter
+ *
+ * @return
+ *    - On success, zero.
+ *    - On failure, a negative value.
+ */
+int
+txgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
+			struct rte_eth_ntuple_filter *ntuple_filter,
+			bool add)
+{
+	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+	struct txgbe_5tuple_filter_info filter_5tuple;
+	struct txgbe_5tuple_filter *filter;
+	int ret;
+
+	if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) {
+		PMD_DRV_LOG(ERR, "only 5tuple is supported.");
+		return -EINVAL;
+	}
+
+	memset(&filter_5tuple, 0, sizeof(struct txgbe_5tuple_filter_info));
+	ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple);
+	if (ret < 0)
+		return ret;
+
+	filter = txgbe_5tuple_filter_lookup(&filter_info->fivetuple_list,
+					    &filter_5tuple);
+	if (filter != NULL && add) {
+		PMD_DRV_LOG(ERR, "filter exists.");
+		return -EEXIST;
+	}
+	if (filter == NULL && !add) {
+		PMD_DRV_LOG(ERR, "filter doesn't exist.");
+		return -ENOENT;
+	}
+
+	if (add) {
+		filter = rte_zmalloc("txgbe_5tuple_filter",
+				sizeof(struct txgbe_5tuple_filter), 0);
+		if (filter == NULL)
+			return -ENOMEM;
+		rte_memcpy(&filter->filter_info,
+				 &filter_5tuple,
+				 sizeof(struct txgbe_5tuple_filter_info));
+		filter->queue = ntuple_filter->queue;
+		ret = txgbe_add_5tuple_filter(dev, filter);
+		if (ret < 0) {
+			rte_free(filter);
+			return ret;
+		}
+	} else {
+		txgbe_remove_5tuple_filter(dev, filter);
+	}
+
+	return 0;
+}
+
+int
+txgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
+			struct rte_eth_ethertype_filter *filter,
+			bool add)
+{
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+	uint32_t etqf = 0;
+	uint32_t etqs = 0;
+	int ret;
+	struct txgbe_ethertype_filter ethertype_filter;
+
+	if (filter->queue >= TXGBE_MAX_RX_QUEUE_NUM)
+		return -EINVAL;
+
+	if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
+	    filter->ether_type == RTE_ETHER_TYPE_IPV6) {
+		PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in"
+			" ethertype filter.", filter->ether_type);
+		return -EINVAL;
+	}
+
+	if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
+		PMD_DRV_LOG(ERR, "mac compare is unsupported.");
+		return -EINVAL;
+	}
+	if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
+		PMD_DRV_LOG(ERR, "drop option is unsupported.");
+		return -EINVAL;
+	}
+
+	ret = txgbe_ethertype_filter_lookup(filter_info, filter->ether_type);
+	if (ret >= 0 && add) {
+		PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.",
+			filter->ether_type);
+		return -EEXIST;
+	}
+	if (ret < 0 && !add) {
+		PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
+			filter->ether_type);
+		return -ENOENT;
+	}
+
+	if (add) {
+		etqf = TXGBE_ETFLT_ENA;
+		etqf |= TXGBE_ETFLT_ETID(filter->ether_type);
+		etqs |= TXGBE_ETCLS_QPID(filter->queue);
+		etqs |= TXGBE_ETCLS_QENA;
+
+		ethertype_filter.ethertype = filter->ether_type;
+		ethertype_filter.etqf = etqf;
+		ethertype_filter.etqs = etqs;
+		ethertype_filter.conf = FALSE;
+		ret = txgbe_ethertype_filter_insert(filter_info,
+						    &ethertype_filter);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "ethertype filters are full.");
+			return -ENOSPC;
+		}
+	} else {
+		ret = txgbe_ethertype_filter_remove(filter_info, (uint8_t)ret);
+		if (ret < 0)
+			return -ENOSYS;
+	}
+	wr32(hw, TXGBE_ETFLT(ret), etqf);
+	wr32(hw, TXGBE_ETCLS(ret), etqs);
+	txgbe_flush(hw);
+
+	return 0;
+}
+
+static int
+txgbe_dev_filter_ctrl(__rte_unused struct rte_eth_dev *dev,
+		      enum rte_filter_type filter_type,
+		      enum rte_filter_op filter_op,
+		      void *arg)
+{
+	int ret = 0;
+
+	switch (filter_type) {
+	case RTE_ETH_FILTER_GENERIC:
+		if (filter_op != RTE_ETH_FILTER_GET)
+			return -EINVAL;
+		*(const void **)arg = &txgbe_flow_ops;
+		break;
+	default:
+		PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
+			    filter_type);
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
 static u8 *
 txgbe_dev_addr_list_itr(__rte_unused struct txgbe_hw *hw,
 			u8 **mc_addr_ptr, u32 *vmdq)
@@ -3293,6 +3957,400 @@ txgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
 					 txgbe_dev_addr_list_itr, TRUE);
 }
 
+static uint64_t
+txgbe_read_systime_cyclecounter(struct rte_eth_dev *dev)
+{
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	uint64_t systime_cycles;
+
+	systime_cycles = (uint64_t)rd32(hw, TXGBE_TSTIMEL);
+	systime_cycles |= (uint64_t)rd32(hw, TXGBE_TSTIMEH) << 32;
+
+	return systime_cycles;
+}
+
+static uint64_t
+txgbe_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev)
+{
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	uint64_t rx_tstamp_cycles;
+
+	/* TSRXSTMPL stores ns and TSRXSTMPH stores seconds. */
+	rx_tstamp_cycles = (uint64_t)rd32(hw, TXGBE_TSRXSTMPL);
+	rx_tstamp_cycles |= (uint64_t)rd32(hw, TXGBE_TSRXSTMPH) << 32;
+
+	return rx_tstamp_cycles;
+}
+
+static uint64_t
+txgbe_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
+{
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	uint64_t tx_tstamp_cycles;
+
+	/* TSTXSTMPL stores ns and TSTXSTMPH stores seconds. */
+	tx_tstamp_cycles = (uint64_t)rd32(hw, TXGBE_TSTXSTMPL);
+	tx_tstamp_cycles |= (uint64_t)rd32(hw, TXGBE_TSTXSTMPH) << 32;
+
+	return tx_tstamp_cycles;
+}
+
+static void
+txgbe_start_timecounters(struct rte_eth_dev *dev)
+{
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
+	struct rte_eth_link link;
+	uint32_t incval = 0;
+	uint32_t shift = 0;
+
+	/* Get current link speed. */
+	txgbe_dev_link_update(dev, 1);
+	rte_eth_linkstatus_get(dev, &link);
+
+	switch (link.link_speed) {
+	case ETH_SPEED_NUM_100M:
+		incval = TXGBE_INCVAL_100;
+		shift = TXGBE_INCVAL_SHIFT_100;
+		break;
+	case ETH_SPEED_NUM_1G:
+		incval = TXGBE_INCVAL_1GB;
+		shift = TXGBE_INCVAL_SHIFT_1GB;
+		break;
+	case ETH_SPEED_NUM_10G:
+	default:
+		incval = TXGBE_INCVAL_10GB;
+		shift = TXGBE_INCVAL_SHIFT_10GB;
+		break;
+	}
+
+	wr32(hw, TXGBE_TSTIMEINC, TXGBE_TSTIMEINC_VP(incval, 2));
+
+	memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
+	memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
+	memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
+
+	adapter->systime_tc.cc_mask = TXGBE_CYCLECOUNTER_MASK;
+	adapter->systime_tc.cc_shift = shift;
+	adapter->systime_tc.nsec_mask = (1ULL << shift) - 1;
+
+	adapter->rx_tstamp_tc.cc_mask = TXGBE_CYCLECOUNTER_MASK;
+	adapter->rx_tstamp_tc.cc_shift = shift;
+	adapter->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
+
+	adapter->tx_tstamp_tc.cc_mask = TXGBE_CYCLECOUNTER_MASK;
+	adapter->tx_tstamp_tc.cc_shift = shift;
+	adapter->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
+}
+
+static int
+txgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
+{
+	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
+
+	adapter->systime_tc.nsec += delta;
+	adapter->rx_tstamp_tc.nsec += delta;
+	adapter->tx_tstamp_tc.nsec += delta;
+
+	return 0;
+}
+
+static int
+txgbe_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
+{
+	uint64_t ns;
+	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
+
+	ns = rte_timespec_to_ns(ts);
+	/* Set the timecounters to a new value. */
+	adapter->systime_tc.nsec = ns;
+	adapter->rx_tstamp_tc.nsec = ns;
+	adapter->tx_tstamp_tc.nsec = ns;
+
+	return 0;
+}
+
+static int
+txgbe_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
+{
+	uint64_t ns, systime_cycles;
+	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
+
+	systime_cycles = txgbe_read_systime_cyclecounter(dev);
+	ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
+	*ts = rte_ns_to_timespec(ns);
+
+	return 0;
+}
+
+static int
+txgbe_timesync_enable(struct rte_eth_dev *dev)
+{
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	uint32_t tsync_ctl;
+
+	/* Stop the timesync system time. */
+	wr32(hw, TXGBE_TSTIMEINC, 0x0);
+	/* Reset the timesync system time value. */
+	wr32(hw, TXGBE_TSTIMEL, 0x0);
+	wr32(hw, TXGBE_TSTIMEH, 0x0);
+
+	txgbe_start_timecounters(dev);
+
+	/* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
+	wr32(hw, TXGBE_ETFLT(TXGBE_ETF_ID_1588),
+		RTE_ETHER_TYPE_1588 | TXGBE_ETFLT_ENA | TXGBE_ETFLT_1588);
+
+	/* Enable timestamping of received PTP packets. */
+	tsync_ctl = rd32(hw, TXGBE_TSRXCTL);
+	tsync_ctl |= TXGBE_TSRXCTL_ENA;
+	wr32(hw, TXGBE_TSRXCTL, tsync_ctl);
+
+	/* Enable timestamping of transmitted PTP packets. */
+	tsync_ctl = rd32(hw, TXGBE_TSTXCTL);
+	tsync_ctl |= TXGBE_TSTXCTL_ENA;
+	wr32(hw, TXGBE_TSTXCTL, tsync_ctl);
+
+	txgbe_flush(hw);
+
+	return 0;
+}
+
+static int
+txgbe_timesync_disable(struct rte_eth_dev *dev)
+{
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	uint32_t tsync_ctl;
+
+	/* Disable timestamping of transmitted PTP packets. */
+	tsync_ctl = rd32(hw, TXGBE_TSTXCTL);
+	tsync_ctl &= ~TXGBE_TSTXCTL_ENA;
+	wr32(hw, TXGBE_TSTXCTL, tsync_ctl);
+
+	/* Disable timestamping of received PTP packets. */
+	tsync_ctl = rd32(hw, TXGBE_TSRXCTL);
+	tsync_ctl &= ~TXGBE_TSRXCTL_ENA;
+	wr32(hw, TXGBE_TSRXCTL, tsync_ctl);
+
+	/* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
+	wr32(hw, TXGBE_ETFLT(TXGBE_ETF_ID_1588), 0);
+
+	/* Stop incrementing the System Time registers. */
+	wr32(hw, TXGBE_TSTIMEINC, 0);
+
+	return 0;
+}
+
+static int
+txgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
+				 struct timespec *timestamp,
+				 uint32_t flags __rte_unused)
+{
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
+	uint32_t tsync_rxctl;
+	uint64_t rx_tstamp_cycles;
+	uint64_t ns;
+
+	tsync_rxctl = rd32(hw, TXGBE_TSRXCTL);
+	if ((tsync_rxctl & TXGBE_TSRXCTL_VLD) == 0)
+		return -EINVAL;
+
+	rx_tstamp_cycles = txgbe_read_rx_tstamp_cyclecounter(dev);
+	ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
+	*timestamp = rte_ns_to_timespec(ns);
+
+	return 0;
+}
+
+static int
+txgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
+				 struct timespec *timestamp)
+{
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
+	uint32_t tsync_txctl;
+	uint64_t tx_tstamp_cycles;
+	uint64_t ns;
+
+	tsync_txctl = rd32(hw, TXGBE_TSTXCTL);
+	if ((tsync_txctl & TXGBE_TSTXCTL_VLD) == 0)
+		return -EINVAL;
+
+	tx_tstamp_cycles = txgbe_read_tx_tstamp_cyclecounter(dev);
+	ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
+	*timestamp = rte_ns_to_timespec(ns);
+
+	return 0;
+}
+
+static int
+txgbe_get_reg_length(struct rte_eth_dev *dev __rte_unused)
+{
+	int count = 0;
+	int g_ind = 0;
+	const struct reg_info *reg_group;
+	const struct reg_info **reg_set = txgbe_regs_others;
+
+	while ((reg_group = reg_set[g_ind++]))
+		count += txgbe_regs_group_count(reg_group);
+
+	return count;
+}
+
+static int
+txgbe_get_regs(struct rte_eth_dev *dev,
+	      struct rte_dev_reg_info *regs)
+{
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	uint32_t *data = regs->data;
+	int g_ind = 0;
+	int count = 0;
+	const struct reg_info *reg_group;
+	const struct reg_info **reg_set = txgbe_regs_others;
+
+	if (data == NULL) {
+		regs->length = txgbe_get_reg_length(dev);
+		regs->width = sizeof(uint32_t);
+		return 0;
+	}
+
+	/* Support only full register dump */
+	if (regs->length == 0 ||
+	    regs->length == (uint32_t)txgbe_get_reg_length(dev)) {
+		regs->version = hw->mac.type << 24 |
+				hw->revision_id << 16 |
+				hw->device_id;
+		while ((reg_group = reg_set[g_ind++]))
+			count += txgbe_read_regs_group(dev, &data[count],
+						      reg_group);
+		return 0;
+	}
+
+	return -ENOTSUP;
+}
+
+static int
+txgbe_get_eeprom_length(struct rte_eth_dev *dev)
+{
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+
+	/* Return unit is byte count */
+	return hw->rom.word_size * 2;
+}
+
+static int
+txgbe_get_eeprom(struct rte_eth_dev *dev,
+		struct rte_dev_eeprom_info *in_eeprom)
+{
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	struct txgbe_rom_info *eeprom = &hw->rom;
+	uint16_t *data = in_eeprom->data;
+	int first, length;
+
+	first = in_eeprom->offset >> 1;
+	length = in_eeprom->length >> 1;
+	if (first > hw->rom.word_size ||
+	    ((first + length) > hw->rom.word_size))
+		return -EINVAL;
+
+	in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
+
+	return eeprom->readw_buffer(hw, first, length, data);
+}
+
+static int
+txgbe_set_eeprom(struct rte_eth_dev *dev,
+		struct rte_dev_eeprom_info *in_eeprom)
+{
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	struct txgbe_rom_info *eeprom = &hw->rom;
+	uint16_t *data = in_eeprom->data;
+	int first, length;
+
+	first = in_eeprom->offset >> 1;
+	length = in_eeprom->length >> 1;
+	if (first > hw->rom.word_size ||
+	    ((first + length) > hw->rom.word_size))
+		return -EINVAL;
+
+	in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
+
+	return eeprom->writew_buffer(hw, first, length, data);
+}
+
+static int
+txgbe_get_module_info(struct rte_eth_dev *dev,
+		      struct rte_eth_dev_module_info *modinfo)
+{
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	uint32_t status;
+	uint8_t sff8472_rev, addr_mode;
+	bool page_swap = false;
+
+	/* Check whether we support SFF-8472 or not */
+	status = hw->phy.read_i2c_eeprom(hw,
+					 TXGBE_SFF_SFF_8472_COMP,
+					 &sff8472_rev);
+	if (status != 0)
+		return -EIO;
+
+	/* addressing mode is not supported */
+	status = hw->phy.read_i2c_eeprom(hw,
+					 TXGBE_SFF_SFF_8472_SWAP,
+					 &addr_mode);
+	if (status != 0)
+		return -EIO;
+
+	if (addr_mode & TXGBE_SFF_ADDRESSING_MODE) {
+		PMD_DRV_LOG(ERR,
+			    "Address change required to access page 0xA2, "
+			    "but not supported. Please report the module "
+			    "type to the driver maintainers.");
+		page_swap = true;
+	}
+
+	if (sff8472_rev == TXGBE_SFF_SFF_8472_UNSUP || page_swap) {
+		/* We have a SFP, but it does not support SFF-8472 */
+		modinfo->type = RTE_ETH_MODULE_SFF_8079;
+		modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN;
+	} else {
+		/* We have a SFP which supports a revision of SFF-8472. */
+		modinfo->type = RTE_ETH_MODULE_SFF_8472;
+		modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN;
+	}
+
+	return 0;
+}
+
+static int
+txgbe_get_module_eeprom(struct rte_eth_dev *dev,
+			struct rte_dev_eeprom_info *info)
+{
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	uint32_t status = TXGBE_ERR_PHY_ADDR_INVALID;
+	uint8_t databyte = 0xFF;
+	uint8_t *data = info->data;
+	uint32_t i = 0;
+
+	if (info->length == 0)
+		return -EINVAL;
+
+	for (i = info->offset; i < info->offset + info->length; i++) {
+		if (i < RTE_ETH_MODULE_SFF_8079_LEN)
+			status = hw->phy.read_i2c_eeprom(hw, i, &databyte);
+		else
+			status = hw->phy.read_i2c_sff8472(hw, i, &databyte);
+
+		if (status != 0)
+			return -EIO;
+
+		data[i - info->offset] = databyte;
+	}
+
+	return 0;
+}
+
 bool
 txgbe_rss_update_sp(enum txgbe_mac_type mac_type)
 {
@@ -3304,6 +4362,153 @@ txgbe_rss_update_sp(enum txgbe_mac_type mac_type)
 	}
 }
 
+static int
+txgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
+			struct rte_eth_dcb_info *dcb_info)
+{
+	struct txgbe_dcb_config *dcb_config = TXGBE_DEV_DCB_CONFIG(dev);
+	struct txgbe_dcb_tc_config *tc;
+	struct rte_eth_dcb_tc_queue_mapping *tc_queue;
+	uint8_t nb_tcs;
+	uint8_t i, j;
+
+	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
+		dcb_info->nb_tcs = dcb_config->num_tcs.pg_tcs;
+	else
+		dcb_info->nb_tcs = 1;
+
+	tc_queue = &dcb_info->tc_queue;
+	nb_tcs = dcb_info->nb_tcs;
+
+	if (dcb_config->vt_mode) { /* vt is enabled */
+		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
+				&dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
+		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+			dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i];
+		if (RTE_ETH_DEV_SRIOV(dev).active > 0) {
+			for (j = 0; j < nb_tcs; j++) {
+				tc_queue->tc_rxq[0][j].base = j;
+				tc_queue->tc_rxq[0][j].nb_queue = 1;
+				tc_queue->tc_txq[0][j].base = j;
+				tc_queue->tc_txq[0][j].nb_queue = 1;
+			}
+		} else {
+			for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) {
+				for (j = 0; j < nb_tcs; j++) {
+					tc_queue->tc_rxq[i][j].base =
+						i * nb_tcs + j;
+					tc_queue->tc_rxq[i][j].nb_queue = 1;
+					tc_queue->tc_txq[i][j].base =
+						i * nb_tcs + j;
+					tc_queue->tc_txq[i][j].nb_queue = 1;
+				}
+			}
+		}
+	} else { /* vt is disabled */
+		struct rte_eth_dcb_rx_conf *rx_conf =
+				&dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
+		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+			dcb_info->prio_tc[i] = rx_conf->dcb_tc[i];
+		if (dcb_info->nb_tcs == ETH_4_TCS) {
+			for (i = 0; i < dcb_info->nb_tcs; i++) {
+				dcb_info->tc_queue.tc_rxq[0][i].base = i * 32;
+				dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
+			}
+			dcb_info->tc_queue.tc_txq[0][0].base = 0;
+			dcb_info->tc_queue.tc_txq[0][1].base = 64;
+			dcb_info->tc_queue.tc_txq[0][2].base = 96;
+			dcb_info->tc_queue.tc_txq[0][3].base = 112;
+			dcb_info->tc_queue.tc_txq[0][0].nb_queue = 64;
+			dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
+			dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
+			dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
+		} else if (dcb_info->nb_tcs == ETH_8_TCS) {
+			for (i = 0; i < dcb_info->nb_tcs; i++) {
+				dcb_info->tc_queue.tc_rxq[0][i].base = i * 16;
+				dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
+			}
+			dcb_info->tc_queue.tc_txq[0][0].base = 0;
+			dcb_info->tc_queue.tc_txq[0][1].base = 32;
+			dcb_info->tc_queue.tc_txq[0][2].base = 64;
+			dcb_info->tc_queue.tc_txq[0][3].base = 80;
+			dcb_info->tc_queue.tc_txq[0][4].base = 96;
+			dcb_info->tc_queue.tc_txq[0][5].base = 104;
+			dcb_info->tc_queue.tc_txq[0][6].base = 112;
+			dcb_info->tc_queue.tc_txq[0][7].base = 120;
+			dcb_info->tc_queue.tc_txq[0][0].nb_queue = 32;
+			dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
+			dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
+			dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
+			dcb_info->tc_queue.tc_txq[0][4].nb_queue = 8;
+			dcb_info->tc_queue.tc_txq[0][5].nb_queue = 8;
+			dcb_info->tc_queue.tc_txq[0][6].nb_queue = 8;
+			dcb_info->tc_queue.tc_txq[0][7].nb_queue = 8;
+		}
+	}
+	for (i = 0; i < dcb_info->nb_tcs; i++) {
+		tc = &dcb_config->tc_config[i];
+		dcb_info->tc_bws[i] = tc->path[TXGBE_DCB_TX_CONFIG].bwg_percent;
+	}
+	return 0;
+}
+
+/* restore n-tuple filter */
+static inline void
+txgbe_ntuple_filter_restore(struct rte_eth_dev *dev)
+{
+	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+	struct txgbe_5tuple_filter *node;
+
+	TAILQ_FOREACH(node, &filter_info->fivetuple_list, entries) {
+		txgbe_inject_5tuple_filter(dev, node);
+	}
+}
+
+/* restore ethernet type filter */
+static inline void
+txgbe_ethertype_filter_restore(struct rte_eth_dev *dev)
+{
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+	int i;
+
+	for (i = 0; i < TXGBE_ETF_ID_MAX; i++) {
+		if (filter_info->ethertype_mask & (1 << i)) {
+			wr32(hw, TXGBE_ETFLT(i),
+			     filter_info->ethertype_filters[i].etqf);
+			wr32(hw, TXGBE_ETCLS(i),
+			     filter_info->ethertype_filters[i].etqs);
+			txgbe_flush(hw);
+		}
+	}
+}
+
+/* restore SYN filter */
+static inline void
+txgbe_syn_filter_restore(struct rte_eth_dev *dev)
+{
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+	uint32_t synqf;
+
+	synqf = filter_info->syn_info;
+
+	if (synqf & TXGBE_SYNCLS_ENA) {
+		wr32(hw, TXGBE_SYNCLS, synqf);
+		txgbe_flush(hw);
+	}
+}
+
+static int
+txgbe_filter_restore(struct rte_eth_dev *dev)
+{
+	txgbe_ntuple_filter_restore(dev);
+	txgbe_ethertype_filter_restore(dev);
+	txgbe_syn_filter_restore(dev);
+
+	return 0;
+}
+
 static const struct eth_dev_ops txgbe_eth_dev_ops = {
 	.dev_configure = txgbe_dev_configure,
 	.dev_infos_get = txgbe_dev_info_get,
@@ -3313,6 +4518,10 @@ static const struct eth_dev_ops txgbe_eth_dev_ops = {
 	.dev_set_link_down = txgbe_dev_set_link_down,
 	.dev_close = txgbe_dev_close,
 	.dev_reset = txgbe_dev_reset,
+	.promiscuous_enable = txgbe_dev_promiscuous_enable,
+	.promiscuous_disable = txgbe_dev_promiscuous_disable,
+	.allmulticast_enable = txgbe_dev_allmulticast_enable,
+	.allmulticast_disable = txgbe_dev_allmulticast_disable,
 	.link_update = txgbe_dev_link_update,
 	.stats_get = txgbe_dev_stats_get,
 	.xstats_get = txgbe_dev_xstats_get,
@@ -3322,7 +4531,9 @@ static const struct eth_dev_ops txgbe_eth_dev_ops = {
 	.xstats_get_names = txgbe_dev_xstats_get_names,
 	.xstats_get_names_by_id = txgbe_dev_xstats_get_names_by_id,
 	.queue_stats_mapping_set = txgbe_dev_queue_stats_mapping_set,
+	.fw_version_get = txgbe_fw_version_get,
 	.dev_supported_ptypes_get = txgbe_dev_supported_ptypes_get,
+	.mtu_set = txgbe_dev_mtu_set,
 	.vlan_filter_set = txgbe_vlan_filter_set,
 	.vlan_tpid_set = txgbe_vlan_tpid_set,
 	.vlan_offload_set = txgbe_vlan_offload_set,
@@ -3337,6 +4548,8 @@ static const struct eth_dev_ops txgbe_eth_dev_ops = {
 	.rx_queue_release = txgbe_dev_rx_queue_release,
 	.tx_queue_setup = txgbe_dev_tx_queue_setup,
 	.tx_queue_release = txgbe_dev_tx_queue_release,
+	.dev_led_on = txgbe_dev_led_on,
+	.dev_led_off = txgbe_dev_led_off,
 	.flow_ctrl_get = txgbe_flow_ctrl_get,
 	.flow_ctrl_set = txgbe_flow_ctrl_set,
 	.priority_flow_ctrl_set = txgbe_priority_flow_ctrl_set,
@@ -3350,9 +4563,25 @@ static const struct eth_dev_ops txgbe_eth_dev_ops = {
 	.reta_query = txgbe_dev_rss_reta_query,
 	.rss_hash_update = txgbe_dev_rss_hash_update,
 	.rss_hash_conf_get = txgbe_dev_rss_hash_conf_get,
+	.filter_ctrl = txgbe_dev_filter_ctrl,
 	.set_mc_addr_list = txgbe_dev_set_mc_addr_list,
 	.rxq_info_get = txgbe_rxq_info_get,
 	.txq_info_get = txgbe_txq_info_get,
+	.timesync_enable = txgbe_timesync_enable,
+	.timesync_disable = txgbe_timesync_disable,
+	.timesync_read_rx_timestamp = txgbe_timesync_read_rx_timestamp,
+	.timesync_read_tx_timestamp = txgbe_timesync_read_tx_timestamp,
+	.get_reg = txgbe_get_regs,
+	.get_eeprom_length = txgbe_get_eeprom_length,
+	.get_eeprom = txgbe_get_eeprom,
+	.set_eeprom = txgbe_set_eeprom,
+	.get_module_info = txgbe_get_module_info,
+	.get_module_eeprom = txgbe_get_module_eeprom,
+	.get_dcb_info = txgbe_dev_get_dcb_info,
+	.timesync_adjust_time = txgbe_timesync_adjust_time,
+	.timesync_read_time = txgbe_timesync_read_time,
+	.timesync_write_time = txgbe_timesync_write_time,
+	.tx_done_cleanup = txgbe_dev_tx_done_cleanup,
 };
 
 RTE_PMD_REGISTER_PCI(net_txgbe, rte_txgbe_pmd);
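
Editor's note (appended; not part of the patch): txgbe_add_5tuple_filter() and
txgbe_remove_5tuple_filter() above track the fixed pool of hardware 5-tuple
slots with a per-bit occupancy mask (fivetuple_mask). The standalone C sketch
below reproduces only that index-bitmap scheme so it can be compiled and run
in isolation; MAX_FILTERS and the two helper names are illustrative stand-ins
(MAX_FILTERS plays the role of TXGBE_MAX_FTQF_FILTERS), not driver symbols.

#include <stdio.h>
#include <stdint.h>

#define MAX_FILTERS 128	/* stand-in for TXGBE_MAX_FTQF_FILTERS */
#define NBBY 8		/* bits per byte, as in the driver */

static uint32_t fivetuple_mask[MAX_FILTERS / (sizeof(uint32_t) * NBBY)];

/* Find the lowest free slot, mark it used and return its index (-1 if full). */
static int alloc_filter_slot(void)
{
	for (int i = 0; i < MAX_FILTERS; i++) {
		int idx = i / (sizeof(uint32_t) * NBBY);
		int shift = i % (sizeof(uint32_t) * NBBY);
		if (!(fivetuple_mask[idx] & (1u << shift))) {
			fivetuple_mask[idx] |= 1u << shift;
			return i;
		}
	}
	return -1;	/* corresponds to the "5tuple filters are full" branch */
}

/* Clear the slot's bit, mirroring the mask update in the remove path. */
static void free_filter_slot(int index)
{
	fivetuple_mask[index / (sizeof(uint32_t) * NBBY)] &=
		~(1u << (index % (sizeof(uint32_t) * NBBY)));
}

int main(void)
{
	int a = alloc_filter_slot();	/* 0 */
	int b = alloc_filter_slot();	/* 1 */
	free_filter_slot(a);
	printf("a=%d b=%d reused=%d\n", a, b, alloc_filter_slot());
	return 0;
}

Because the search always returns the lowest clear bit, a freed slot is reused
before fresh slots are consumed; in the driver the mask alone decides slot
ownership, while the TAILQ only orders the software view of the filters.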