+	/* cancel the delayed handler before removing the device */
+ rte_eal_alarm_cancel(txgbe_dev_interrupt_delayed_handler, dev);
+
+	/* uninitialize the PF if max_vfs is not zero */
+ txgbe_pf_host_uninit(dev);
+
+ rte_free(dev->data->mac_addrs);
+ dev->data->mac_addrs = NULL;
+
+ rte_free(dev->data->hash_mac_addrs);
+ dev->data->hash_mac_addrs = NULL;
+
+ /* remove all the fdir filters & hash */
+ txgbe_fdir_filter_uninit(dev);
+
+ /* remove all the L2 tunnel filters & hash */
+ txgbe_l2_tn_filter_uninit(dev);
+
+ /* Remove all ntuple filters of the device */
+ txgbe_ntuple_filter_uninit(dev);
+
+ /* clear all the filters list */
+ txgbe_filterlist_flush();
+
+ /* Remove all Traffic Manager configuration */
+ txgbe_tm_conf_uninit(dev);
+
+#ifdef RTE_LIB_SECURITY
+ rte_free(dev->security_ctx);
+#endif
+
+ return ret;
+}
+
+/*
+ * Reset PF device.
+ */
+static int
+txgbe_dev_reset(struct rte_eth_dev *dev)
+{
+ int ret;
+
+	/* When a DPDK PMD PF begins to reset the PF port, it should notify
+	 * all its VFs so that they stay aligned with it. The detailed
+	 * notification mechanism is PMD specific. For the txgbe PF it is
+	 * rather complex, so to avoid unexpected behavior in the VFs,
+	 * resetting a PF with SR-IOV active is currently not supported.
+	 * It might be supported later.
+	 */
+ if (dev->data->sriov.active)
+ return -ENOTSUP;
+
+ ret = eth_txgbe_dev_uninit(dev);
+ if (ret)
+ return ret;
+
+ ret = eth_txgbe_dev_init(dev, NULL);
+
+ return ret;
+}
+
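+/* These helpers accumulate queue counters that wrap at 32/36 bits: if the
+ * current hardware value is below the last snapshot, the counter wrapped,
+ * so the rollover constant is added back before taking the difference.
+ * While offset_loaded is clear (stats reset in progress), the snapshot is
+ * simply re-latched.
+ */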
+#define UPDATE_QP_COUNTER_32bit(reg, last_counter, counter) \
+ { \
+ uint32_t current_counter = rd32(hw, reg); \
+ if (current_counter < last_counter) \
+ current_counter += 0x100000000LL; \
+ if (!hw->offset_loaded) \
+ last_counter = current_counter; \
+ counter = current_counter - last_counter; \
+ counter &= 0xFFFFFFFFLL; \
+ }
+
+#define UPDATE_QP_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
+ { \
+ uint64_t current_counter_lsb = rd32(hw, reg_lsb); \
+ uint64_t current_counter_msb = rd32(hw, reg_msb); \
+ uint64_t current_counter = (current_counter_msb << 32) | \
+ current_counter_lsb; \
+ if (current_counter < last_counter) \
+ current_counter += 0x1000000000LL; \
+ if (!hw->offset_loaded) \
+ last_counter = current_counter; \
+ counter = current_counter - last_counter; \
+ counter &= 0xFFFFFFFFFLL; \
+ }
+
+void
+txgbe_read_stats_registers(struct txgbe_hw *hw,
+ struct txgbe_hw_stats *hw_stats)
+{
+ unsigned int i;
+
+ /* QP Stats */
+ for (i = 0; i < hw->nb_rx_queues; i++) {
+ UPDATE_QP_COUNTER_32bit(TXGBE_QPRXPKT(i),
+ hw->qp_last[i].rx_qp_packets,
+ hw_stats->qp[i].rx_qp_packets);
+ UPDATE_QP_COUNTER_36bit(TXGBE_QPRXOCTL(i), TXGBE_QPRXOCTH(i),
+ hw->qp_last[i].rx_qp_bytes,
+ hw_stats->qp[i].rx_qp_bytes);
+ UPDATE_QP_COUNTER_32bit(TXGBE_QPRXMPKT(i),
+ hw->qp_last[i].rx_qp_mc_packets,
+ hw_stats->qp[i].rx_qp_mc_packets);
+ }
+
+ for (i = 0; i < hw->nb_tx_queues; i++) {
+ UPDATE_QP_COUNTER_32bit(TXGBE_QPTXPKT(i),
+ hw->qp_last[i].tx_qp_packets,
+ hw_stats->qp[i].tx_qp_packets);
+ UPDATE_QP_COUNTER_36bit(TXGBE_QPTXOCTL(i), TXGBE_QPTXOCTH(i),
+ hw->qp_last[i].tx_qp_bytes,
+ hw_stats->qp[i].tx_qp_bytes);
+ }
+ /* PB Stats */
+ for (i = 0; i < TXGBE_MAX_UP; i++) {
+ hw_stats->up[i].rx_up_xon_packets +=
+ rd32(hw, TXGBE_PBRXUPXON(i));
+ hw_stats->up[i].rx_up_xoff_packets +=
+ rd32(hw, TXGBE_PBRXUPXOFF(i));
+ hw_stats->up[i].tx_up_xon_packets +=
+ rd32(hw, TXGBE_PBTXUPXON(i));
+ hw_stats->up[i].tx_up_xoff_packets +=
+ rd32(hw, TXGBE_PBTXUPXOFF(i));
+ hw_stats->up[i].tx_up_xon2off_packets +=
+ rd32(hw, TXGBE_PBTXUPOFF(i));
+ hw_stats->up[i].rx_up_dropped +=
+ rd32(hw, TXGBE_PBRXMISS(i));
+ }
+ hw_stats->rx_xon_packets += rd32(hw, TXGBE_PBRXLNKXON);
+ hw_stats->rx_xoff_packets += rd32(hw, TXGBE_PBRXLNKXOFF);
+ hw_stats->tx_xon_packets += rd32(hw, TXGBE_PBTXLNKXON);
+ hw_stats->tx_xoff_packets += rd32(hw, TXGBE_PBTXLNKXOFF);
+
+ /* DMA Stats */
+ hw_stats->rx_packets += rd32(hw, TXGBE_DMARXPKT);
+ hw_stats->tx_packets += rd32(hw, TXGBE_DMATXPKT);
+
+ hw_stats->rx_bytes += rd64(hw, TXGBE_DMARXOCTL);
+ hw_stats->tx_bytes += rd64(hw, TXGBE_DMATXOCTL);
+ hw_stats->rx_dma_drop += rd32(hw, TXGBE_DMARXDROP);
+ hw_stats->rx_drop_packets += rd32(hw, TXGBE_PBRXDROP);
+
+ /* MAC Stats */
+ hw_stats->rx_crc_errors += rd64(hw, TXGBE_MACRXERRCRCL);
+ hw_stats->rx_multicast_packets += rd64(hw, TXGBE_MACRXMPKTL);
+ hw_stats->tx_multicast_packets += rd64(hw, TXGBE_MACTXMPKTL);
+
+ hw_stats->rx_total_packets += rd64(hw, TXGBE_MACRXPKTL);
+ hw_stats->tx_total_packets += rd64(hw, TXGBE_MACTXPKTL);
+ hw_stats->rx_total_bytes += rd64(hw, TXGBE_MACRXGBOCTL);
+
+ hw_stats->rx_broadcast_packets += rd64(hw, TXGBE_MACRXOCTL);
+ hw_stats->tx_broadcast_packets += rd32(hw, TXGBE_MACTXOCTL);
+
+ hw_stats->rx_size_64_packets += rd64(hw, TXGBE_MACRX1TO64L);
+ hw_stats->rx_size_65_to_127_packets += rd64(hw, TXGBE_MACRX65TO127L);
+ hw_stats->rx_size_128_to_255_packets += rd64(hw, TXGBE_MACRX128TO255L);
+ hw_stats->rx_size_256_to_511_packets += rd64(hw, TXGBE_MACRX256TO511L);
+ hw_stats->rx_size_512_to_1023_packets +=
+ rd64(hw, TXGBE_MACRX512TO1023L);
+ hw_stats->rx_size_1024_to_max_packets +=
+ rd64(hw, TXGBE_MACRX1024TOMAXL);
+ hw_stats->tx_size_64_packets += rd64(hw, TXGBE_MACTX1TO64L);
+ hw_stats->tx_size_65_to_127_packets += rd64(hw, TXGBE_MACTX65TO127L);
+ hw_stats->tx_size_128_to_255_packets += rd64(hw, TXGBE_MACTX128TO255L);
+ hw_stats->tx_size_256_to_511_packets += rd64(hw, TXGBE_MACTX256TO511L);
+ hw_stats->tx_size_512_to_1023_packets +=
+ rd64(hw, TXGBE_MACTX512TO1023L);
+ hw_stats->tx_size_1024_to_max_packets +=
+ rd64(hw, TXGBE_MACTX1024TOMAXL);
+
+ hw_stats->rx_undersize_errors += rd64(hw, TXGBE_MACRXERRLENL);
+ hw_stats->rx_oversize_errors += rd32(hw, TXGBE_MACRXOVERSIZE);
+ hw_stats->rx_jabber_errors += rd32(hw, TXGBE_MACRXJABBER);
+
+ /* MNG Stats */
+ hw_stats->mng_bmc2host_packets = rd32(hw, TXGBE_MNGBMC2OS);
+ hw_stats->mng_host2bmc_packets = rd32(hw, TXGBE_MNGOS2BMC);
+ hw_stats->rx_management_packets = rd32(hw, TXGBE_DMARXMNG);
+ hw_stats->tx_management_packets = rd32(hw, TXGBE_DMATXMNG);
+
+ /* FCoE Stats */
+ hw_stats->rx_fcoe_crc_errors += rd32(hw, TXGBE_FCOECRC);
+ hw_stats->rx_fcoe_mbuf_allocation_errors += rd32(hw, TXGBE_FCOELAST);
+ hw_stats->rx_fcoe_dropped += rd32(hw, TXGBE_FCOERPDC);
+ hw_stats->rx_fcoe_packets += rd32(hw, TXGBE_FCOEPRC);
+ hw_stats->tx_fcoe_packets += rd32(hw, TXGBE_FCOEPTC);
+ hw_stats->rx_fcoe_bytes += rd32(hw, TXGBE_FCOEDWRC);
+ hw_stats->tx_fcoe_bytes += rd32(hw, TXGBE_FCOEDWTC);
+
+ /* Flow Director Stats */
+ hw_stats->flow_director_matched_filters += rd32(hw, TXGBE_FDIRMATCH);
+ hw_stats->flow_director_missed_filters += rd32(hw, TXGBE_FDIRMISS);
+ hw_stats->flow_director_added_filters +=
+ TXGBE_FDIRUSED_ADD(rd32(hw, TXGBE_FDIRUSED));
+ hw_stats->flow_director_removed_filters +=
+ TXGBE_FDIRUSED_REM(rd32(hw, TXGBE_FDIRUSED));
+ hw_stats->flow_director_filter_add_errors +=
+ TXGBE_FDIRFAIL_ADD(rd32(hw, TXGBE_FDIRFAIL));
+ hw_stats->flow_director_filter_remove_errors +=
+ TXGBE_FDIRFAIL_REM(rd32(hw, TXGBE_FDIRFAIL));
+
+ /* MACsec Stats */
+ hw_stats->tx_macsec_pkts_untagged += rd32(hw, TXGBE_LSECTX_UTPKT);
+ hw_stats->tx_macsec_pkts_encrypted +=
+ rd32(hw, TXGBE_LSECTX_ENCPKT);
+ hw_stats->tx_macsec_pkts_protected +=
+ rd32(hw, TXGBE_LSECTX_PROTPKT);
+ hw_stats->tx_macsec_octets_encrypted +=
+ rd32(hw, TXGBE_LSECTX_ENCOCT);
+ hw_stats->tx_macsec_octets_protected +=
+ rd32(hw, TXGBE_LSECTX_PROTOCT);
+ hw_stats->rx_macsec_pkts_untagged += rd32(hw, TXGBE_LSECRX_UTPKT);
+ hw_stats->rx_macsec_pkts_badtag += rd32(hw, TXGBE_LSECRX_BTPKT);
+ hw_stats->rx_macsec_pkts_nosci += rd32(hw, TXGBE_LSECRX_NOSCIPKT);
+ hw_stats->rx_macsec_pkts_unknownsci += rd32(hw, TXGBE_LSECRX_UNSCIPKT);
+ hw_stats->rx_macsec_octets_decrypted += rd32(hw, TXGBE_LSECRX_DECOCT);
+ hw_stats->rx_macsec_octets_validated += rd32(hw, TXGBE_LSECRX_VLDOCT);
+ hw_stats->rx_macsec_sc_pkts_unchecked +=
+ rd32(hw, TXGBE_LSECRX_UNCHKPKT);
+ hw_stats->rx_macsec_sc_pkts_delayed += rd32(hw, TXGBE_LSECRX_DLYPKT);
+ hw_stats->rx_macsec_sc_pkts_late += rd32(hw, TXGBE_LSECRX_LATEPKT);
+ for (i = 0; i < 2; i++) {
+ hw_stats->rx_macsec_sa_pkts_ok +=
+ rd32(hw, TXGBE_LSECRX_OKPKT(i));
+ hw_stats->rx_macsec_sa_pkts_invalid +=
+ rd32(hw, TXGBE_LSECRX_INVPKT(i));
+ hw_stats->rx_macsec_sa_pkts_notvalid +=
+ rd32(hw, TXGBE_LSECRX_BADPKT(i));
+ }
+ hw_stats->rx_macsec_sa_pkts_unusedsa +=
+ rd32(hw, TXGBE_LSECRX_INVSAPKT);
+ hw_stats->rx_macsec_sa_pkts_notusingsa +=
+ rd32(hw, TXGBE_LSECRX_BADSAPKT);
+
+ hw_stats->rx_total_missed_packets = 0;
+ for (i = 0; i < TXGBE_MAX_UP; i++) {
+ hw_stats->rx_total_missed_packets +=
+ hw_stats->up[i].rx_up_dropped;
+ }
+}
+
+static int
+txgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
+ struct txgbe_stat_mappings *stat_mappings =
+ TXGBE_DEV_STAT_MAPPINGS(dev);
+ uint32_t i, j;
+
+ txgbe_read_stats_registers(hw, hw_stats);
+
+ if (stats == NULL)
+ return -EINVAL;
+
+ /* Fill out the rte_eth_stats statistics structure */
+ stats->ipackets = hw_stats->rx_packets;
+ stats->ibytes = hw_stats->rx_bytes;
+ stats->opackets = hw_stats->tx_packets;
+ stats->obytes = hw_stats->tx_bytes;
+
+ memset(&stats->q_ipackets, 0, sizeof(stats->q_ipackets));
+ memset(&stats->q_opackets, 0, sizeof(stats->q_opackets));
+ memset(&stats->q_ibytes, 0, sizeof(stats->q_ibytes));
+ memset(&stats->q_obytes, 0, sizeof(stats->q_obytes));
+ memset(&stats->q_errors, 0, sizeof(stats->q_errors));
+ for (i = 0; i < TXGBE_MAX_QP; i++) {
+ uint32_t n = i / NB_QMAP_FIELDS_PER_QSM_REG;
+ uint32_t offset = (i % NB_QMAP_FIELDS_PER_QSM_REG) * 8;
+ uint32_t q_map;
+
+ q_map = (stat_mappings->rqsm[n] >> offset)
+ & QMAP_FIELD_RESERVED_BITS_MASK;
+ j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
+ ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
+ stats->q_ipackets[j] += hw_stats->qp[i].rx_qp_packets;
+ stats->q_ibytes[j] += hw_stats->qp[i].rx_qp_bytes;
+
+ q_map = (stat_mappings->tqsm[n] >> offset)
+ & QMAP_FIELD_RESERVED_BITS_MASK;
+ j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
+ ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
+ stats->q_opackets[j] += hw_stats->qp[i].tx_qp_packets;
+ stats->q_obytes[j] += hw_stats->qp[i].tx_qp_bytes;
+ }
+
+ /* Rx Errors */
+ stats->imissed = hw_stats->rx_total_missed_packets +
+ hw_stats->rx_dma_drop;
+ stats->ierrors = hw_stats->rx_crc_errors +
+ hw_stats->rx_mac_short_packet_dropped +
+ hw_stats->rx_length_errors +
+ hw_stats->rx_undersize_errors +
+ hw_stats->rx_oversize_errors +
+ hw_stats->rx_drop_packets +
+ hw_stats->rx_illegal_byte_errors +
+ hw_stats->rx_error_bytes +
+ hw_stats->rx_fragment_errors +
+ hw_stats->rx_fcoe_crc_errors +
+ hw_stats->rx_fcoe_mbuf_allocation_errors;
+
+ /* Tx Errors */
+ stats->oerrors = 0;
+ return 0;
+}
+
+static int
+txgbe_dev_stats_reset(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
+
+ /* HW registers are cleared on read */
+ hw->offset_loaded = 0;
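+	/* A NULL stats pointer still triggers the register read in
+	 * txgbe_dev_stats_get() (the read happens before the NULL check),
+	 * which clears the read-on-clear counters.
+	 */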
+ txgbe_dev_stats_get(dev, NULL);
+ hw->offset_loaded = 1;
+
+ /* Reset software totals */
+ memset(hw_stats, 0, sizeof(*hw_stats));
+
+ return 0;
+}
+
+/* This function calculates the number of xstats based on the current config */
+static unsigned
+txgbe_xstats_calc_num(struct rte_eth_dev *dev)
+{
+ int nb_queues = max(dev->data->nb_rx_queues, dev->data->nb_tx_queues);
+ return TXGBE_NB_HW_STATS +
+ TXGBE_NB_UP_STATS * TXGBE_MAX_UP +
+ TXGBE_NB_QP_STATS * nb_queues;
+}
+
+static inline int
+txgbe_get_name_by_id(uint32_t id, char *name, uint32_t size)
+{
+ int nb, st;
+
+ /* Extended stats from txgbe_hw_stats */
+ if (id < TXGBE_NB_HW_STATS) {
+ snprintf(name, size, "[hw]%s",
+ rte_txgbe_stats_strings[id].name);
+ return 0;
+ }
+ id -= TXGBE_NB_HW_STATS;
+
+ /* Priority Stats */
+ if (id < TXGBE_NB_UP_STATS * TXGBE_MAX_UP) {
+ nb = id / TXGBE_NB_UP_STATS;
+ st = id % TXGBE_NB_UP_STATS;
+ snprintf(name, size, "[p%u]%s", nb,
+ rte_txgbe_up_strings[st].name);
+ return 0;
+ }
+ id -= TXGBE_NB_UP_STATS * TXGBE_MAX_UP;
+
+ /* Queue Stats */
+ if (id < TXGBE_NB_QP_STATS * TXGBE_MAX_QP) {
+ nb = id / TXGBE_NB_QP_STATS;
+ st = id % TXGBE_NB_QP_STATS;
+ snprintf(name, size, "[q%u]%s", nb,
+ rte_txgbe_qp_strings[st].name);
+ return 0;
+ }
+ id -= TXGBE_NB_QP_STATS * TXGBE_MAX_QP;
+
+ return -(int)(id + 1);
+}
+
+static inline int
+txgbe_get_offset_by_id(uint32_t id, uint32_t *offset)
+{
+ int nb, st;
+
+ /* Extended stats from txgbe_hw_stats */
+ if (id < TXGBE_NB_HW_STATS) {
+ *offset = rte_txgbe_stats_strings[id].offset;
+ return 0;
+ }
+ id -= TXGBE_NB_HW_STATS;
+
+ /* Priority Stats */
+ if (id < TXGBE_NB_UP_STATS * TXGBE_MAX_UP) {
+ nb = id / TXGBE_NB_UP_STATS;
+ st = id % TXGBE_NB_UP_STATS;
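+		/* each priority owns a contiguous block of TXGBE_NB_UP_STATS
+		 * uint64_t counters, so step by that stride per priority
+		 */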
+ *offset = rte_txgbe_up_strings[st].offset +
+ nb * (TXGBE_NB_UP_STATS * sizeof(uint64_t));
+ return 0;
+ }
+ id -= TXGBE_NB_UP_STATS * TXGBE_MAX_UP;
+
+ /* Queue Stats */
+ if (id < TXGBE_NB_QP_STATS * TXGBE_MAX_QP) {
+ nb = id / TXGBE_NB_QP_STATS;
+ st = id % TXGBE_NB_QP_STATS;
+ *offset = rte_txgbe_qp_strings[st].offset +
+ nb * (TXGBE_NB_QP_STATS * sizeof(uint64_t));
+ return 0;
+ }
+
+ return -1;
+}
+
+static int txgbe_dev_xstats_get_names(struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names, unsigned int limit)
+{
+ unsigned int i, count;
+
+ count = txgbe_xstats_calc_num(dev);
+ if (xstats_names == NULL)
+ return count;
+
+	/* Note: limit >= cnt_stats checked upstream
+	 * in rte_eth_xstats_get_names()
+	 */
+ limit = min(limit, count);
+
+ /* Extended stats from txgbe_hw_stats */
+ for (i = 0; i < limit; i++) {
+ if (txgbe_get_name_by_id(i, xstats_names[i].name,
+ sizeof(xstats_names[i].name))) {
+ PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
+ break;
+ }
+ }
+
+ return i;
+}
+
+static int txgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names,
+ const uint64_t *ids,
+ unsigned int limit)
+{
+ unsigned int i;
+
+ if (ids == NULL)
+ return txgbe_dev_xstats_get_names(dev, xstats_names, limit);
+
+ for (i = 0; i < limit; i++) {
+ if (txgbe_get_name_by_id(ids[i], xstats_names[i].name,
+ sizeof(xstats_names[i].name))) {
+ PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
+ return -1;
+ }
+ }
+
+ return i;
+}
+
+static int
+txgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
+ unsigned int limit)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
+ unsigned int i, count;
+
+ txgbe_read_stats_registers(hw, hw_stats);
+
+	/* If this is a reset, xstats is NULL and we have already cleared
+	 * the registers by reading them.
+	 */
+ count = txgbe_xstats_calc_num(dev);
+ if (xstats == NULL)
+ return count;
+
+ limit = min(limit, txgbe_xstats_calc_num(dev));
+
+ /* Extended stats from txgbe_hw_stats */
+ for (i = 0; i < limit; i++) {
+ uint32_t offset = 0;
+
+ if (txgbe_get_offset_by_id(i, &offset)) {
+ PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
+ break;
+ }
+ xstats[i].value = *(uint64_t *)(((char *)hw_stats) + offset);
+ xstats[i].id = i;
+ }
+
+ return i;
+}
+
+static int
+txgbe_dev_xstats_get_(struct rte_eth_dev *dev, uint64_t *values,
+ unsigned int limit)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
+ unsigned int i, count;
+
+ txgbe_read_stats_registers(hw, hw_stats);
+
+	/* If this is a reset, xstats is NULL and we have already cleared
+	 * the registers by reading them.
+	 */
+ count = txgbe_xstats_calc_num(dev);
+ if (values == NULL)
+ return count;
+
+ limit = min(limit, txgbe_xstats_calc_num(dev));
+
+ /* Extended stats from txgbe_hw_stats */
+ for (i = 0; i < limit; i++) {
+ uint32_t offset;
+
+ if (txgbe_get_offset_by_id(i, &offset)) {
+ PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
+ break;
+ }
+ values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
+ }
+
+ return i;
+}
+
+static int
+txgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
+ uint64_t *values, unsigned int limit)
+{
+ struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
+ unsigned int i;
+
+ if (ids == NULL)
+ return txgbe_dev_xstats_get_(dev, values, limit);
+
+ for (i = 0; i < limit; i++) {
+ uint32_t offset;
+
+ if (txgbe_get_offset_by_id(ids[i], &offset)) {
+ PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
+ break;
+ }
+ values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
+ }
+
+ return i;
+}
+
+static int
+txgbe_dev_xstats_reset(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
+
+ /* HW registers are cleared on read */
+ hw->offset_loaded = 0;
+ txgbe_read_stats_registers(hw, hw_stats);
+ hw->offset_loaded = 1;
+
+ /* Reset software totals */
+ memset(hw_stats, 0, sizeof(*hw_stats));
+
+ return 0;
+}
+
+static int
+txgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ u32 etrack_id;
+ int ret;
+
+ hw->phy.get_fw_version(hw, &etrack_id);
+
+ ret = snprintf(fw_version, fw_size, "0x%08x", etrack_id);
+ if (ret < 0)
+ return -EINVAL;
+
+ ret += 1; /* add the size of '\0' */
+ if (fw_size < (size_t)ret)
+ return ret;
+ else
+ return 0;
+}
+
+static int
+txgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
+{
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+
+ dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
+ dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
+ dev_info->min_rx_bufsize = 1024;
+ dev_info->max_rx_pktlen = 15872;
+ dev_info->max_mac_addrs = hw->mac.num_rar_entries;
+ dev_info->max_hash_mac_addrs = TXGBE_VMDQ_NUM_UC_MAC;
+ dev_info->max_vfs = pci_dev->max_vfs;
+ dev_info->max_vmdq_pools = ETH_64_POOLS;
+ dev_info->vmdq_queue_num = dev_info->max_rx_queues;
+ dev_info->rx_queue_offload_capa = txgbe_get_rx_queue_offloads(dev);
+ dev_info->rx_offload_capa = (txgbe_get_rx_port_offloads(dev) |
+ dev_info->rx_queue_offload_capa);
+ dev_info->tx_queue_offload_capa = txgbe_get_tx_queue_offloads(dev);
+ dev_info->tx_offload_capa = txgbe_get_tx_port_offloads(dev);
+
+ dev_info->default_rxconf = (struct rte_eth_rxconf) {
+ .rx_thresh = {
+ .pthresh = TXGBE_DEFAULT_RX_PTHRESH,
+ .hthresh = TXGBE_DEFAULT_RX_HTHRESH,
+ .wthresh = TXGBE_DEFAULT_RX_WTHRESH,
+ },
+ .rx_free_thresh = TXGBE_DEFAULT_RX_FREE_THRESH,
+ .rx_drop_en = 0,
+ .offloads = 0,
+ };
+
+ dev_info->default_txconf = (struct rte_eth_txconf) {
+ .tx_thresh = {
+ .pthresh = TXGBE_DEFAULT_TX_PTHRESH,
+ .hthresh = TXGBE_DEFAULT_TX_HTHRESH,
+ .wthresh = TXGBE_DEFAULT_TX_WTHRESH,
+ },
+ .tx_free_thresh = TXGBE_DEFAULT_TX_FREE_THRESH,
+ .offloads = 0,
+ };
+
+ dev_info->rx_desc_lim = rx_desc_lim;
+ dev_info->tx_desc_lim = tx_desc_lim;
+
+ dev_info->hash_key_size = TXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
+ dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
+ dev_info->flow_type_rss_offloads = TXGBE_RSS_OFFLOAD_ALL;
+
+ dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
+ dev_info->speed_capa |= ETH_LINK_SPEED_100M;
+
+ /* Driver-preferred Rx/Tx parameters */
+ dev_info->default_rxportconf.burst_size = 32;
+ dev_info->default_txportconf.burst_size = 32;
+ dev_info->default_rxportconf.nb_queues = 1;
+ dev_info->default_txportconf.nb_queues = 1;
+ dev_info->default_rxportconf.ring_size = 256;
+ dev_info->default_txportconf.ring_size = 256;
+
+ return 0;
+}
+
+const uint32_t *
+txgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
+{
+ if (dev->rx_pkt_burst == txgbe_recv_pkts ||
+ dev->rx_pkt_burst == txgbe_recv_pkts_lro_single_alloc ||
+ dev->rx_pkt_burst == txgbe_recv_pkts_lro_bulk_alloc ||
+ dev->rx_pkt_burst == txgbe_recv_pkts_bulk_alloc)
+ return txgbe_get_supported_ptypes();
+
+ return NULL;
+}
+
+void
+txgbe_dev_setup_link_alarm_handler(void *param)
+{
+ struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
+ u32 speed;
+ bool autoneg = false;
+
+ speed = hw->phy.autoneg_advertised;
+ if (!speed)
+ hw->mac.get_link_capabilities(hw, &speed, &autoneg);
+
+ hw->mac.setup_link(hw, speed, true);
+
+ intr->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG;
+}
+
+/* return 0 if the link status changed, -1 if it did not */
+int
+txgbe_dev_link_update_share(struct rte_eth_dev *dev,
+ int wait_to_complete)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct rte_eth_link link;
+ u32 link_speed = TXGBE_LINK_SPEED_UNKNOWN;
+ struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
+ bool link_up;
+ int err;
+ int wait = 1;
+
+ memset(&link, 0, sizeof(link));
+ link.link_status = ETH_LINK_DOWN;
+ link.link_speed = ETH_SPEED_NUM_NONE;
+ link.link_duplex = ETH_LINK_HALF_DUPLEX;
+ link.link_autoneg = ETH_LINK_AUTONEG;
+
+ hw->mac.get_link_status = true;
+
+ if (intr->flags & TXGBE_FLAG_NEED_LINK_CONFIG)
+ return rte_eth_linkstatus_set(dev, &link);
+
+	/* don't wait for completion if not requested or if lsc intr is enabled */
+ if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
+ wait = 0;
+
+ err = hw->mac.check_link(hw, &link_speed, &link_up, wait);
+
+ if (err != 0) {
+ link.link_speed = ETH_SPEED_NUM_100M;
+ link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ return rte_eth_linkstatus_set(dev, &link);
+ }
+
+ if (link_up == 0) {
+ if ((hw->subsystem_device_id & 0xFF) ==
+ TXGBE_DEV_ID_KR_KX_KX4) {
+ hw->mac.bp_down_event(hw);
+ } else if (hw->phy.media_type == txgbe_media_type_fiber) {
+ intr->flags |= TXGBE_FLAG_NEED_LINK_CONFIG;
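+			/* defer the link setup to the alarm handler so that
+			 * this context (possibly an interrupt) returns fast
+			 */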
+ rte_eal_alarm_set(10,
+ txgbe_dev_setup_link_alarm_handler, dev);
+ }
+ return rte_eth_linkstatus_set(dev, &link);
+ }
+
+ intr->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG;
+ link.link_status = ETH_LINK_UP;
+ link.link_duplex = ETH_LINK_FULL_DUPLEX;
+
+ switch (link_speed) {
+ default:
+ case TXGBE_LINK_SPEED_UNKNOWN:
+ link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ link.link_speed = ETH_SPEED_NUM_100M;
+ break;
+
+ case TXGBE_LINK_SPEED_100M_FULL:
+ link.link_speed = ETH_SPEED_NUM_100M;
+ break;
+
+ case TXGBE_LINK_SPEED_1GB_FULL:
+ link.link_speed = ETH_SPEED_NUM_1G;
+ break;
+
+ case TXGBE_LINK_SPEED_2_5GB_FULL:
+ link.link_speed = ETH_SPEED_NUM_2_5G;
+ break;
+
+ case TXGBE_LINK_SPEED_5GB_FULL:
+ link.link_speed = ETH_SPEED_NUM_5G;
+ break;
+
+ case TXGBE_LINK_SPEED_10GB_FULL:
+ link.link_speed = ETH_SPEED_NUM_10G;
+ break;
+ }
+
+ return rte_eth_linkstatus_set(dev, &link);
+}
+
+static int
+txgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
+{
+ return txgbe_dev_link_update_share(dev, wait_to_complete);
+}
+
+static int
+txgbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ uint32_t fctrl;
+
+ fctrl = rd32(hw, TXGBE_PSRCTL);
+ fctrl |= (TXGBE_PSRCTL_UCP | TXGBE_PSRCTL_MCP);
+ wr32(hw, TXGBE_PSRCTL, fctrl);
+
+ return 0;
+}
+
+static int
+txgbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ uint32_t fctrl;
+
+ fctrl = rd32(hw, TXGBE_PSRCTL);
+ fctrl &= (~TXGBE_PSRCTL_UCP);
+ if (dev->data->all_multicast == 1)
+ fctrl |= TXGBE_PSRCTL_MCP;
+ else
+ fctrl &= (~TXGBE_PSRCTL_MCP);
+ wr32(hw, TXGBE_PSRCTL, fctrl);
+
+ return 0;
+}
+
+static int
+txgbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ uint32_t fctrl;
+
+ fctrl = rd32(hw, TXGBE_PSRCTL);
+ fctrl |= TXGBE_PSRCTL_MCP;
+ wr32(hw, TXGBE_PSRCTL, fctrl);
+
+ return 0;
+}
+
+static int
+txgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ uint32_t fctrl;
+
+ if (dev->data->promiscuous == 1)
+ return 0; /* must remain in all_multicast mode */
+
+ fctrl = rd32(hw, TXGBE_PSRCTL);
+ fctrl &= (~TXGBE_PSRCTL_MCP);
+ wr32(hw, TXGBE_PSRCTL, fctrl);
+
+ return 0;
+}
+
+/**
+ * It clears the interrupt causes and enables the interrupt.
+ * It is called only once during NIC initialization.
+ *
+ * @param dev
+ * Pointer to struct rte_eth_dev.
+ * @param on
+ * Enable or Disable.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+txgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
+{
+ struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
+
+ txgbe_dev_link_status_print(dev);
+ if (on)
+ intr->mask_misc |= TXGBE_ICRMISC_LSC;
+ else
+ intr->mask_misc &= ~TXGBE_ICRMISC_LSC;
+
+ return 0;
+}
+
+static int
+txgbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev)
+{
+ struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
+ u64 mask;
+
+ mask = TXGBE_ICR_MASK;
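+	/* keep only the misc vector bit of the full cause mask */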
+ mask &= (1ULL << TXGBE_MISC_VEC_ID);
+ intr->mask |= mask;
+ intr->mask_misc |= TXGBE_ICRMISC_GPIO;
+ intr->mask_misc |= TXGBE_ICRMISC_ANDONE;
+ return 0;
+}
+
+/**
+ * It clears the interrupt causes and enables the interrupt.
+ * It is called only once during NIC initialization.
+ *
+ * @param dev
+ * Pointer to struct rte_eth_dev.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+txgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
+{
+ struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
+ u64 mask;
+
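+	/* strip the cause bits below the first Rx vector, so that only the
+	 * queue interrupt bits are added to the enable mask
+	 */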
+ mask = TXGBE_ICR_MASK;
+ mask &= ~((1ULL << TXGBE_RX_VEC_START) - 1);
+ intr->mask |= mask;
+
+ return 0;
+}
+
+/**
+ * It clears the interrupt causes and enables the interrupt.
+ * It is called only once during NIC initialization.
+ *
+ * @param dev
+ * Pointer to struct rte_eth_dev.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+txgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
+{
+ struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
+
+ intr->mask_misc |= TXGBE_ICRMISC_LNKSEC;
+
+ return 0;
+}
+
+/*
+ * It reads the ICR and sets the flags (e.g. TXGBE_ICRMISC_LSC) used by link_update.
+ *
+ * @param dev
+ * Pointer to struct rte_eth_dev.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
+{
+ uint32_t eicr;
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
+
+	/* clear all interrupt cause masks */
+ txgbe_disable_intr(hw);
+
+ /* read-on-clear nic registers here */
+ eicr = ((u32 *)hw->isb_mem)[TXGBE_ISB_MISC];
+ PMD_DRV_LOG(DEBUG, "eicr %x", eicr);
+
+ intr->flags = 0;
+
+ /* set flag for async link update */
+ if (eicr & TXGBE_ICRMISC_LSC)
+ intr->flags |= TXGBE_FLAG_NEED_LINK_UPDATE;
+
+ if (eicr & TXGBE_ICRMISC_ANDONE)
+ intr->flags |= TXGBE_FLAG_NEED_AN_CONFIG;
+
+ if (eicr & TXGBE_ICRMISC_VFMBX)
+ intr->flags |= TXGBE_FLAG_MAILBOX;
+
+ if (eicr & TXGBE_ICRMISC_LNKSEC)
+ intr->flags |= TXGBE_FLAG_MACSEC;
+
+ if (eicr & TXGBE_ICRMISC_GPIO)
+ intr->flags |= TXGBE_FLAG_PHY_INTERRUPT;
+
+ return 0;
+}
+
+/**
+ * It gets and then prints the link status.
+ *
+ * @param dev
+ *   Pointer to struct rte_eth_dev.
+ */
+static void
+txgbe_dev_link_status_print(struct rte_eth_dev *dev)
+{
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_eth_link link;
+
+ rte_eth_linkstatus_get(dev, &link);
+
+ if (link.link_status) {
+ PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
+ (int)(dev->data->port_id),
+ (unsigned int)link.link_speed,
+ link.link_duplex == ETH_LINK_FULL_DUPLEX ?
+ "full-duplex" : "half-duplex");
+ } else {
+ PMD_INIT_LOG(INFO, " Port %d: Link Down",
+ (int)(dev->data->port_id));
+ }
+ PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
+ pci_dev->addr.domain,
+ pci_dev->addr.bus,
+ pci_dev->addr.devid,
+ pci_dev->addr.function);
+}
+
+/*
+ * It executes link_update once an interrupt has occurred.
+ *
+ * @param dev
+ * Pointer to struct rte_eth_dev.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+txgbe_dev_interrupt_action(struct rte_eth_dev *dev,
+ struct rte_intr_handle *intr_handle)
+{
+ struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
+ int64_t timeout;
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+
+ PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);
+
+ if (intr->flags & TXGBE_FLAG_MAILBOX) {
+ txgbe_pf_mbx_process(dev);
+ intr->flags &= ~TXGBE_FLAG_MAILBOX;
+ }
+
+ if (intr->flags & TXGBE_FLAG_PHY_INTERRUPT) {
+ hw->phy.handle_lasi(hw);
+ intr->flags &= ~TXGBE_FLAG_PHY_INTERRUPT;
+ }
+
+ if (intr->flags & TXGBE_FLAG_NEED_AN_CONFIG) {
+ if (hw->devarg.auto_neg == 1 && hw->devarg.poll == 0) {
+ hw->mac.kr_handle(hw);
+ intr->flags &= ~TXGBE_FLAG_NEED_AN_CONFIG;
+ }
+ }
+
+ if (intr->flags & TXGBE_FLAG_NEED_LINK_UPDATE) {
+ struct rte_eth_link link;
+
+		/* get the link status before the link update, for later prediction */
+ rte_eth_linkstatus_get(dev, &link);
+
+ txgbe_dev_link_update(dev, 0);
+
+		/* the link is likely to come up */
+		if (!link.link_status)
+			/* handle it 1 sec later, waiting for it to stabilize */
+			timeout = TXGBE_LINK_UP_CHECK_TIMEOUT;
+		/* the link is likely to go down */
+		else if ((hw->subsystem_device_id & 0xFF) ==
+				TXGBE_DEV_ID_KR_KX_KX4 &&
+				hw->devarg.auto_neg == 1)
+			/* handle it 2 sec later for backplane AN73 */
+			timeout = 2000;
+		else
+			/* handle it 4 sec later, waiting for it to stabilize */
+			timeout = TXGBE_LINK_DOWN_CHECK_TIMEOUT;
+
+ txgbe_dev_link_status_print(dev);
+ if (rte_eal_alarm_set(timeout * 1000,
+ txgbe_dev_interrupt_delayed_handler,
+ (void *)dev) < 0) {
+ PMD_DRV_LOG(ERR, "Error setting alarm");
+ } else {
+ /* only disable lsc interrupt */
+ intr->mask_misc &= ~TXGBE_ICRMISC_LSC;
+
+ intr->mask_orig = intr->mask;
+ /* only disable all misc interrupts */
+ intr->mask &= ~(1ULL << TXGBE_MISC_VEC_ID);
+ }
+ }
+
+ PMD_DRV_LOG(DEBUG, "enable intr immediately");
+ txgbe_enable_intr(dev);
+ rte_intr_enable(intr_handle);
+
+ return 0;
+}
+
+/**
+ * Interrupt handler registered as an alarm callback for delayed handling of
+ * a specific interrupt, to wait for a stable NIC state. As the txgbe
+ * interrupt state is not stable right after the link goes down, it needs
+ * to wait 4 seconds for the status to become stable.
+ *
+ * @param handle
+ * Pointer to interrupt handle.
+ * @param param
+ * The address of parameter (struct rte_eth_dev *) registered before.
+ *
+ * @return
+ * void
+ */
+static void
+txgbe_dev_interrupt_delayed_handler(void *param)
+{
+ struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ uint32_t eicr;
+
+ txgbe_disable_intr(hw);
+
+ eicr = ((u32 *)hw->isb_mem)[TXGBE_ISB_MISC];
+ if (eicr & TXGBE_ICRMISC_VFMBX)
+ txgbe_pf_mbx_process(dev);
+
+ if (intr->flags & TXGBE_FLAG_PHY_INTERRUPT) {
+ hw->phy.handle_lasi(hw);
+ intr->flags &= ~TXGBE_FLAG_PHY_INTERRUPT;
+ }
+
+ if (intr->flags & TXGBE_FLAG_NEED_LINK_UPDATE) {
+ txgbe_dev_link_update(dev, 0);
+ intr->flags &= ~TXGBE_FLAG_NEED_LINK_UPDATE;
+ txgbe_dev_link_status_print(dev);
+ rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
+ NULL);
+ }
+
+ if (intr->flags & TXGBE_FLAG_MACSEC) {
+ rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC,
+ NULL);
+ intr->flags &= ~TXGBE_FLAG_MACSEC;
+ }
+
+ /* restore original mask */
+ intr->mask_misc |= TXGBE_ICRMISC_LSC;
+
+ intr->mask = intr->mask_orig;
+ intr->mask_orig = 0;
+
+ PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
+ txgbe_enable_intr(dev);
+ rte_intr_enable(intr_handle);
+}
+
+/**
+ * Interrupt handler triggered by NIC for handling
+ * specific interrupt.
+ *
+ * @param handle
+ * Pointer to interrupt handle.
+ * @param param
+ * The address of parameter (struct rte_eth_dev *) registered before.
+ *
+ * @return
+ * void
+ */
+static void
+txgbe_dev_interrupt_handler(void *param)
+{
+ struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+
+ txgbe_dev_interrupt_get_status(dev);
+ txgbe_dev_interrupt_action(dev, dev->intr_handle);
+}
+
+static int
+txgbe_dev_led_on(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw;
+
+ hw = TXGBE_DEV_HW(dev);
+ return txgbe_led_on(hw, 4) == 0 ? 0 : -ENOTSUP;
+}
+
+static int
+txgbe_dev_led_off(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw;
+
+ hw = TXGBE_DEV_HW(dev);
+ return txgbe_led_off(hw, 4) == 0 ? 0 : -ENOTSUP;
+}
+
+static int
+txgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
+{
+ struct txgbe_hw *hw;
+ uint32_t mflcn_reg;
+ uint32_t fccfg_reg;
+ int rx_pause;
+ int tx_pause;
+
+ hw = TXGBE_DEV_HW(dev);
+
+ fc_conf->pause_time = hw->fc.pause_time;
+ fc_conf->high_water = hw->fc.high_water[0];
+ fc_conf->low_water = hw->fc.low_water[0];
+ fc_conf->send_xon = hw->fc.send_xon;
+ fc_conf->autoneg = !hw->fc.disable_fc_autoneg;
+
+ /*
+ * Return rx_pause status according to actual setting of
+ * RXFCCFG register.
+ */
+ mflcn_reg = rd32(hw, TXGBE_RXFCCFG);
+ if (mflcn_reg & (TXGBE_RXFCCFG_FC | TXGBE_RXFCCFG_PFC))
+ rx_pause = 1;
+ else
+ rx_pause = 0;
+
+ /*
+ * Return tx_pause status according to actual setting of
+ * TXFCCFG register.
+ */
+ fccfg_reg = rd32(hw, TXGBE_TXFCCFG);
+ if (fccfg_reg & (TXGBE_TXFCCFG_FC | TXGBE_TXFCCFG_PFC))
+ tx_pause = 1;
+ else
+ tx_pause = 0;
+
+ if (rx_pause && tx_pause)
+ fc_conf->mode = RTE_FC_FULL;
+ else if (rx_pause)
+ fc_conf->mode = RTE_FC_RX_PAUSE;
+ else if (tx_pause)
+ fc_conf->mode = RTE_FC_TX_PAUSE;
+ else
+ fc_conf->mode = RTE_FC_NONE;
+
+ return 0;
+}
+
+static int
+txgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
+{
+ struct txgbe_hw *hw;
+ int err;
+ uint32_t rx_buf_size;
+ uint32_t max_high_water;
+ enum txgbe_fc_mode rte_fcmode_2_txgbe_fcmode[] = {
+ txgbe_fc_none,
+ txgbe_fc_rx_pause,
+ txgbe_fc_tx_pause,
+ txgbe_fc_full
+ };
+
+ PMD_INIT_FUNC_TRACE();
+
+ hw = TXGBE_DEV_HW(dev);
+ rx_buf_size = rd32(hw, TXGBE_PBRXSIZE(0));
+ PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
+
+	/*
+	 * Reserve at least one Ethernet frame for the watermark;
+	 * high_water/low_water are in kilobytes for txgbe.
+	 */
+ max_high_water = (rx_buf_size - RTE_ETHER_MAX_LEN) >> 10;
+ if (fc_conf->high_water > max_high_water ||
+ fc_conf->high_water < fc_conf->low_water) {
+ PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
+		PMD_INIT_LOG(ERR, "High_water must be <= 0x%x", max_high_water);
+ return -EINVAL;
+ }
+
+ hw->fc.requested_mode = rte_fcmode_2_txgbe_fcmode[fc_conf->mode];
+ hw->fc.pause_time = fc_conf->pause_time;
+ hw->fc.high_water[0] = fc_conf->high_water;
+ hw->fc.low_water[0] = fc_conf->low_water;
+ hw->fc.send_xon = fc_conf->send_xon;
+ hw->fc.disable_fc_autoneg = !fc_conf->autoneg;
+
+ err = txgbe_fc_enable(hw);
+
+ /* Not negotiated is not an error case */
+ if (err == 0 || err == TXGBE_ERR_FC_NOT_NEGOTIATED) {
+ wr32m(hw, TXGBE_MACRXFLT, TXGBE_MACRXFLT_CTL_MASK,
+ (fc_conf->mac_ctrl_frame_fwd
+ ? TXGBE_MACRXFLT_CTL_NOPS : TXGBE_MACRXFLT_CTL_DROP));
+ txgbe_flush(hw);
+
+ return 0;
+ }
+
+ PMD_INIT_LOG(ERR, "txgbe_fc_enable = 0x%x", err);
+ return -EIO;
+}
+
+static int
+txgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
+ struct rte_eth_pfc_conf *pfc_conf)
+{
+ int err;
+ uint32_t rx_buf_size;
+ uint32_t max_high_water;
+ uint8_t tc_num;
+ uint8_t map[TXGBE_DCB_UP_MAX] = { 0 };
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_dcb_config *dcb_config = TXGBE_DEV_DCB_CONFIG(dev);
+
+ enum txgbe_fc_mode rte_fcmode_2_txgbe_fcmode[] = {
+ txgbe_fc_none,
+ txgbe_fc_rx_pause,
+ txgbe_fc_tx_pause,
+ txgbe_fc_full
+ };
+
+ PMD_INIT_FUNC_TRACE();
+
+ txgbe_dcb_unpack_map_cee(dcb_config, TXGBE_DCB_RX_CONFIG, map);
+ tc_num = map[pfc_conf->priority];
+ rx_buf_size = rd32(hw, TXGBE_PBRXSIZE(tc_num));
+ PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
+	/*
+	 * Reserve at least one Ethernet frame for the watermark;
+	 * high_water/low_water are in kilobytes for txgbe.
+	 */
+ max_high_water = (rx_buf_size - RTE_ETHER_MAX_LEN) >> 10;
+ if (pfc_conf->fc.high_water > max_high_water ||
+ pfc_conf->fc.high_water <= pfc_conf->fc.low_water) {
+ PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
+		PMD_INIT_LOG(ERR, "High_water must be <= 0x%x", max_high_water);
+ return -EINVAL;
+ }
+
+ hw->fc.requested_mode = rte_fcmode_2_txgbe_fcmode[pfc_conf->fc.mode];
+ hw->fc.pause_time = pfc_conf->fc.pause_time;
+ hw->fc.send_xon = pfc_conf->fc.send_xon;
+ hw->fc.low_water[tc_num] = pfc_conf->fc.low_water;
+ hw->fc.high_water[tc_num] = pfc_conf->fc.high_water;
+
+ err = txgbe_dcb_pfc_enable(hw, tc_num);
+
+ /* Not negotiated is not an error case */
+ if (err == 0 || err == TXGBE_ERR_FC_NOT_NEGOTIATED)
+ return 0;
+
+ PMD_INIT_LOG(ERR, "txgbe_dcb_pfc_enable = 0x%x", err);
+ return -EIO;
+}
+
+int
+txgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ uint8_t i, j, mask;
+ uint32_t reta;
+ uint16_t idx, shift;
+ struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (!txgbe_rss_update_sp(hw->mac.type)) {
+ PMD_DRV_LOG(ERR, "RSS reta update is not supported on this "
+ "NIC.");
+ return -ENOTSUP;
+ }
+
+ if (reta_size != ETH_RSS_RETA_SIZE_128) {
+		PMD_DRV_LOG(ERR, "The size of the configured hash lookup table "
+			"(%d) doesn't match the number hardware can support "
+			"(%d)", reta_size, ETH_RSS_RETA_SIZE_128);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < reta_size; i += 4) {
+ idx = i / RTE_RETA_GROUP_SIZE;
+ shift = i % RTE_RETA_GROUP_SIZE;
+ mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
+ if (!mask)
+ continue;
+
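+		/* each 32-bit RSSTBL register packs four 8-bit queue entries;
+		 * rewrite only the bytes selected by the 4-bit group mask
+		 */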
+ reta = rd32at(hw, TXGBE_REG_RSSTBL, i >> 2);
+ for (j = 0; j < 4; j++) {
+ if (RS8(mask, j, 0x1)) {
+ reta &= ~(MS32(8 * j, 0xFF));
+ reta |= LS32(reta_conf[idx].reta[shift + j],
+ 8 * j, 0xFF);
+ }
+ }
+ wr32at(hw, TXGBE_REG_RSSTBL, i >> 2, reta);
+ }
+ adapter->rss_reta_updated = 1;
+
+ return 0;
+}
+
+int
+txgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ uint8_t i, j, mask;
+ uint32_t reta;
+ uint16_t idx, shift;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (reta_size != ETH_RSS_RETA_SIZE_128) {
+		PMD_DRV_LOG(ERR, "The size of the configured hash lookup table "
+			"(%d) doesn't match the number hardware can support "
+			"(%d)", reta_size, ETH_RSS_RETA_SIZE_128);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < reta_size; i += 4) {
+ idx = i / RTE_RETA_GROUP_SIZE;
+ shift = i % RTE_RETA_GROUP_SIZE;
+ mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
+ if (!mask)
+ continue;
+
+ reta = rd32at(hw, TXGBE_REG_RSSTBL, i >> 2);
+ for (j = 0; j < 4; j++) {
+ if (RS8(mask, j, 0x1))
+ reta_conf[idx].reta[shift + j] =
+ (uint16_t)RS32(reta, 8 * j, 0xFF);
+ }
+ }
+
+ return 0;
+}
+
+static int
+txgbe_add_rar(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
+ uint32_t index, uint32_t pool)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ uint32_t enable_addr = 1;
+
+ return txgbe_set_rar(hw, index, mac_addr->addr_bytes,
+ pool, enable_addr);
+}
+
+static void
+txgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+
+ txgbe_clear_rar(hw, index);
+}
+
+static int
+txgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
+{
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+
+ txgbe_remove_rar(dev, 0);
+ txgbe_add_rar(dev, addr, 0, pci_dev->max_vfs);
+
+ return 0;
+}
+
+static int
+txgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct rte_eth_dev_info dev_info;
+ uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
+ struct rte_eth_dev_data *dev_data = dev->data;
+ int ret;
+
+ ret = txgbe_dev_info_get(dev, &dev_info);
+ if (ret != 0)
+ return ret;
+
+ /* check that mtu is within the allowed range */
+ if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen)
+ return -EINVAL;
+
+	/* If the device is started, refuse an mtu that requires scattered
+	 * packet support when this feature has not been enabled before.
+	 */
+ if (dev_data->dev_started && !dev_data->scattered_rx &&
+ (frame_size + 2 * TXGBE_VLAN_TAG_SIZE >
+ dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
+ PMD_INIT_LOG(ERR, "Stop port first.");
+ return -EINVAL;
+ }
+
+ /* update max frame size */
+ dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
+
+ if (hw->mode)
+ wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK,
+ TXGBE_FRAME_SIZE_MAX);
+ else
+ wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK,
+ TXGBE_FRMSZ_MAX(frame_size));
+
+ return 0;
+}
+
+static uint32_t
+txgbe_uta_vector(struct txgbe_hw *hw, struct rte_ether_addr *uc_addr)
+{
+ uint32_t vector = 0;
+
+ switch (hw->mac.mc_filter_type) {
+ case 0: /* use bits [47:36] of the address */
+ vector = ((uc_addr->addr_bytes[4] >> 4) |
+ (((uint16_t)uc_addr->addr_bytes[5]) << 4));
+ break;
+ case 1: /* use bits [46:35] of the address */
+ vector = ((uc_addr->addr_bytes[4] >> 3) |
+ (((uint16_t)uc_addr->addr_bytes[5]) << 5));
+ break;
+ case 2: /* use bits [45:34] of the address */
+ vector = ((uc_addr->addr_bytes[4] >> 2) |
+ (((uint16_t)uc_addr->addr_bytes[5]) << 6));
+ break;
+ case 3: /* use bits [43:32] of the address */
+ vector = ((uc_addr->addr_bytes[4]) |
+ (((uint16_t)uc_addr->addr_bytes[5]) << 8));
+ break;
+ default: /* Invalid mc_filter_type */
+ break;
+ }
+
+	/* the vector can only be 12 bits wide, or the boundary will be exceeded */
+ vector &= 0xFFF;
+ return vector;
+}
+
+static int
+txgbe_uc_hash_table_set(struct rte_eth_dev *dev,
+ struct rte_ether_addr *mac_addr, uint8_t on)
+{
+ uint32_t vector;
+ uint32_t uta_idx;
+ uint32_t reg_val;
+ uint32_t uta_mask;
+ uint32_t psrctl;
+
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_uta_info *uta_info = TXGBE_DEV_UTA_INFO(dev);
+
+ /* The UTA table only exists on pf hardware */
+ if (hw->mac.type < txgbe_mac_raptor)
+ return -ENOTSUP;
+
+ vector = txgbe_uta_vector(hw, mac_addr);
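+	/* the 12-bit hash indexes a 4096-bit table: the upper 7 bits select
+	 * one of 128 32-bit registers, the lower 5 bits a bit within it
+	 */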
+ uta_idx = (vector >> 5) & 0x7F;
+ uta_mask = 0x1UL << (vector & 0x1F);
+
+ if (!!on == !!(uta_info->uta_shadow[uta_idx] & uta_mask))
+ return 0;
+
+ reg_val = rd32(hw, TXGBE_UCADDRTBL(uta_idx));
+ if (on) {
+ uta_info->uta_in_use++;
+ reg_val |= uta_mask;
+ uta_info->uta_shadow[uta_idx] |= uta_mask;
+ } else {
+ uta_info->uta_in_use--;
+ reg_val &= ~uta_mask;
+ uta_info->uta_shadow[uta_idx] &= ~uta_mask;
+ }
+
+ wr32(hw, TXGBE_UCADDRTBL(uta_idx), reg_val);
+
+ psrctl = rd32(hw, TXGBE_PSRCTL);
+ if (uta_info->uta_in_use > 0)
+ psrctl |= TXGBE_PSRCTL_UCHFENA;
+ else
+ psrctl &= ~TXGBE_PSRCTL_UCHFENA;
+
+ psrctl &= ~TXGBE_PSRCTL_ADHF12_MASK;
+ psrctl |= TXGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
+ wr32(hw, TXGBE_PSRCTL, psrctl);
+
+ return 0;
+}
+
+static int
+txgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_uta_info *uta_info = TXGBE_DEV_UTA_INFO(dev);
+ uint32_t psrctl;
+ int i;
+
+ /* The UTA table only exists on pf hardware */
+ if (hw->mac.type < txgbe_mac_raptor)
+ return -ENOTSUP;
+
+ if (on) {
+ for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
+ uta_info->uta_shadow[i] = ~0;
+ wr32(hw, TXGBE_UCADDRTBL(i), ~0);
+ }
+ } else {
+ for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
+ uta_info->uta_shadow[i] = 0;
+ wr32(hw, TXGBE_UCADDRTBL(i), 0);
+ }
+ }
+
+ psrctl = rd32(hw, TXGBE_PSRCTL);
+ if (on)
+ psrctl |= TXGBE_PSRCTL_UCHFENA;
+ else
+ psrctl &= ~TXGBE_PSRCTL_UCHFENA;
+
+ psrctl &= ~TXGBE_PSRCTL_ADHF12_MASK;
+ psrctl |= TXGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
+ wr32(hw, TXGBE_PSRCTL, psrctl);
+
+ return 0;
+}
+
+uint32_t
+txgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val)
+{
+ uint32_t new_val = orig_val;
+
+ if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG)
+ new_val |= TXGBE_POOLETHCTL_UTA;
+ if (rx_mask & ETH_VMDQ_ACCEPT_HASH_MC)
+ new_val |= TXGBE_POOLETHCTL_MCHA;
+ if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC)
+ new_val |= TXGBE_POOLETHCTL_UCHA;
+ if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
+ new_val |= TXGBE_POOLETHCTL_BCA;
+ if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
+ new_val |= TXGBE_POOLETHCTL_MCP;
+
+ return new_val;
+}
+
+static int
+txgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ uint32_t mask;
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+
+ if (queue_id < 32) {
+ mask = rd32(hw, TXGBE_IMS(0));
+ mask &= (1 << queue_id);
+ wr32(hw, TXGBE_IMS(0), mask);
+ } else if (queue_id < 64) {
+ mask = rd32(hw, TXGBE_IMS(1));
+ mask &= (1 << (queue_id - 32));
+ wr32(hw, TXGBE_IMS(1), mask);
+ }
+ rte_intr_enable(intr_handle);
+
+ return 0;
+}
+
+static int
+txgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ uint32_t mask;
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+
+ if (queue_id < 32) {
+ mask = rd32(hw, TXGBE_IMS(0));
+ mask &= ~(1 << queue_id);
+ wr32(hw, TXGBE_IMS(0), mask);
+ } else if (queue_id < 64) {
+ mask = rd32(hw, TXGBE_IMS(1));
+ mask &= ~(1 << (queue_id - 32));
+ wr32(hw, TXGBE_IMS(1), mask);
+ }
+
+ return 0;
+}
+
+/**
+ * set the IVAR registers, mapping interrupt causes to vectors
+ * @param hw
+ * pointer to txgbe_hw struct
+ * @param direction
+ *  0 for Rx, 1 for Tx, -1 for other causes
+ * @param queue
+ *  queue to map the corresponding interrupt to
+ * @param msix_vector
+ *  the vector to map to the corresponding queue
+ */
+void
+txgbe_set_ivar_map(struct txgbe_hw *hw, int8_t direction,
+ uint8_t queue, uint8_t msix_vector)
+{
+ uint32_t tmp, idx;
+
+ if (direction == -1) {
+ /* other causes */
+ msix_vector |= TXGBE_IVARMISC_VLD;
+ idx = 0;
+ tmp = rd32(hw, TXGBE_IVARMISC);
+ tmp &= ~(0xFF << idx);
+ tmp |= (msix_vector << idx);
+ wr32(hw, TXGBE_IVARMISC, tmp);
+ } else {
+ /* rx or tx causes */
+		/* Workaround for ICR loss */
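+		/* each 32-bit IVAR register holds four 8-bit entries: Rx and
+		 * Tx of an even queue in the low half and of an odd queue in
+		 * the high half, hence queue >> 1 selects the register
+		 */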
+ idx = ((16 * (queue & 1)) + (8 * direction));
+ tmp = rd32(hw, TXGBE_IVAR(queue >> 1));
+ tmp &= ~(0xFF << idx);
+ tmp |= (msix_vector << idx);
+ wr32(hw, TXGBE_IVAR(queue >> 1), tmp);
+ }
+}
+
+/**
+ * Sets up the hardware to properly generate MSI-X interrupts
+ * @param dev
+ *  pointer to the rte_eth_dev structure
+ */
+static void
+txgbe_configure_msix(struct rte_eth_dev *dev)
+{
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ uint32_t queue_id, base = TXGBE_MISC_VEC_ID;
+ uint32_t vec = TXGBE_MISC_VEC_ID;
+ uint32_t gpie;
+
+	/* Won't configure the MSI-X register if no mapping is done
+	 * between intr vector and event fd. But if MSI-X has been
+	 * enabled already, we need to configure auto clean, auto mask
+	 * and throttling.
+	 */
+ gpie = rd32(hw, TXGBE_GPIE);
+ if (!rte_intr_dp_is_en(intr_handle) &&
+ !(gpie & TXGBE_GPIE_MSIX))
+ return;
+
+ if (rte_intr_allow_others(intr_handle)) {
+ base = TXGBE_RX_VEC_START;
+ vec = base;
+ }
+
+ /* setup GPIE for MSI-x mode */
+ gpie = rd32(hw, TXGBE_GPIE);
+ gpie |= TXGBE_GPIE_MSIX;
+ wr32(hw, TXGBE_GPIE, gpie);
+
+ /* Populate the IVAR table and set the ITR values to the
+ * corresponding register.
+ */
+ if (rte_intr_dp_is_en(intr_handle)) {
+ for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
+ queue_id++) {
+ /* by default, 1:1 mapping */
+ txgbe_set_ivar_map(hw, 0, queue_id, vec);
+ intr_handle->intr_vec[queue_id] = vec;
+ if (vec < base + intr_handle->nb_efd - 1)
+ vec++;
+ }
+
+ txgbe_set_ivar_map(hw, -1, 1, TXGBE_MISC_VEC_ID);
+ }
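+	/* set a default interrupt throttle interval for the misc vector */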
+ wr32(hw, TXGBE_ITR(TXGBE_MISC_VEC_ID),
+ TXGBE_ITR_IVAL_10G(TXGBE_QUEUE_ITR_INTERVAL_DEFAULT)
+ | TXGBE_ITR_WRDSA);
+}
+
+int
+txgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
+ uint16_t queue_idx, uint16_t tx_rate)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ uint32_t bcnrc_val;
+
+ if (queue_idx >= hw->mac.max_tx_queues)
+ return -EINVAL;
+
+ if (tx_rate != 0) {
+ bcnrc_val = TXGBE_ARBTXRATE_MAX(tx_rate);
+ bcnrc_val |= TXGBE_ARBTXRATE_MIN(tx_rate / 2);
+ } else {
+ bcnrc_val = 0;
+ }
+
+	/*
+	 * Set the global transmit compensation time to the MMW_SIZE in the
+	 * ARBTXMMW register. MMW_SIZE=0x014 if 9728-byte jumbo is supported.
+	 */
+ wr32(hw, TXGBE_ARBTXMMW, 0x14);
+
+ /* Set ARBTXRATE of queue X */
+ wr32(hw, TXGBE_ARBPOOLIDX, queue_idx);
+ wr32(hw, TXGBE_ARBTXRATE, bcnrc_val);
+ txgbe_flush(hw);
+
+ return 0;
+}
+
+int
+txgbe_syn_filter_set(struct rte_eth_dev *dev,
+ struct rte_eth_syn_filter *filter,
+ bool add)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+ uint32_t syn_info;
+ uint32_t synqf;
+
+ if (filter->queue >= TXGBE_MAX_RX_QUEUE_NUM)
+ return -EINVAL;
+
+ syn_info = filter_info->syn_info;
+
+ if (add) {
+ if (syn_info & TXGBE_SYNCLS_ENA)
+ return -EINVAL;
+ synqf = (uint32_t)TXGBE_SYNCLS_QPID(filter->queue);
+ synqf |= TXGBE_SYNCLS_ENA;
+
+ if (filter->hig_pri)
+ synqf |= TXGBE_SYNCLS_HIPRIO;
+ else
+ synqf &= ~TXGBE_SYNCLS_HIPRIO;
+ } else {
+ synqf = rd32(hw, TXGBE_SYNCLS);
+ if (!(syn_info & TXGBE_SYNCLS_ENA))
+ return -ENOENT;
+ synqf &= ~(TXGBE_SYNCLS_QPID_MASK | TXGBE_SYNCLS_ENA);
+ }
+
+ filter_info->syn_info = synqf;
+ wr32(hw, TXGBE_SYNCLS, synqf);
+ txgbe_flush(hw);
+ return 0;
+}
+
+static inline enum txgbe_5tuple_protocol
+convert_protocol_type(uint8_t protocol_value)
+{
+ if (protocol_value == IPPROTO_TCP)
+ return TXGBE_5TF_PROT_TCP;
+ else if (protocol_value == IPPROTO_UDP)
+ return TXGBE_5TF_PROT_UDP;
+ else if (protocol_value == IPPROTO_SCTP)
+ return TXGBE_5TF_PROT_SCTP;
+ else
+ return TXGBE_5TF_PROT_NONE;
+}
+
+/* inject a 5-tuple filter to HW */
+static inline void
+txgbe_inject_5tuple_filter(struct rte_eth_dev *dev,
+ struct txgbe_5tuple_filter *filter)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ int i;
+ uint32_t ftqf, sdpqf;
+ uint32_t l34timir = 0;
+ uint32_t mask = TXGBE_5TFCTL0_MASK;
+
+ i = filter->index;
+ sdpqf = TXGBE_5TFPORT_DST(be_to_le16(filter->filter_info.dst_port));
+ sdpqf |= TXGBE_5TFPORT_SRC(be_to_le16(filter->filter_info.src_port));
+
+ ftqf = TXGBE_5TFCTL0_PROTO(filter->filter_info.proto);
+ ftqf |= TXGBE_5TFCTL0_PRI(filter->filter_info.priority);
+	if (filter->filter_info.src_ip_mask == 0) /* mask 0 means compare. */
+ mask &= ~TXGBE_5TFCTL0_MSADDR;
+ if (filter->filter_info.dst_ip_mask == 0)
+ mask &= ~TXGBE_5TFCTL0_MDADDR;
+ if (filter->filter_info.src_port_mask == 0)
+ mask &= ~TXGBE_5TFCTL0_MSPORT;
+ if (filter->filter_info.dst_port_mask == 0)
+ mask &= ~TXGBE_5TFCTL0_MDPORT;
+ if (filter->filter_info.proto_mask == 0)
+ mask &= ~TXGBE_5TFCTL0_MPROTO;
+ ftqf |= mask;
+ ftqf |= TXGBE_5TFCTL0_MPOOL;
+ ftqf |= TXGBE_5TFCTL0_ENA;
+
+ wr32(hw, TXGBE_5TFDADDR(i), be_to_le32(filter->filter_info.dst_ip));
+ wr32(hw, TXGBE_5TFSADDR(i), be_to_le32(filter->filter_info.src_ip));
+ wr32(hw, TXGBE_5TFPORT(i), sdpqf);
+ wr32(hw, TXGBE_5TFCTL0(i), ftqf);
+
+ l34timir |= TXGBE_5TFCTL1_QP(filter->queue);
+ wr32(hw, TXGBE_5TFCTL1(i), l34timir);
+}
+
+/*
+ * add a 5tuple filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * filter: pointer to the filter that will be added; an unused filter
+ *	index is allocated for it, and the target queue is taken from it.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+txgbe_add_5tuple_filter(struct rte_eth_dev *dev,
+ struct txgbe_5tuple_filter *filter)
+{
+ struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+ int i, idx, shift;
+
+ /*
+ * look for an unused 5tuple filter index,
+	 * and insert the filter into the list.
+ */
+ for (i = 0; i < TXGBE_MAX_FTQF_FILTERS; i++) {
+ idx = i / (sizeof(uint32_t) * NBBY);
+ shift = i % (sizeof(uint32_t) * NBBY);
+ if (!(filter_info->fivetuple_mask[idx] & (1 << shift))) {
+ filter_info->fivetuple_mask[idx] |= 1 << shift;
+ filter->index = i;
+ TAILQ_INSERT_TAIL(&filter_info->fivetuple_list,
+ filter,
+ entries);
+ break;
+ }
+ }
+ if (i >= TXGBE_MAX_FTQF_FILTERS) {
+ PMD_DRV_LOG(ERR, "5tuple filters are full.");
+ return -ENOSYS;
+ }
+
+ txgbe_inject_5tuple_filter(dev, filter);
+
+ return 0;
+}
+
+/*
+ * remove a 5tuple filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * filter: pointer to the filter that will be removed.
+ */
+static void
+txgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
+ struct txgbe_5tuple_filter *filter)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+ uint16_t index = filter->index;
+
+ filter_info->fivetuple_mask[index / (sizeof(uint32_t) * NBBY)] &=
+ ~(1 << (index % (sizeof(uint32_t) * NBBY)));
+ TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries);
+ rte_free(filter);
+
+ wr32(hw, TXGBE_5TFDADDR(index), 0);
+ wr32(hw, TXGBE_5TFSADDR(index), 0);
+ wr32(hw, TXGBE_5TFPORT(index), 0);
+ wr32(hw, TXGBE_5TFCTL0(index), 0);
+ wr32(hw, TXGBE_5TFCTL1(index), 0);
+}
+
+static inline struct txgbe_5tuple_filter *
+txgbe_5tuple_filter_lookup(struct txgbe_5tuple_filter_list *filter_list,
+ struct txgbe_5tuple_filter_info *key)
+{
+ struct txgbe_5tuple_filter *it;
+
+ TAILQ_FOREACH(it, filter_list, entries) {
+ if (memcmp(key, &it->filter_info,
+ sizeof(struct txgbe_5tuple_filter_info)) == 0) {
+ return it;
+ }
+ }
+ return NULL;
+}
+
+/* translate elements in struct rte_eth_ntuple_filter
+ * to struct txgbe_5tuple_filter_info
+ */
+static inline int
+ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter *filter,
+ struct txgbe_5tuple_filter_info *filter_info)
+{
+ if (filter->queue >= TXGBE_MAX_RX_QUEUE_NUM ||
+ filter->priority > TXGBE_5TUPLE_MAX_PRI ||
+ filter->priority < TXGBE_5TUPLE_MIN_PRI)
+ return -EINVAL;
+
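+	/* Note: the hardware mask sense is inverted relative to the API:
+	 * a full software mask (all ones) means "compare this field" and
+	 * maps to hw mask 0, while a zero software mask means "ignore" and
+	 * maps to hw mask 1; partial masks are rejected.
+	 */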
+ switch (filter->dst_ip_mask) {
+ case UINT32_MAX:
+ filter_info->dst_ip_mask = 0;
+ filter_info->dst_ip = filter->dst_ip;
+ break;
+ case 0:
+ filter_info->dst_ip_mask = 1;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "invalid dst_ip mask.");
+ return -EINVAL;
+ }
+
+ switch (filter->src_ip_mask) {
+ case UINT32_MAX:
+ filter_info->src_ip_mask = 0;
+ filter_info->src_ip = filter->src_ip;
+ break;
+ case 0:
+ filter_info->src_ip_mask = 1;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "invalid src_ip mask.");
+ return -EINVAL;
+ }
+
+ switch (filter->dst_port_mask) {
+ case UINT16_MAX:
+ filter_info->dst_port_mask = 0;
+ filter_info->dst_port = filter->dst_port;
+ break;
+ case 0:
+ filter_info->dst_port_mask = 1;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "invalid dst_port mask.");
+ return -EINVAL;
+ }
+
+ switch (filter->src_port_mask) {
+ case UINT16_MAX:
+ filter_info->src_port_mask = 0;
+ filter_info->src_port = filter->src_port;
+ break;
+ case 0:
+ filter_info->src_port_mask = 1;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "invalid src_port mask.");
+ return -EINVAL;
+ }
+
+ switch (filter->proto_mask) {
+ case UINT8_MAX:
+ filter_info->proto_mask = 0;
+ filter_info->proto =
+ convert_protocol_type(filter->proto);
+ break;
+ case 0:
+ filter_info->proto_mask = 1;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "invalid protocol mask.");
+ return -EINVAL;
+ }
+
+ filter_info->priority = (uint8_t)filter->priority;
+ return 0;
+}
+
+/*
+ * add or delete a ntuple filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
+ * add: if true, add filter, if false, remove filter
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+int
+txgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
+ struct rte_eth_ntuple_filter *ntuple_filter,
+ bool add)
+{
+ struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+ struct txgbe_5tuple_filter_info filter_5tuple;
+ struct txgbe_5tuple_filter *filter;
+ int ret;
+
+ if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) {
+ PMD_DRV_LOG(ERR, "only 5tuple is supported.");
+ return -EINVAL;
+ }
+
+ memset(&filter_5tuple, 0, sizeof(struct txgbe_5tuple_filter_info));
+ ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple);
+ if (ret < 0)
+ return ret;
+
+ filter = txgbe_5tuple_filter_lookup(&filter_info->fivetuple_list,
+ &filter_5tuple);
+ if (filter != NULL && add) {
+ PMD_DRV_LOG(ERR, "filter exists.");
+ return -EEXIST;
+ }
+ if (filter == NULL && !add) {
+ PMD_DRV_LOG(ERR, "filter doesn't exist.");
+ return -ENOENT;
+ }
+
+ if (add) {
+ filter = rte_zmalloc("txgbe_5tuple_filter",
+ sizeof(struct txgbe_5tuple_filter), 0);
+ if (filter == NULL)
+ return -ENOMEM;
+ rte_memcpy(&filter->filter_info,
+ &filter_5tuple,
+ sizeof(struct txgbe_5tuple_filter_info));
+ filter->queue = ntuple_filter->queue;
+ ret = txgbe_add_5tuple_filter(dev, filter);
+ if (ret < 0) {
+ rte_free(filter);
+ return ret;
+ }
+ } else {
+ txgbe_remove_5tuple_filter(dev, filter);
+ }
+
+ return 0;
+}
+
+int
+txgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
+ struct rte_eth_ethertype_filter *filter,
+ bool add)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+ uint32_t etqf = 0;
+ uint32_t etqs = 0;
+ int ret;
+ struct txgbe_ethertype_filter ethertype_filter;
+
+ if (filter->queue >= TXGBE_MAX_RX_QUEUE_NUM)
+ return -EINVAL;
+
+ if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
+ filter->ether_type == RTE_ETHER_TYPE_IPV6) {
+ PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in"
+ " ethertype filter.", filter->ether_type);
+ return -EINVAL;
+ }
+
+ if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
+ PMD_DRV_LOG(ERR, "mac compare is unsupported.");
+ return -EINVAL;
+ }
+ if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
+ PMD_DRV_LOG(ERR, "drop option is unsupported.");
+ return -EINVAL;
+ }
+
+ ret = txgbe_ethertype_filter_lookup(filter_info, filter->ether_type);
+ if (ret >= 0 && add) {
+ PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.",
+ filter->ether_type);
+ return -EEXIST;
+ }
+ if (ret < 0 && !add) {
+ PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
+ filter->ether_type);
+ return -ENOENT;
+ }
+
+ if (add) {
+ etqf = TXGBE_ETFLT_ENA;
+ etqf |= TXGBE_ETFLT_ETID(filter->ether_type);
+ etqs |= TXGBE_ETCLS_QPID(filter->queue);
+ etqs |= TXGBE_ETCLS_QENA;
+
+ ethertype_filter.ethertype = filter->ether_type;
+ ethertype_filter.etqf = etqf;
+ ethertype_filter.etqs = etqs;
+ ethertype_filter.conf = FALSE;
+ ret = txgbe_ethertype_filter_insert(filter_info,
+				&ethertype_filter);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "ethertype filters are full.");
+ return -ENOSPC;
+ }
+ } else {
+ ret = txgbe_ethertype_filter_remove(filter_info, (uint8_t)ret);
+ if (ret < 0)
+ return -ENOSYS;
+ }
+ wr32(hw, TXGBE_ETFLT(ret), etqf);
+ wr32(hw, TXGBE_ETCLS(ret), etqs);
+ txgbe_flush(hw);
+
+ return 0;
+}
+
+static int
+txgbe_dev_flow_ops_get(__rte_unused struct rte_eth_dev *dev,
+ const struct rte_flow_ops **ops)
+{
+ *ops = &txgbe_flow_ops;
+ return 0;
+}
+
+static u8 *
+txgbe_dev_addr_list_itr(__rte_unused struct txgbe_hw *hw,
+ u8 **mc_addr_ptr, u32 *vmdq)
+{
+ u8 *mc_addr;
+
+ *vmdq = 0;
+ mc_addr = *mc_addr_ptr;
+ *mc_addr_ptr = (mc_addr + sizeof(struct rte_ether_addr));
+ return mc_addr;
+}
+
+int
+txgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
+ struct rte_ether_addr *mc_addr_set,
+ uint32_t nb_mc_addr)
+{
+ struct txgbe_hw *hw;
+ u8 *mc_addr_list;
+
+ hw = TXGBE_DEV_HW(dev);
+ mc_addr_list = (u8 *)mc_addr_set;
+ return hw->mac.update_mc_addr_list(hw, mc_addr_list, nb_mc_addr,
+ txgbe_dev_addr_list_itr, TRUE);
+}
+
+static uint64_t
+txgbe_read_systime_cyclecounter(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ uint64_t systime_cycles;
+
+ systime_cycles = (uint64_t)rd32(hw, TXGBE_TSTIMEL);
+ systime_cycles |= (uint64_t)rd32(hw, TXGBE_TSTIMEH) << 32;
+
+ return systime_cycles;
+}
+
+static uint64_t
+txgbe_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ uint64_t rx_tstamp_cycles;
+
+ /* TSRXSTMPL stores ns and TSRXSTMPH stores seconds. */
+ rx_tstamp_cycles = (uint64_t)rd32(hw, TXGBE_TSRXSTMPL);
+ rx_tstamp_cycles |= (uint64_t)rd32(hw, TXGBE_TSRXSTMPH) << 32;
+
+ return rx_tstamp_cycles;
+}
+
+static uint64_t
+txgbe_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ uint64_t tx_tstamp_cycles;
+
+ /* TSTXSTMPL stores ns and TSTXSTMPH stores seconds. */
+ tx_tstamp_cycles = (uint64_t)rd32(hw, TXGBE_TSTXSTMPL);
+ tx_tstamp_cycles |= (uint64_t)rd32(hw, TXGBE_TSTXSTMPH) << 32;
+
+ return tx_tstamp_cycles;
+}
+
+static void
+txgbe_start_timecounters(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
+ struct rte_eth_link link;
+ uint32_t incval = 0;
+ uint32_t shift = 0;
+
+ /* Get current link speed. */
+ txgbe_dev_link_update(dev, 1);
+ rte_eth_linkstatus_get(dev, &link);
+
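+	/* pick the SYSTIME increment value matching the link speed, so the
+	 * cyclecounter advances at the proper rate
+	 */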
+ switch (link.link_speed) {
+ case ETH_SPEED_NUM_100M:
+ incval = TXGBE_INCVAL_100;
+ shift = TXGBE_INCVAL_SHIFT_100;
+ break;
+ case ETH_SPEED_NUM_1G:
+ incval = TXGBE_INCVAL_1GB;
+ shift = TXGBE_INCVAL_SHIFT_1GB;
+ break;
+ case ETH_SPEED_NUM_10G:
+ default:
+ incval = TXGBE_INCVAL_10GB;
+ shift = TXGBE_INCVAL_SHIFT_10GB;
+ break;
+ }
+
+ wr32(hw, TXGBE_TSTIMEINC, TXGBE_TSTIMEINC_VP(incval, 2));