static int txgbe_dev_close(struct rte_eth_dev *dev);
static int txgbe_dev_link_update(struct rte_eth_dev *dev,
int wait_to_complete);
+static int txgbe_dev_stats_reset(struct rte_eth_dev *dev);
+static void txgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
+static void txgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev,
+ uint16_t queue);
static void txgbe_dev_link_status_print(struct rte_eth_dev *dev);
static int txgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
static void txgbe_dev_interrupt_delayed_handler(void *param);
static void txgbe_configure_msix(struct rte_eth_dev *dev);
+#define TXGBE_SET_HWSTRIP(h, q) do {\
+ uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
+ uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
+ (h)->bitmap[idx] |= 1 << bit;\
+ } while (0)
+
+#define TXGBE_CLEAR_HWSTRIP(h, q) do {\
+ uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
+ uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
+ (h)->bitmap[idx] &= ~(1 << bit);\
+ } while (0)
+
+#define TXGBE_GET_HWSTRIP(h, q, r) do {\
+ uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
+ uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
+ (r) = (h)->bitmap[idx] >> bit & 1;\
+ } while (0)
+
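A minimal standalone sketch of the hwstrip bitmap arithmetic above, assuming NBBY is the usual 8 bits per byte and a bitmap sized for 128 queues (both assumptions, not taken from this patch):

#include <stdint.h>
#include <stdio.h>

#define NBBY 8	/* bits per byte, per the usual osdep definition */

struct hwstrip_demo {
	uint32_t bitmap[4];	/* 4 x 32 bits covers 128 queues */
};

#define SET_HWSTRIP(h, q) do { \
	uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
	uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
	(h)->bitmap[idx] |= 1u << bit; \
} while (0)

int main(void)
{
	struct hwstrip_demo h = { {0} };

	SET_HWSTRIP(&h, 37);	/* queue 37 -> word 1, bit 5 */
	printf("bitmap[1] = 0x%08x\n", h.bitmap[1]);	/* 0x00000020 */
	return 0;
}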
/*
* The set of PCI devices this driver supports
*/
static const struct eth_dev_ops txgbe_eth_dev_ops;
+#define HW_XSTAT(m) {#m, offsetof(struct txgbe_hw_stats, m)}
+#define HW_XSTAT_NAME(m, n) {n, offsetof(struct txgbe_hw_stats, m)}
+static const struct rte_txgbe_xstats_name_off rte_txgbe_stats_strings[] = {
+ /* MNG RxTx */
+ HW_XSTAT(mng_bmc2host_packets),
+ HW_XSTAT(mng_host2bmc_packets),
+ /* Basic RxTx */
+ HW_XSTAT(rx_packets),
+ HW_XSTAT(tx_packets),
+ HW_XSTAT(rx_bytes),
+ HW_XSTAT(tx_bytes),
+ HW_XSTAT(rx_total_bytes),
+ HW_XSTAT(rx_total_packets),
+ HW_XSTAT(tx_total_packets),
+ HW_XSTAT(rx_total_missed_packets),
+ HW_XSTAT(rx_broadcast_packets),
+ HW_XSTAT(rx_multicast_packets),
+ HW_XSTAT(rx_management_packets),
+ HW_XSTAT(tx_management_packets),
+ HW_XSTAT(rx_management_dropped),
+
+ /* Basic Error */
+ HW_XSTAT(rx_crc_errors),
+ HW_XSTAT(rx_illegal_byte_errors),
+ HW_XSTAT(rx_error_bytes),
+ HW_XSTAT(rx_mac_short_packet_dropped),
+ HW_XSTAT(rx_length_errors),
+ HW_XSTAT(rx_undersize_errors),
+ HW_XSTAT(rx_fragment_errors),
+ HW_XSTAT(rx_oversize_errors),
+ HW_XSTAT(rx_jabber_errors),
+ HW_XSTAT(rx_l3_l4_xsum_error),
+ HW_XSTAT(mac_local_errors),
+ HW_XSTAT(mac_remote_errors),
+
+ /* Flow Director */
+ HW_XSTAT(flow_director_added_filters),
+ HW_XSTAT(flow_director_removed_filters),
+ HW_XSTAT(flow_director_filter_add_errors),
+ HW_XSTAT(flow_director_filter_remove_errors),
+ HW_XSTAT(flow_director_matched_filters),
+ HW_XSTAT(flow_director_missed_filters),
+
+ /* FCoE */
+ HW_XSTAT(rx_fcoe_crc_errors),
+ HW_XSTAT(rx_fcoe_mbuf_allocation_errors),
+ HW_XSTAT(rx_fcoe_dropped),
+ HW_XSTAT(rx_fcoe_packets),
+ HW_XSTAT(tx_fcoe_packets),
+ HW_XSTAT(rx_fcoe_bytes),
+ HW_XSTAT(tx_fcoe_bytes),
+ HW_XSTAT(rx_fcoe_no_ddp),
+ HW_XSTAT(rx_fcoe_no_ddp_ext_buff),
+
+ /* MACSEC */
+ HW_XSTAT(tx_macsec_pkts_untagged),
+ HW_XSTAT(tx_macsec_pkts_encrypted),
+ HW_XSTAT(tx_macsec_pkts_protected),
+ HW_XSTAT(tx_macsec_octets_encrypted),
+ HW_XSTAT(tx_macsec_octets_protected),
+ HW_XSTAT(rx_macsec_pkts_untagged),
+ HW_XSTAT(rx_macsec_pkts_badtag),
+ HW_XSTAT(rx_macsec_pkts_nosci),
+ HW_XSTAT(rx_macsec_pkts_unknownsci),
+ HW_XSTAT(rx_macsec_octets_decrypted),
+ HW_XSTAT(rx_macsec_octets_validated),
+ HW_XSTAT(rx_macsec_sc_pkts_unchecked),
+ HW_XSTAT(rx_macsec_sc_pkts_delayed),
+ HW_XSTAT(rx_macsec_sc_pkts_late),
+ HW_XSTAT(rx_macsec_sa_pkts_ok),
+ HW_XSTAT(rx_macsec_sa_pkts_invalid),
+ HW_XSTAT(rx_macsec_sa_pkts_notvalid),
+ HW_XSTAT(rx_macsec_sa_pkts_unusedsa),
+ HW_XSTAT(rx_macsec_sa_pkts_notusingsa),
+
+ /* MAC RxTx */
+ HW_XSTAT(rx_size_64_packets),
+ HW_XSTAT(rx_size_65_to_127_packets),
+ HW_XSTAT(rx_size_128_to_255_packets),
+ HW_XSTAT(rx_size_256_to_511_packets),
+ HW_XSTAT(rx_size_512_to_1023_packets),
+ HW_XSTAT(rx_size_1024_to_max_packets),
+ HW_XSTAT(tx_size_64_packets),
+ HW_XSTAT(tx_size_65_to_127_packets),
+ HW_XSTAT(tx_size_128_to_255_packets),
+ HW_XSTAT(tx_size_256_to_511_packets),
+ HW_XSTAT(tx_size_512_to_1023_packets),
+ HW_XSTAT(tx_size_1024_to_max_packets),
+
+ /* Flow Control */
+ HW_XSTAT(tx_xon_packets),
+ HW_XSTAT(rx_xon_packets),
+ HW_XSTAT(tx_xoff_packets),
+ HW_XSTAT(rx_xoff_packets),
+
+ HW_XSTAT_NAME(tx_xon_packets, "tx_flow_control_xon_packets"),
+ HW_XSTAT_NAME(rx_xon_packets, "rx_flow_control_xon_packets"),
+ HW_XSTAT_NAME(tx_xoff_packets, "tx_flow_control_xoff_packets"),
+ HW_XSTAT_NAME(rx_xoff_packets, "rx_flow_control_xoff_packets"),
+};
+
+#define TXGBE_NB_HW_STATS (sizeof(rte_txgbe_stats_strings) / \
+ sizeof(rte_txgbe_stats_strings[0]))
+
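The offsetof-based tables above let the driver walk txgbe_hw_stats as an array of named counters. A minimal sketch of that pattern, using a hypothetical two-field struct in place of txgbe_hw_stats and an assumed shape for rte_txgbe_xstats_name_off:

#include <inttypes.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed shape of the name/offset pair used by the tables above. */
struct xstats_name_off {
	char name[64];
	unsigned int offset;
};

struct demo_stats {
	uint64_t rx_packets;
	uint64_t tx_packets;
};

#define DEMO_XSTAT(m) {#m, offsetof(struct demo_stats, m)}

static const struct xstats_name_off demo_strings[] = {
	DEMO_XSTAT(rx_packets),
	DEMO_XSTAT(tx_packets),
};

int main(void)
{
	struct demo_stats stats = { .rx_packets = 7, .tx_packets = 9 };
	size_t i;

	/* Generic readout: the offset table turns the struct into an array */
	for (i = 0; i < sizeof(demo_strings) / sizeof(demo_strings[0]); i++)
		printf("%s = %" PRIu64 "\n", demo_strings[i].name,
		       *(const uint64_t *)((const char *)&stats +
					   demo_strings[i].offset));
	return 0;
}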
+/* Per-priority statistics */
+#define UP_XSTAT(m) {#m, offsetof(struct txgbe_hw_stats, up[0].m)}
+static const struct rte_txgbe_xstats_name_off rte_txgbe_up_strings[] = {
+ UP_XSTAT(rx_up_packets),
+ UP_XSTAT(tx_up_packets),
+ UP_XSTAT(rx_up_bytes),
+ UP_XSTAT(tx_up_bytes),
+ UP_XSTAT(rx_up_drop_packets),
+
+ UP_XSTAT(tx_up_xon_packets),
+ UP_XSTAT(rx_up_xon_packets),
+ UP_XSTAT(tx_up_xoff_packets),
+ UP_XSTAT(rx_up_xoff_packets),
+ UP_XSTAT(rx_up_dropped),
+ UP_XSTAT(rx_up_mbuf_alloc_errors),
+ UP_XSTAT(tx_up_xon2off_packets),
+};
+
+#define TXGBE_NB_UP_STATS (sizeof(rte_txgbe_up_strings) / \
+ sizeof(rte_txgbe_up_strings[0]))
+
+/* Per-queue statistics */
+#define QP_XSTAT(m) {#m, offsetof(struct txgbe_hw_stats, qp[0].m)}
+static const struct rte_txgbe_xstats_name_off rte_txgbe_qp_strings[] = {
+ QP_XSTAT(rx_qp_packets),
+ QP_XSTAT(tx_qp_packets),
+ QP_XSTAT(rx_qp_bytes),
+ QP_XSTAT(tx_qp_bytes),
+ QP_XSTAT(rx_qp_mc_packets),
+};
+
+#define TXGBE_NB_QP_STATS (sizeof(rte_txgbe_qp_strings) / \
+ sizeof(rte_txgbe_qp_strings[0]))
+
static inline int
txgbe_is_sfp(struct txgbe_hw *hw)
{
}
}
+static inline int32_t
+txgbe_pf_reset_hw(struct txgbe_hw *hw)
+{
+ uint32_t ctrl_ext;
+ int32_t status;
+
+ status = hw->mac.reset_hw(hw);
+
+ ctrl_ext = rd32(hw, TXGBE_PORTCTL);
+ /* Set PF Reset Done bit so PF/VF mailbox ops can work */
+ ctrl_ext |= TXGBE_PORTCTL_RSTDONE;
+ wr32(hw, TXGBE_PORTCTL, ctrl_ext);
+ txgbe_flush(hw);
+
+ if (status == TXGBE_ERR_SFP_NOT_PRESENT)
+ status = 0;
+ return status;
+}
+
static inline void
txgbe_enable_intr(struct rte_eth_dev *dev)
{
txgbe_flush(hw);
}
+static int
+txgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
+ uint16_t queue_id,
+ uint8_t stat_idx,
+ uint8_t is_rx)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
+ struct txgbe_stat_mappings *stat_mappings =
+ TXGBE_DEV_STAT_MAPPINGS(eth_dev);
+ uint32_t qsmr_mask = 0;
+ uint32_t clearing_mask = QMAP_FIELD_RESERVED_BITS_MASK;
+ uint32_t q_map;
+ uint8_t n, offset;
+
+ if (hw->mac.type != txgbe_mac_raptor)
+ return -ENOSYS;
+
+ if (stat_idx & ~QMAP_FIELD_RESERVED_BITS_MASK)
+ return -EIO;
+
+ PMD_INIT_LOG(DEBUG, "Setting port %d, %s queue_id %d to stat index %d",
+ (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
+ queue_id, stat_idx);
+
+ n = (uint8_t)(queue_id / NB_QMAP_FIELDS_PER_QSM_REG);
+ if (n >= TXGBE_NB_STAT_MAPPING) {
+ PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded");
+ return -EIO;
+ }
+ offset = (uint8_t)(queue_id % NB_QMAP_FIELDS_PER_QSM_REG);
+
+ /* Now clear any previous stat_idx set */
+ clearing_mask <<= (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
+ if (!is_rx)
+ stat_mappings->tqsm[n] &= ~clearing_mask;
+ else
+ stat_mappings->rqsm[n] &= ~clearing_mask;
+
+ q_map = (uint32_t)stat_idx;
+ q_map &= QMAP_FIELD_RESERVED_BITS_MASK;
+ qsmr_mask = q_map << (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
+ if (!is_rx)
+ stat_mappings->tqsm[n] |= qsmr_mask;
+ else
+ stat_mappings->rqsm[n] |= qsmr_mask;
+
+ PMD_INIT_LOG(DEBUG, "Set port %d, %s queue_id %d to stat index %d",
+ (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
+ queue_id, stat_idx);
+ PMD_INIT_LOG(DEBUG, "%s[%d] = 0x%08x", is_rx ? "RQSMR" : "TQSM", n,
+ is_rx ? stat_mappings->rqsm[n] : stat_mappings->tqsm[n]);
+ return 0;
+}
+
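txgbe_dev_queue_stats_mapping_set packs one 8-bit stat index per queue, four fields per 32-bit QSM register. A worked sketch of the field arithmetic, assuming NB_QMAP_FIELDS_PER_QSM_REG is 4 and QSM_REG_NB_BITS_PER_QMAP_FIELD is 8, consistent with the 0xFF reserved-bits mask:

#include <stdint.h>
#include <stdio.h>

#define NB_QMAP_FIELDS_PER_QSM_REG	4	/* assumed */
#define QSM_REG_NB_BITS_PER_QMAP_FIELD	8	/* assumed */
#define QMAP_FIELD_RESERVED_BITS_MASK	0xFF	/* assumed */

int main(void)
{
	uint16_t queue_id = 5;
	uint8_t stat_idx = 3;
	uint32_t rqsm[8] = {0};

	uint8_t n = queue_id / NB_QMAP_FIELDS_PER_QSM_REG;	/* register 1 */
	uint8_t offset = queue_id % NB_QMAP_FIELDS_PER_QSM_REG;	/* field 1 */
	uint32_t clearing = (uint32_t)QMAP_FIELD_RESERVED_BITS_MASK <<
			    (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);

	rqsm[n] &= ~clearing;			/* clear the old mapping */
	rqsm[n] |= (uint32_t)stat_idx <<
		   (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);

	printf("RQSMR[%u] = 0x%08x\n", (unsigned int)n, rqsm[n]);	/* 0x00000300 */
	return 0;
}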
static int
eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
{
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
+ struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(eth_dev);
+ struct txgbe_hwstrip *hwstrip = TXGBE_DEV_HWSTRIP(eth_dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
const struct rte_memzone *mz;
uint16_t csum;
PMD_INIT_FUNC_TRACE();
eth_dev->dev_ops = &txgbe_eth_dev_ops;
+ eth_dev->rx_pkt_burst = &txgbe_recv_pkts;
+ eth_dev->tx_pkt_burst = &txgbe_xmit_pkts;
+ eth_dev->tx_pkt_prepare = &txgbe_prep_pkts;
+
+ /*
+ * For secondary processes, we don't initialise any further as primary
+ * has already done this work. Only check we don't need a different
+ * RX and TX function.
+ */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ struct txgbe_tx_queue *txq;
+ /* The Tx function used in the primary process was set by the last
+ * queue initialized; a Tx queue may not have been initialized by
+ * the primary process.
+ */
+ if (eth_dev->data->tx_queues) {
+ uint16_t nb_tx_queues = eth_dev->data->nb_tx_queues;
+ txq = eth_dev->data->tx_queues[nb_tx_queues - 1];
+ txgbe_set_tx_function(eth_dev, txq);
+ } else {
+ /* Use default TX function if we get here */
+ PMD_INIT_LOG(NOTICE, "No TX queues configured yet. "
+ "Using default TX function.");
+ }
+
+ txgbe_set_rx_function(eth_dev);
+
+ return 0;
+ }
rte_eth_copy_pci_info(eth_dev, pci_dev);
return -EIO;
}
+ /* Reset the hw statistics */
+ txgbe_dev_stats_reset(eth_dev);
+
/* disable interrupt */
txgbe_disable_intr(hw);
return -ENOMEM;
}
+ /* initialize the VFTA */
+ memset(shadow_vfta, 0, sizeof(*shadow_vfta));
+
+ /* initialize the HW strip bitmap */
+ memset(hwstrip, 0, sizeof(*hwstrip));
+
if (txgbe_is_sfp(hw) && hw->phy.sfp_type != txgbe_sfp_type_not_present)
PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d, SFP+: %d",
(int)hw->mac.type, (int)hw->phy.type,
.remove = eth_txgbe_pci_remove,
};
+static int
+txgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(dev);
+ uint32_t vfta;
+ uint32_t vid_idx;
+ uint32_t vid_bit;
+
+ vid_idx = (uint32_t)((vlan_id >> 5) & 0x7F);
+ vid_bit = (uint32_t)(1 << (vlan_id & 0x1F));
+ vfta = rd32(hw, TXGBE_VLANTBL(vid_idx));
+ if (on)
+ vfta |= vid_bit;
+ else
+ vfta &= ~vid_bit;
+ wr32(hw, TXGBE_VLANTBL(vid_idx), vfta);
+
+ /* update local VFTA copy */
+ shadow_vfta->vfta[vid_idx] = vfta;
+
+ return 0;
+}
+
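txgbe_vlan_filter_set treats the 4096 possible VLAN IDs as 128 words of 32 bits each; the shift/mask arithmetic is easier to see with a concrete ID:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t vlan_id = 100;

	/* Same arithmetic as txgbe_vlan_filter_set above */
	uint32_t vid_idx = (vlan_id >> 5) & 0x7F;	/* 100 >> 5 = 3 */
	uint32_t vid_bit = 1u << (vlan_id & 0x1F);	/* bit 4 -> 0x10 */

	printf("VLANTBL[%u], bit mask 0x%08x\n", vid_idx, vid_bit);
	return 0;
}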
+static void
+txgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_rx_queue *rxq;
+ bool restart;
+ uint32_t rxcfg, rxbal, rxbah;
+
+ if (on)
+ txgbe_vlan_hw_strip_enable(dev, queue);
+ else
+ txgbe_vlan_hw_strip_disable(dev, queue);
+
+ rxq = dev->data->rx_queues[queue];
+ rxbal = rd32(hw, TXGBE_RXBAL(rxq->reg_idx));
+ rxbah = rd32(hw, TXGBE_RXBAH(rxq->reg_idx));
+ rxcfg = rd32(hw, TXGBE_RXCFG(rxq->reg_idx));
+ if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
+ restart = (rxcfg & TXGBE_RXCFG_ENA) &&
+ !(rxcfg & TXGBE_RXCFG_VLAN);
+ rxcfg |= TXGBE_RXCFG_VLAN;
+ } else {
+ restart = (rxcfg & TXGBE_RXCFG_ENA) &&
+ (rxcfg & TXGBE_RXCFG_VLAN);
+ rxcfg &= ~TXGBE_RXCFG_VLAN;
+ }
+ rxcfg &= ~TXGBE_RXCFG_ENA;
+
+ if (restart) {
+ /* set vlan strip for ring */
+ txgbe_dev_rx_queue_stop(dev, queue);
+ wr32(hw, TXGBE_RXBAL(rxq->reg_idx), rxbal);
+ wr32(hw, TXGBE_RXBAH(rxq->reg_idx), rxbah);
+ wr32(hw, TXGBE_RXCFG(rxq->reg_idx), rxcfg);
+ txgbe_dev_rx_queue_start(dev, queue);
+ }
+}
+
+static int
+txgbe_vlan_tpid_set(struct rte_eth_dev *dev,
+ enum rte_vlan_type vlan_type,
+ uint16_t tpid)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ int ret = 0;
+ uint32_t portctrl, vlan_ext, qinq;
+
+ portctrl = rd32(hw, TXGBE_PORTCTL);
+
+ vlan_ext = (portctrl & TXGBE_PORTCTL_VLANEXT);
+ qinq = vlan_ext && (portctrl & TXGBE_PORTCTL_QINQ);
+ switch (vlan_type) {
+ case ETH_VLAN_TYPE_INNER:
+ if (vlan_ext) {
+ wr32m(hw, TXGBE_VLANCTL,
+ TXGBE_VLANCTL_TPID_MASK,
+ TXGBE_VLANCTL_TPID(tpid));
+ wr32m(hw, TXGBE_DMATXCTRL,
+ TXGBE_DMATXCTRL_TPID_MASK,
+ TXGBE_DMATXCTRL_TPID(tpid));
+ } else {
+ ret = -ENOTSUP;
+ PMD_DRV_LOG(ERR, "Inner type is not supported"
+ " by single VLAN");
+ }
+
+ if (qinq) {
+ wr32m(hw, TXGBE_TAGTPID(0),
+ TXGBE_TAGTPID_LSB_MASK,
+ TXGBE_TAGTPID_LSB(tpid));
+ }
+ break;
+ case ETH_VLAN_TYPE_OUTER:
+ if (vlan_ext) {
+ /* Only the high 16 bits are valid */
+ wr32m(hw, TXGBE_EXTAG,
+ TXGBE_EXTAG_VLAN_MASK,
+ TXGBE_EXTAG_VLAN(tpid));
+ } else {
+ wr32m(hw, TXGBE_VLANCTL,
+ TXGBE_VLANCTL_TPID_MASK,
+ TXGBE_VLANCTL_TPID(tpid));
+ wr32m(hw, TXGBE_DMATXCTRL,
+ TXGBE_DMATXCTRL_TPID_MASK,
+ TXGBE_DMATXCTRL_TPID(tpid));
+ }
+
+ if (qinq) {
+ wr32m(hw, TXGBE_TAGTPID(0),
+ TXGBE_TAGTPID_MSB_MASK,
+ TXGBE_TAGTPID_MSB(tpid));
+ }
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type);
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+void
+txgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ uint32_t vlnctrl;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Filter Table Disable */
+ vlnctrl = rd32(hw, TXGBE_VLANCTL);
+ vlnctrl &= ~TXGBE_VLANCTL_VFE;
+ wr32(hw, TXGBE_VLANCTL, vlnctrl);
+}
+
+void
+txgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(dev);
+ uint32_t vlnctrl;
+ uint16_t i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Filter Table Enable */
+ vlnctrl = rd32(hw, TXGBE_VLANCTL);
+ vlnctrl &= ~TXGBE_VLANCTL_CFIENA;
+ vlnctrl |= TXGBE_VLANCTL_VFE;
+ wr32(hw, TXGBE_VLANCTL, vlnctrl);
+
+ /* write whatever is in local vfta copy */
+ for (i = 0; i < TXGBE_VFTA_SIZE; i++)
+ wr32(hw, TXGBE_VLANTBL(i), shadow_vfta->vfta[i]);
+}
+
+void
+txgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
+{
+ struct txgbe_hwstrip *hwstrip = TXGBE_DEV_HWSTRIP(dev);
+ struct txgbe_rx_queue *rxq;
+
+ if (queue >= TXGBE_MAX_RX_QUEUE_NUM)
+ return;
+
+ if (on)
+ TXGBE_SET_HWSTRIP(hwstrip, queue);
+ else
+ TXGBE_CLEAR_HWSTRIP(hwstrip, queue);
+
+ if (queue >= dev->data->nb_rx_queues)
+ return;
+
+ rxq = dev->data->rx_queues[queue];
+
+ if (on) {
+ rxq->vlan_flags = PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
+ rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+ } else {
+ rxq->vlan_flags = PKT_RX_VLAN;
+ rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+ }
+}
+
+static void
+txgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ uint32_t ctrl;
+
+ PMD_INIT_FUNC_TRACE();
+
+ ctrl = rd32(hw, TXGBE_RXCFG(queue));
+ ctrl &= ~TXGBE_RXCFG_VLAN;
+ wr32(hw, TXGBE_RXCFG(queue), ctrl);
+
+ /* record the per-queue HW strip setting */
+ txgbe_vlan_hw_strip_bitmap_set(dev, queue, 0);
+}
+
+static void
+txgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ uint32_t ctrl;
+
+ PMD_INIT_FUNC_TRACE();
+
+ ctrl = rd32(hw, TXGBE_RXCFG(queue));
+ ctrl |= TXGBE_RXCFG_VLAN;
+ wr32(hw, TXGBE_RXCFG(queue), ctrl);
+
+ /* record the per-queue HW strip setting */
+ txgbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
+}
+
+static void
+txgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ uint32_t ctrl;
+
+ PMD_INIT_FUNC_TRACE();
+
+ ctrl = rd32(hw, TXGBE_PORTCTL);
+ ctrl &= ~TXGBE_PORTCTL_VLANEXT;
+ ctrl &= ~TXGBE_PORTCTL_QINQ;
+ wr32(hw, TXGBE_PORTCTL, ctrl);
+}
+
+static void
+txgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
+ struct rte_eth_txmode *txmode = &dev->data->dev_conf.txmode;
+ uint32_t ctrl;
+
+ PMD_INIT_FUNC_TRACE();
+
+ ctrl = rd32(hw, TXGBE_PORTCTL);
+ ctrl |= TXGBE_PORTCTL_VLANEXT;
+ if (rxmode->offloads & DEV_RX_OFFLOAD_QINQ_STRIP ||
+ txmode->offloads & DEV_TX_OFFLOAD_QINQ_INSERT)
+ ctrl |= TXGBE_PORTCTL_QINQ;
+ wr32(hw, TXGBE_PORTCTL, ctrl);
+}
+
+void
+txgbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
+{
+ struct txgbe_rx_queue *rxq;
+ uint16_t i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+
+ if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ txgbe_vlan_strip_queue_set(dev, i, 1);
+ else
+ txgbe_vlan_strip_queue_set(dev, i, 0);
+ }
+}
+
+void
+txgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask)
+{
+ uint16_t i;
+ struct rte_eth_rxmode *rxmode;
+ struct txgbe_rx_queue *rxq;
+
+ if (mask & ETH_VLAN_STRIP_MASK) {
+ rxmode = &dev->data->dev_conf.rxmode;
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+ }
+ else
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+ }
+ }
+}
+
+static int
+txgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask)
+{
+ struct rte_eth_rxmode *rxmode;
+ rxmode = &dev->data->dev_conf.rxmode;
+
+ if (mask & ETH_VLAN_STRIP_MASK)
+ txgbe_vlan_hw_strip_config(dev);
+
+ if (mask & ETH_VLAN_FILTER_MASK) {
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+ txgbe_vlan_hw_filter_enable(dev);
+ else
+ txgbe_vlan_hw_filter_disable(dev);
+ }
+
+ if (mask & ETH_VLAN_EXTEND_MASK) {
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+ txgbe_vlan_hw_extend_enable(dev);
+ else
+ txgbe_vlan_hw_extend_disable(dev);
+ }
+
+ return 0;
+}
+
+static int
+txgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+{
+ txgbe_config_vlan_strip_on_all_queues(dev, mask);
+
+ txgbe_vlan_offload_config(dev, mask);
+
+ return 0;
+}
+
static int
txgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
{
intr->mask_misc |= TXGBE_ICRMISC_GPIO;
}
+/*
+ * Configure device link speed and setup link.
+ * It returns 0 on success.
+ */
+static int
+txgbe_dev_start(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ uint32_t intr_vector = 0;
+ int err;
+ bool link_up = false, negotiate = 0;
+ uint32_t speed = 0;
+ uint32_t allowed_speeds = 0;
+ int mask = 0;
+ int status;
+ uint32_t *link_speeds;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* TXGBE devices don't support:
+ * - half duplex (checked afterwards for valid speeds)
+ * - fixed speed: TODO implement
+ */
+ if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
+ PMD_INIT_LOG(ERR,
+ "Invalid link_speeds for port %u, fix speed not supported",
+ dev->data->port_id);
+ return -EINVAL;
+ }
+
+ /* Stop the link setup handler before resetting the HW. */
+ rte_eal_alarm_cancel(txgbe_dev_setup_link_alarm_handler, dev);
+
+ /* disable uio/vfio intr/eventfd mapping */
+ rte_intr_disable(intr_handle);
+
+ /* stop adapter */
+ hw->adapter_stopped = 0;
+ txgbe_stop_hw(hw);
+
+ /* reinitialize adapter
+ * this calls reset and start
+ */
+ hw->nb_rx_queues = dev->data->nb_rx_queues;
+ hw->nb_tx_queues = dev->data->nb_tx_queues;
+ status = txgbe_pf_reset_hw(hw);
+ if (status != 0)
+ return -1;
+ hw->mac.start_hw(hw);
+ hw->mac.get_link_status = true;
+
+ txgbe_dev_phy_intr_setup(dev);
+
+ /* check and configure queue intr-vector mapping */
+ if ((rte_intr_cap_multiple(intr_handle) ||
+ !RTE_ETH_DEV_SRIOV(dev).active) &&
+ dev->data->dev_conf.intr_conf.rxq != 0) {
+ intr_vector = dev->data->nb_rx_queues;
+ if (rte_intr_efd_enable(intr_handle, intr_vector))
+ return -1;
+ }
+
+ if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
+ intr_handle->intr_vec =
+ rte_zmalloc("intr_vec",
+ dev->data->nb_rx_queues * sizeof(int), 0);
+ if (intr_handle->intr_vec == NULL) {
+ PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
+ " intr_vec", dev->data->nb_rx_queues);
+ return -ENOMEM;
+ }
+ }
+
+ /* configure MSI-X for sleeping until Rx interrupt arrives */
+ txgbe_configure_msix(dev);
+
+ /* initialize transmission unit */
+ txgbe_dev_tx_init(dev);
+
+ /* This can fail when allocating mbufs for descriptor rings */
+ err = txgbe_dev_rx_init(dev);
+ if (err) {
+ PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
+ goto error;
+ }
+
+ mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
+ ETH_VLAN_EXTEND_MASK;
+ err = txgbe_vlan_offload_config(dev, mask);
+ if (err) {
+ PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
+ goto error;
+ }
+
+ err = txgbe_dev_rxtx_start(dev);
+ if (err < 0) {
+ PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
+ goto error;
+ }
+
+ /* Skip link setup if loopback mode is enabled. */
+ if (hw->mac.type == txgbe_mac_raptor &&
+ dev->data->dev_conf.lpbk_mode)
+ goto skip_link_setup;
+
+ if (txgbe_is_sfp(hw) && hw->phy.multispeed_fiber) {
+ err = hw->mac.setup_sfp(hw);
+ if (err)
+ goto error;
+ }
+
+ if (hw->phy.media_type == txgbe_media_type_copper) {
+ /* Turn on the copper */
+ hw->phy.set_phy_power(hw, true);
+ } else {
+ /* Turn on the laser */
+ hw->mac.enable_tx_laser(hw);
+ }
+
+ err = hw->mac.check_link(hw, &speed, &link_up, 0);
+ if (err)
+ goto error;
+ dev->data->dev_link.link_status = link_up;
+
+ err = hw->mac.get_link_capabilities(hw, &speed, &negotiate);
+ if (err)
+ goto error;
+
+ allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |
+ ETH_LINK_SPEED_10G;
+
+ link_speeds = &dev->data->dev_conf.link_speeds;
+ if (*link_speeds & ~allowed_speeds) {
+ PMD_INIT_LOG(ERR, "Invalid link setting");
+ goto error;
+ }
+
+ speed = 0x0;
+ if (*link_speeds == ETH_LINK_SPEED_AUTONEG) {
+ speed = (TXGBE_LINK_SPEED_100M_FULL |
+ TXGBE_LINK_SPEED_1GB_FULL |
+ TXGBE_LINK_SPEED_10GB_FULL);
+ } else {
+ if (*link_speeds & ETH_LINK_SPEED_10G)
+ speed |= TXGBE_LINK_SPEED_10GB_FULL;
+ if (*link_speeds & ETH_LINK_SPEED_5G)
+ speed |= TXGBE_LINK_SPEED_5GB_FULL;
+ if (*link_speeds & ETH_LINK_SPEED_2_5G)
+ speed |= TXGBE_LINK_SPEED_2_5GB_FULL;
+ if (*link_speeds & ETH_LINK_SPEED_1G)
+ speed |= TXGBE_LINK_SPEED_1GB_FULL;
+ if (*link_speeds & ETH_LINK_SPEED_100M)
+ speed |= TXGBE_LINK_SPEED_100M_FULL;
+ }
+
+ err = hw->mac.setup_link(hw, speed, link_up);
+ if (err)
+ goto error;
+
+skip_link_setup:
+
+ if (rte_intr_allow_others(intr_handle)) {
+ /* check if lsc interrupt is enabled */
+ if (dev->data->dev_conf.intr_conf.lsc != 0)
+ txgbe_dev_lsc_interrupt_setup(dev, TRUE);
+ else
+ txgbe_dev_lsc_interrupt_setup(dev, FALSE);
+ txgbe_dev_macsec_interrupt_setup(dev);
+ txgbe_set_ivar_map(hw, -1, 1, TXGBE_MISC_VEC_ID);
+ } else {
+ rte_intr_callback_unregister(intr_handle,
+ txgbe_dev_interrupt_handler, dev);
+ if (dev->data->dev_conf.intr_conf.lsc != 0)
+ PMD_INIT_LOG(INFO, "lsc won't enable because of"
+ " no intr multiplex");
+ }
+
+ /* check if rxq interrupt is enabled */
+ if (dev->data->dev_conf.intr_conf.rxq != 0 &&
+ rte_intr_dp_is_en(intr_handle))
+ txgbe_dev_rxq_interrupt_setup(dev);
+
+ /* enable uio/vfio intr/eventfd mapping */
+ rte_intr_enable(intr_handle);
+
+ /* resume enabled intr since hw reset */
+ txgbe_enable_intr(dev);
+
+ /*
+ * Update link status right before return, because it may
+ * start link configuration process in a separate thread.
+ */
+ txgbe_dev_link_update(dev, 0);
+
+ wr32m(hw, TXGBE_LEDCTL, 0xFFFFFFFF, TXGBE_LEDCTL_ORD_MASK);
+
+ txgbe_read_stats_registers(hw, hw_stats);
+ hw->offset_loaded = 1;
+
+ return 0;
+
+error:
+ PMD_INIT_LOG(ERR, "failure in dev start: %d", err);
+ txgbe_dev_clear_queues(dev);
+ return -EIO;
+}
+
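From the application side, txgbe_dev_start is reached through the generic ethdev calls. A minimal hedged sketch of that sequence; the mempool is assumed to be created elsewhere and error handling is reduced to early returns:

#include <rte_ethdev.h>
#include <rte_mempool.h>

/* Sketch: configure one Rx/Tx queue pair and start a port. */
static int start_port(uint16_t port_id, struct rte_mempool *mb_pool)
{
	struct rte_eth_conf conf = {0};
	int ret;

	ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
	if (ret < 0)
		return ret;

	ret = rte_eth_rx_queue_setup(port_id, 0, 512,
				     rte_eth_dev_socket_id(port_id),
				     NULL, mb_pool);
	if (ret < 0)
		return ret;

	ret = rte_eth_tx_queue_setup(port_id, 0, 512,
				     rte_eth_dev_socket_id(port_id), NULL);
	if (ret < 0)
		return ret;

	/* Invokes the PMD's dev_start op, txgbe_dev_start here */
	return rte_eth_dev_start(port_id);
}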
+/*
+ * Stop device: disable rx and tx functions to allow for reconfiguring.
+ */
+static int
+txgbe_dev_stop(struct rte_eth_dev *dev)
+{
+ struct rte_eth_link link;
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+
+ if (hw->adapter_stopped)
+ return 0;
+
+ PMD_INIT_FUNC_TRACE();
+
+ rte_eal_alarm_cancel(txgbe_dev_setup_link_alarm_handler, dev);
+
+ /* disable interrupts */
+ txgbe_disable_intr(hw);
+
+ /* reset the NIC */
+ txgbe_pf_reset_hw(hw);
+ hw->adapter_stopped = 0;
+
+ /* stop adapter */
+ txgbe_stop_hw(hw);
+
+ if (hw->phy.media_type == txgbe_media_type_copper) {
+ /* Turn off the copper */
+ hw->phy.set_phy_power(hw, false);
+ } else {
+ /* Turn off the laser */
+ hw->mac.disable_tx_laser(hw);
+ }
+
+ txgbe_dev_clear_queues(dev);
+
+ /* Clear stored conf */
+ dev->data->scattered_rx = 0;
+ dev->data->lro = 0;
+
+ /* Clear recorded link status */
+ memset(&link, 0, sizeof(link));
+ rte_eth_linkstatus_set(dev, &link);
+
+ if (!rte_intr_allow_others(intr_handle))
+ /* resume to the default handler */
+ rte_intr_callback_register(intr_handle,
+ txgbe_dev_interrupt_handler,
+ (void *)dev);
+
+ /* Clean datapath event and queue/vec mapping */
+ rte_intr_efd_disable(intr_handle);
+ if (intr_handle->intr_vec != NULL) {
+ rte_free(intr_handle->intr_vec);
+ intr_handle->intr_vec = NULL;
+ }
+
+ wr32m(hw, TXGBE_LEDCTL, 0xFFFFFFFF, TXGBE_LEDCTL_SEL_MASK);
+
+ hw->adapter_stopped = true;
+ dev->data->dev_started = 0;
+
+ return 0;
+}
+
/*
* Set device link up: enable tx.
*/
static int
txgbe_dev_close(struct rte_eth_dev *dev)
{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
int retries = 0;
+ int ret;
PMD_INIT_FUNC_TRACE();
+ txgbe_pf_reset_hw(hw);
+
+ ret = txgbe_dev_stop(dev);
+
txgbe_dev_free_queues(dev);
+ /* reprogram the RAR[0] in case the user changed it */
+ txgbe_set_rar(hw, 0, hw->mac.addr, 0, true);
+
/* disable uio intr before callback unregister */
rte_intr_disable(intr_handle);
rte_free(dev->data->hash_mac_addrs);
dev->data->hash_mac_addrs = NULL;
+ return ret;
+}
+
+/*
+ * Reset PF device.
+ */
+static int
+txgbe_dev_reset(struct rte_eth_dev *dev)
+{
+ int ret;
+
+ /* When a DPDK PMD PF begins to reset the PF port, it should notify
+ * all of its VFs so that they stay aligned with it. The detailed
+ * notification mechanism is PMD specific; for the txgbe PF it is
+ * rather complex. To avoid unexpected behavior in the VFs, resetting
+ * a PF with SR-IOV activated is currently not supported. It might be
+ * supported later.
+ */
+ if (dev->data->sriov.active)
+ return -ENOTSUP;
+
+ ret = eth_txgbe_dev_uninit(dev);
+ if (ret)
+ return ret;
+
+ ret = eth_txgbe_dev_init(dev, NULL);
+
+ return ret;
+}
+
+#define UPDATE_QP_COUNTER_32bit(reg, last_counter, counter) \
+ { \
+ uint32_t current_counter = rd32(hw, reg); \
+ if (current_counter < last_counter) \
+ current_counter += 0x100000000LL; \
+ if (!hw->offset_loaded) \
+ last_counter = current_counter; \
+ counter = current_counter - last_counter; \
+ counter &= 0xFFFFFFFFLL; \
+ }
+
+#define UPDATE_QP_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
+ { \
+ uint64_t current_counter_lsb = rd32(hw, reg_lsb); \
+ uint64_t current_counter_msb = rd32(hw, reg_msb); \
+ uint64_t current_counter = (current_counter_msb << 32) | \
+ current_counter_lsb; \
+ if (current_counter < last_counter) \
+ current_counter += 0x1000000000LL; \
+ if (!hw->offset_loaded) \
+ last_counter = current_counter; \
+ counter = current_counter - last_counter; \
+ counter &= 0xFFFFFFFFFLL; \
+ }
+
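The UPDATE_QP_COUNTER macros absorb a single hardware counter wrap by adding the counter's modulus whenever the new reading is below the previous one. A minimal sketch of the 32-bit case:

#include <stdint.h>
#include <stdio.h>

/* Delta of a free-running 32-bit counter, tolerating one wrap,
 * mirroring UPDATE_QP_COUNTER_32bit above.
 */
static uint64_t counter_delta32(uint64_t current, uint64_t last)
{
	if (current < last)		/* the hardware counter wrapped */
		current += 0x100000000ULL;
	return (current - last) & 0xFFFFFFFFULL;
}

int main(void)
{
	/* last read 0xFFFFFFF0, counter wrapped around to 0x10 */
	printf("delta = %llu\n",	/* prints 32 */
	       (unsigned long long)counter_delta32(0x10, 0xFFFFFFF0));
	return 0;
}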
+void
+txgbe_read_stats_registers(struct txgbe_hw *hw,
+ struct txgbe_hw_stats *hw_stats)
+{
+ unsigned int i;
+
+ /* QP Stats */
+ for (i = 0; i < hw->nb_rx_queues; i++) {
+ UPDATE_QP_COUNTER_32bit(TXGBE_QPRXPKT(i),
+ hw->qp_last[i].rx_qp_packets,
+ hw_stats->qp[i].rx_qp_packets);
+ UPDATE_QP_COUNTER_36bit(TXGBE_QPRXOCTL(i), TXGBE_QPRXOCTH(i),
+ hw->qp_last[i].rx_qp_bytes,
+ hw_stats->qp[i].rx_qp_bytes);
+ UPDATE_QP_COUNTER_32bit(TXGBE_QPRXMPKT(i),
+ hw->qp_last[i].rx_qp_mc_packets,
+ hw_stats->qp[i].rx_qp_mc_packets);
+ }
+
+ for (i = 0; i < hw->nb_tx_queues; i++) {
+ UPDATE_QP_COUNTER_32bit(TXGBE_QPTXPKT(i),
+ hw->qp_last[i].tx_qp_packets,
+ hw_stats->qp[i].tx_qp_packets);
+ UPDATE_QP_COUNTER_36bit(TXGBE_QPTXOCTL(i), TXGBE_QPTXOCTH(i),
+ hw->qp_last[i].tx_qp_bytes,
+ hw_stats->qp[i].tx_qp_bytes);
+ }
+ /* PB Stats */
+ for (i = 0; i < TXGBE_MAX_UP; i++) {
+ hw_stats->up[i].rx_up_xon_packets +=
+ rd32(hw, TXGBE_PBRXUPXON(i));
+ hw_stats->up[i].rx_up_xoff_packets +=
+ rd32(hw, TXGBE_PBRXUPXOFF(i));
+ hw_stats->up[i].tx_up_xon_packets +=
+ rd32(hw, TXGBE_PBTXUPXON(i));
+ hw_stats->up[i].tx_up_xoff_packets +=
+ rd32(hw, TXGBE_PBTXUPXOFF(i));
+ hw_stats->up[i].tx_up_xon2off_packets +=
+ rd32(hw, TXGBE_PBTXUPOFF(i));
+ hw_stats->up[i].rx_up_dropped +=
+ rd32(hw, TXGBE_PBRXMISS(i));
+ }
+ hw_stats->rx_xon_packets += rd32(hw, TXGBE_PBRXLNKXON);
+ hw_stats->rx_xoff_packets += rd32(hw, TXGBE_PBRXLNKXOFF);
+ hw_stats->tx_xon_packets += rd32(hw, TXGBE_PBTXLNKXON);
+ hw_stats->tx_xoff_packets += rd32(hw, TXGBE_PBTXLNKXOFF);
+
+ /* DMA Stats */
+ hw_stats->rx_packets += rd32(hw, TXGBE_DMARXPKT);
+ hw_stats->tx_packets += rd32(hw, TXGBE_DMATXPKT);
+
+ hw_stats->rx_bytes += rd64(hw, TXGBE_DMARXOCTL);
+ hw_stats->tx_bytes += rd64(hw, TXGBE_DMATXOCTL);
+ hw_stats->rx_drop_packets += rd32(hw, TXGBE_PBRXDROP);
+
+ /* MAC Stats */
+ hw_stats->rx_crc_errors += rd64(hw, TXGBE_MACRXERRCRCL);
+ hw_stats->rx_multicast_packets += rd64(hw, TXGBE_MACRXMPKTL);
+ hw_stats->tx_multicast_packets += rd64(hw, TXGBE_MACTXMPKTL);
+
+ hw_stats->rx_total_packets += rd64(hw, TXGBE_MACRXPKTL);
+ hw_stats->tx_total_packets += rd64(hw, TXGBE_MACTXPKTL);
+ hw_stats->rx_total_bytes += rd64(hw, TXGBE_MACRXGBOCTL);
+
+ hw_stats->rx_broadcast_packets += rd64(hw, TXGBE_MACRXOCTL);
+ hw_stats->tx_broadcast_packets += rd32(hw, TXGBE_MACTXOCTL);
+
+ hw_stats->rx_size_64_packets += rd64(hw, TXGBE_MACRX1TO64L);
+ hw_stats->rx_size_65_to_127_packets += rd64(hw, TXGBE_MACRX65TO127L);
+ hw_stats->rx_size_128_to_255_packets += rd64(hw, TXGBE_MACRX128TO255L);
+ hw_stats->rx_size_256_to_511_packets += rd64(hw, TXGBE_MACRX256TO511L);
+ hw_stats->rx_size_512_to_1023_packets +=
+ rd64(hw, TXGBE_MACRX512TO1023L);
+ hw_stats->rx_size_1024_to_max_packets +=
+ rd64(hw, TXGBE_MACRX1024TOMAXL);
+ hw_stats->tx_size_64_packets += rd64(hw, TXGBE_MACTX1TO64L);
+ hw_stats->tx_size_65_to_127_packets += rd64(hw, TXGBE_MACTX65TO127L);
+ hw_stats->tx_size_128_to_255_packets += rd64(hw, TXGBE_MACTX128TO255L);
+ hw_stats->tx_size_256_to_511_packets += rd64(hw, TXGBE_MACTX256TO511L);
+ hw_stats->tx_size_512_to_1023_packets +=
+ rd64(hw, TXGBE_MACTX512TO1023L);
+ hw_stats->tx_size_1024_to_max_packets +=
+ rd64(hw, TXGBE_MACTX1024TOMAXL);
+
+ hw_stats->rx_undersize_errors += rd64(hw, TXGBE_MACRXERRLENL);
+ hw_stats->rx_oversize_errors += rd32(hw, TXGBE_MACRXOVERSIZE);
+ hw_stats->rx_jabber_errors += rd32(hw, TXGBE_MACRXJABBER);
+
+ /* MNG Stats */
+ hw_stats->mng_bmc2host_packets = rd32(hw, TXGBE_MNGBMC2OS);
+ hw_stats->mng_host2bmc_packets = rd32(hw, TXGBE_MNGOS2BMC);
+ hw_stats->rx_management_packets = rd32(hw, TXGBE_DMARXMNG);
+ hw_stats->tx_management_packets = rd32(hw, TXGBE_DMATXMNG);
+
+ /* FCoE Stats */
+ hw_stats->rx_fcoe_crc_errors += rd32(hw, TXGBE_FCOECRC);
+ hw_stats->rx_fcoe_mbuf_allocation_errors += rd32(hw, TXGBE_FCOELAST);
+ hw_stats->rx_fcoe_dropped += rd32(hw, TXGBE_FCOERPDC);
+ hw_stats->rx_fcoe_packets += rd32(hw, TXGBE_FCOEPRC);
+ hw_stats->tx_fcoe_packets += rd32(hw, TXGBE_FCOEPTC);
+ hw_stats->rx_fcoe_bytes += rd32(hw, TXGBE_FCOEDWRC);
+ hw_stats->tx_fcoe_bytes += rd32(hw, TXGBE_FCOEDWTC);
+
+ /* Flow Director Stats */
+ hw_stats->flow_director_matched_filters += rd32(hw, TXGBE_FDIRMATCH);
+ hw_stats->flow_director_missed_filters += rd32(hw, TXGBE_FDIRMISS);
+ hw_stats->flow_director_added_filters +=
+ TXGBE_FDIRUSED_ADD(rd32(hw, TXGBE_FDIRUSED));
+ hw_stats->flow_director_removed_filters +=
+ TXGBE_FDIRUSED_REM(rd32(hw, TXGBE_FDIRUSED));
+ hw_stats->flow_director_filter_add_errors +=
+ TXGBE_FDIRFAIL_ADD(rd32(hw, TXGBE_FDIRFAIL));
+ hw_stats->flow_director_filter_remove_errors +=
+ TXGBE_FDIRFAIL_REM(rd32(hw, TXGBE_FDIRFAIL));
+
+ /* MACsec Stats */
+ hw_stats->tx_macsec_pkts_untagged += rd32(hw, TXGBE_LSECTX_UTPKT);
+ hw_stats->tx_macsec_pkts_encrypted +=
+ rd32(hw, TXGBE_LSECTX_ENCPKT);
+ hw_stats->tx_macsec_pkts_protected +=
+ rd32(hw, TXGBE_LSECTX_PROTPKT);
+ hw_stats->tx_macsec_octets_encrypted +=
+ rd32(hw, TXGBE_LSECTX_ENCOCT);
+ hw_stats->tx_macsec_octets_protected +=
+ rd32(hw, TXGBE_LSECTX_PROTOCT);
+ hw_stats->rx_macsec_pkts_untagged += rd32(hw, TXGBE_LSECRX_UTPKT);
+ hw_stats->rx_macsec_pkts_badtag += rd32(hw, TXGBE_LSECRX_BTPKT);
+ hw_stats->rx_macsec_pkts_nosci += rd32(hw, TXGBE_LSECRX_NOSCIPKT);
+ hw_stats->rx_macsec_pkts_unknownsci += rd32(hw, TXGBE_LSECRX_UNSCIPKT);
+ hw_stats->rx_macsec_octets_decrypted += rd32(hw, TXGBE_LSECRX_DECOCT);
+ hw_stats->rx_macsec_octets_validated += rd32(hw, TXGBE_LSECRX_VLDOCT);
+ hw_stats->rx_macsec_sc_pkts_unchecked +=
+ rd32(hw, TXGBE_LSECRX_UNCHKPKT);
+ hw_stats->rx_macsec_sc_pkts_delayed += rd32(hw, TXGBE_LSECRX_DLYPKT);
+ hw_stats->rx_macsec_sc_pkts_late += rd32(hw, TXGBE_LSECRX_LATEPKT);
+ for (i = 0; i < 2; i++) {
+ hw_stats->rx_macsec_sa_pkts_ok +=
+ rd32(hw, TXGBE_LSECRX_OKPKT(i));
+ hw_stats->rx_macsec_sa_pkts_invalid +=
+ rd32(hw, TXGBE_LSECRX_INVPKT(i));
+ hw_stats->rx_macsec_sa_pkts_notvalid +=
+ rd32(hw, TXGBE_LSECRX_BADPKT(i));
+ }
+ hw_stats->rx_macsec_sa_pkts_unusedsa +=
+ rd32(hw, TXGBE_LSECRX_INVSAPKT);
+ hw_stats->rx_macsec_sa_pkts_notusingsa +=
+ rd32(hw, TXGBE_LSECRX_BADSAPKT);
+
+ hw_stats->rx_total_missed_packets = 0;
+ for (i = 0; i < TXGBE_MAX_UP; i++) {
+ hw_stats->rx_total_missed_packets +=
+ hw_stats->up[i].rx_up_dropped;
+ }
+}
+
+static int
+txgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
+ struct txgbe_stat_mappings *stat_mappings =
+ TXGBE_DEV_STAT_MAPPINGS(dev);
+ uint32_t i, j;
+
+ txgbe_read_stats_registers(hw, hw_stats);
+
+ if (stats == NULL)
+ return -EINVAL;
+
+ /* Fill out the rte_eth_stats statistics structure */
+ stats->ipackets = hw_stats->rx_packets;
+ stats->ibytes = hw_stats->rx_bytes;
+ stats->opackets = hw_stats->tx_packets;
+ stats->obytes = hw_stats->tx_bytes;
+
+ memset(&stats->q_ipackets, 0, sizeof(stats->q_ipackets));
+ memset(&stats->q_opackets, 0, sizeof(stats->q_opackets));
+ memset(&stats->q_ibytes, 0, sizeof(stats->q_ibytes));
+ memset(&stats->q_obytes, 0, sizeof(stats->q_obytes));
+ memset(&stats->q_errors, 0, sizeof(stats->q_errors));
+ for (i = 0; i < TXGBE_MAX_QP; i++) {
+ uint32_t n = i / NB_QMAP_FIELDS_PER_QSM_REG;
+ uint32_t offset = (i % NB_QMAP_FIELDS_PER_QSM_REG) * 8;
+ uint32_t q_map;
+
+ q_map = (stat_mappings->rqsm[n] >> offset)
+ & QMAP_FIELD_RESERVED_BITS_MASK;
+ j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
+ ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
+ stats->q_ipackets[j] += hw_stats->qp[i].rx_qp_packets;
+ stats->q_ibytes[j] += hw_stats->qp[i].rx_qp_bytes;
+
+ q_map = (stat_mappings->tqsm[n] >> offset)
+ & QMAP_FIELD_RESERVED_BITS_MASK;
+ j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
+ ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
+ stats->q_opackets[j] += hw_stats->qp[i].tx_qp_packets;
+ stats->q_obytes[j] += hw_stats->qp[i].tx_qp_bytes;
+ }
+
+ /* Rx Errors */
+ stats->imissed = hw_stats->rx_total_missed_packets;
+ stats->ierrors = hw_stats->rx_crc_errors +
+ hw_stats->rx_mac_short_packet_dropped +
+ hw_stats->rx_length_errors +
+ hw_stats->rx_undersize_errors +
+ hw_stats->rx_oversize_errors +
+ hw_stats->rx_drop_packets +
+ hw_stats->rx_illegal_byte_errors +
+ hw_stats->rx_error_bytes +
+ hw_stats->rx_fragment_errors +
+ hw_stats->rx_fcoe_crc_errors +
+ hw_stats->rx_fcoe_mbuf_allocation_errors;
+
+ /* Tx Errors */
+ stats->oerrors = 0;
+ return 0;
+}
+
+static int
+txgbe_dev_stats_reset(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
+
+ /* HW registers are cleared on read */
+ hw->offset_loaded = 0;
+ txgbe_dev_stats_get(dev, NULL);
+ hw->offset_loaded = 1;
+
+ /* Reset software totals */
+ memset(hw_stats, 0, sizeof(*hw_stats));
+
+ return 0;
+}
+
+/* This function calculates the number of xstats based on the current config */
+static unsigned
+txgbe_xstats_calc_num(struct rte_eth_dev *dev)
+{
+ int nb_queues = max(dev->data->nb_rx_queues, dev->data->nb_tx_queues);
+ return TXGBE_NB_HW_STATS +
+ TXGBE_NB_UP_STATS * TXGBE_MAX_UP +
+ TXGBE_NB_QP_STATS * nb_queues;
+}
+
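As a worked example: the tables above contain 81 entries in rte_txgbe_stats_strings, 12 in rte_txgbe_up_strings and 5 in rte_txgbe_qp_strings, so with TXGBE_MAX_UP of 8 (an assumption matching the usual eight user priorities) and 4 configured queues the count is 81 + 12 * 8 + 5 * 4 = 197 xstats.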
+static inline int
+txgbe_get_name_by_id(uint32_t id, char *name, uint32_t size)
+{
+ int nb, st;
+
+ /* Extended stats from txgbe_hw_stats */
+ if (id < TXGBE_NB_HW_STATS) {
+ snprintf(name, size, "[hw]%s",
+ rte_txgbe_stats_strings[id].name);
+ return 0;
+ }
+ id -= TXGBE_NB_HW_STATS;
+
+ /* Priority Stats */
+ if (id < TXGBE_NB_UP_STATS * TXGBE_MAX_UP) {
+ nb = id / TXGBE_NB_UP_STATS;
+ st = id % TXGBE_NB_UP_STATS;
+ snprintf(name, size, "[p%u]%s", nb,
+ rte_txgbe_up_strings[st].name);
+ return 0;
+ }
+ id -= TXGBE_NB_UP_STATS * TXGBE_MAX_UP;
+
+ /* Queue Stats */
+ if (id < TXGBE_NB_QP_STATS * TXGBE_MAX_QP) {
+ nb = id / TXGBE_NB_QP_STATS;
+ st = id % TXGBE_NB_QP_STATS;
+ snprintf(name, size, "[q%u]%s", nb,
+ rte_txgbe_qp_strings[st].name);
+ return 0;
+ }
+ id -= TXGBE_NB_QP_STATS * TXGBE_MAX_QP;
+
+ return -(int)(id + 1);
+}
+
+static inline int
+txgbe_get_offset_by_id(uint32_t id, uint32_t *offset)
+{
+ int nb, st;
+
+ /* Extended stats from txgbe_hw_stats */
+ if (id < TXGBE_NB_HW_STATS) {
+ *offset = rte_txgbe_stats_strings[id].offset;
+ return 0;
+ }
+ id -= TXGBE_NB_HW_STATS;
+
+ /* Priority Stats */
+ if (id < TXGBE_NB_UP_STATS * TXGBE_MAX_UP) {
+ nb = id / TXGBE_NB_UP_STATS;
+ st = id % TXGBE_NB_UP_STATS;
+ *offset = rte_txgbe_up_strings[st].offset +
+ nb * (TXGBE_NB_UP_STATS * sizeof(uint64_t));
+ return 0;
+ }
+ id -= TXGBE_NB_UP_STATS * TXGBE_MAX_UP;
+
+ /* Queue Stats */
+ if (id < TXGBE_NB_QP_STATS * TXGBE_MAX_QP) {
+ nb = id / TXGBE_NB_QP_STATS;
+ st = id % TXGBE_NB_QP_STATS;
+ *offset = rte_txgbe_qp_strings[st].offset +
+ nb * (TXGBE_NB_QP_STATS * sizeof(uint64_t));
+ return 0;
+ }
+ id -= TXGBE_NB_QP_STATS * TXGBE_MAX_QP;
+
+ return -(int)(id + 1);
+}
+
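Both helpers above walk the same three id bands in order: the HW stats, then TXGBE_MAX_UP blocks of priority stats, then per-queue blocks. A compact sketch of the band decode, assuming the table sizes counted above (81/12/5) and 8 user priorities:

#include <stdint.h>
#include <stdio.h>

#define NB_HW	81	/* entries in rte_txgbe_stats_strings, as counted */
#define NB_UP	12	/* entries in rte_txgbe_up_strings */
#define NB_QP	5	/* entries in rte_txgbe_qp_strings */
#define MAX_UP	8	/* assumed number of user priorities */

int main(void)
{
	uint32_t id = NB_HW + 2 * NB_UP + 3;	/* stat 3 of priority 2 */

	if (id < NB_HW) {
		printf("[hw] stat %u\n", id);
		return 0;
	}
	id -= NB_HW;
	if (id < NB_UP * MAX_UP) {
		printf("[p%u] stat %u\n", id / NB_UP, id % NB_UP);
		return 0;
	}
	id -= NB_UP * MAX_UP;
	printf("[q%u] stat %u\n", id / NB_QP, id % NB_QP);
	return 0;
}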
+static int txgbe_dev_xstats_get_names(struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names, unsigned int limit)
+{
+ unsigned int i, count;
+
+ count = txgbe_xstats_calc_num(dev);
+ if (xstats_names == NULL)
+ return count;
+
+ /* Note: limit >= count is checked upstream
+ * in rte_eth_xstats_get_names()
+ */
+ limit = min(limit, count);
+
+ /* Extended stats from txgbe_hw_stats */
+ for (i = 0; i < limit; i++) {
+ if (txgbe_get_name_by_id(i, xstats_names[i].name,
+ sizeof(xstats_names[i].name))) {
+ PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
+ break;
+ }
+ }
+
+ return i;
+}
+
+static int txgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names,
+ const uint64_t *ids,
+ unsigned int limit)
+{
+ unsigned int i;
+
+ if (ids == NULL)
+ return txgbe_dev_xstats_get_names(dev, xstats_names, limit);
+
+ for (i = 0; i < limit; i++) {
+ if (txgbe_get_name_by_id(ids[i], xstats_names[i].name,
+ sizeof(xstats_names[i].name))) {
+ PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
+ return -1;
+ }
+ }
+
+ return i;
+}
+
+static int
+txgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
+ unsigned int limit)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
+ unsigned int i, count;
+
+ txgbe_read_stats_registers(hw, hw_stats);
+
+ /* If this is a reset, xstats is NULL and we have cleared the
+ * registers by reading them.
+ */
+ count = txgbe_xstats_calc_num(dev);
+ if (xstats == NULL)
+ return count;
+
+ limit = min(limit, txgbe_xstats_calc_num(dev));
+
+ /* Extended stats from txgbe_hw_stats */
+ for (i = 0; i < limit; i++) {
+ uint32_t offset = 0;
+
+ if (txgbe_get_offset_by_id(i, &offset)) {
+ PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
+ break;
+ }
+ xstats[i].value = *(uint64_t *)(((char *)hw_stats) + offset);
+ xstats[i].id = i;
+ }
+
+ return i;
+}
+
+static int
+txgbe_dev_xstats_get_(struct rte_eth_dev *dev, uint64_t *values,
+ unsigned int limit)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
+ unsigned int i, count;
+
+ txgbe_read_stats_registers(hw, hw_stats);
+
+ /* If this is a reset, xstats is NULL and we have cleared the
+ * registers by reading them.
+ */
+ count = txgbe_xstats_calc_num(dev);
+ if (values == NULL)
+ return count;
+
+ limit = min(limit, txgbe_xstats_calc_num(dev));
+
+ /* Extended stats from txgbe_hw_stats */
+ for (i = 0; i < limit; i++) {
+ uint32_t offset;
+
+ if (txgbe_get_offset_by_id(i, &offset)) {
+ PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
+ break;
+ }
+ values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
+ }
+
+ return i;
+}
+
+static int
+txgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
+ uint64_t *values, unsigned int limit)
+{
+ struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
+ unsigned int i;
+
+ if (ids == NULL)
+ return txgbe_dev_xstats_get_(dev, values, limit);
+
+ for (i = 0; i < limit; i++) {
+ uint32_t offset;
+
+ if (txgbe_get_offset_by_id(ids[i], &offset)) {
+ PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
+ break;
+ }
+ values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
+ }
+
+ return i;
+}
+
+static int
+txgbe_dev_xstats_reset(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
+
+ /* HW registers are cleared on read */
+ hw->offset_loaded = 0;
+ txgbe_read_stats_registers(hw, hw_stats);
+ hw->offset_loaded = 1;
+
+ /* Reset software totals */
+ memset(hw_stats, 0, sizeof(*hw_stats));
+
return 0;
}
return 0;
}
+const uint32_t *
+txgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
+{
+ if (dev->rx_pkt_burst == txgbe_recv_pkts ||
+ dev->rx_pkt_burst == txgbe_recv_pkts_lro_single_alloc ||
+ dev->rx_pkt_burst == txgbe_recv_pkts_lro_bulk_alloc ||
+ dev->rx_pkt_burst == txgbe_recv_pkts_bulk_alloc)
+ return txgbe_get_supported_ptypes();
+
+ return NULL;
+}
+
void
txgbe_dev_setup_link_alarm_handler(void *param)
{
return 0;
}
+static int
+txgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ uint32_t mask;
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+
+ if (queue_id < 32) {
+ mask = rd32(hw, TXGBE_IMS(0));
+ mask &= (1 << queue_id);
+ wr32(hw, TXGBE_IMS(0), mask);
+ } else if (queue_id < 64) {
+ mask = rd32(hw, TXGBE_IMS(1));
+ mask &= (1 << (queue_id - 32));
+ wr32(hw, TXGBE_IMS(1), mask);
+ }
+ rte_intr_enable(intr_handle);
+
+ return 0;
+}
+
+static int
+txgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ uint32_t mask;
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+
+ if (queue_id < 32) {
+ mask = rd32(hw, TXGBE_IMS(0));
+ mask &= ~(1 << queue_id);
+ wr32(hw, TXGBE_IMS(0), mask);
+ } else if (queue_id < 64) {
+ mask = rd32(hw, TXGBE_IMS(1));
+ mask &= ~(1 << (queue_id - 32));
+ wr32(hw, TXGBE_IMS(1), mask);
+ }
+
+ return 0;
+}
+
/**
* set the IVAR registers, mapping interrupt causes to vectors
* @param hw
static const struct eth_dev_ops txgbe_eth_dev_ops = {
.dev_configure = txgbe_dev_configure,
.dev_infos_get = txgbe_dev_info_get,
+ .dev_start = txgbe_dev_start,
+ .dev_stop = txgbe_dev_stop,
.dev_set_link_up = txgbe_dev_set_link_up,
.dev_set_link_down = txgbe_dev_set_link_down,
+ .dev_close = txgbe_dev_close,
+ .dev_reset = txgbe_dev_reset,
+ .link_update = txgbe_dev_link_update,
+ .stats_get = txgbe_dev_stats_get,
+ .xstats_get = txgbe_dev_xstats_get,
+ .xstats_get_by_id = txgbe_dev_xstats_get_by_id,
+ .stats_reset = txgbe_dev_stats_reset,
+ .xstats_reset = txgbe_dev_xstats_reset,
+ .xstats_get_names = txgbe_dev_xstats_get_names,
+ .xstats_get_names_by_id = txgbe_dev_xstats_get_names_by_id,
+ .queue_stats_mapping_set = txgbe_dev_queue_stats_mapping_set,
+ .dev_supported_ptypes_get = txgbe_dev_supported_ptypes_get,
+ .vlan_filter_set = txgbe_vlan_filter_set,
+ .vlan_tpid_set = txgbe_vlan_tpid_set,
+ .vlan_offload_set = txgbe_vlan_offload_set,
+ .vlan_strip_queue_set = txgbe_vlan_strip_queue_set,
.rx_queue_start = txgbe_dev_rx_queue_start,
.rx_queue_stop = txgbe_dev_rx_queue_stop,
.tx_queue_start = txgbe_dev_tx_queue_start,
.tx_queue_stop = txgbe_dev_tx_queue_stop,
.rx_queue_setup = txgbe_dev_rx_queue_setup,
+ .rx_queue_intr_enable = txgbe_dev_rx_queue_intr_enable,
+ .rx_queue_intr_disable = txgbe_dev_rx_queue_intr_disable,
.rx_queue_release = txgbe_dev_rx_queue_release,
.tx_queue_setup = txgbe_dev_tx_queue_setup,
.tx_queue_release = txgbe_dev_tx_queue_release,
.uc_hash_table_set = txgbe_uc_hash_table_set,
.uc_all_hash_table_set = txgbe_uc_all_hash_table_set,
.set_mc_addr_list = txgbe_dev_set_mc_addr_list,
+ .rxq_info_get = txgbe_rxq_info_get,
+ .txq_info_get = txgbe_txq_info_get,
};
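With these ops registered, applications read the extended stats through the generic ethdev API. A brief sketch; the two-pass query-size-then-fill pattern is the documented usage:

#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <rte_ethdev.h>

/* Sketch: dump all xstats of a started port. */
static void dump_xstats(uint16_t port_id)
{
	struct rte_eth_xstat *xs = NULL;
	struct rte_eth_xstat_name *names = NULL;
	int i, n;

	n = rte_eth_xstats_get(port_id, NULL, 0);	/* query count */
	if (n <= 0)
		return;

	xs = calloc(n, sizeof(*xs));
	names = calloc(n, sizeof(*names));
	if (xs == NULL || names == NULL)
		goto out;

	if (rte_eth_xstats_get(port_id, xs, n) != n ||
	    rte_eth_xstats_get_names(port_id, names, n) != n)
		goto out;

	for (i = 0; i < n; i++)
		printf("%s: %" PRIu64 "\n", names[i].name, xs[i].value);
out:
	free(xs);
	free(names);
}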
RTE_PMD_REGISTER_PCI(net_txgbe, rte_txgbe_pmd);