X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fngbe%2Fngbe_ethdev.c;h=d369cfb6dd66cf38f25de108b298c80364903418;hb=07baabb6a51a2d6a8cfcdc52acee2ce629f5c22e;hp=31c1ebd3d677e1d9d8efb902bb84817c07372ac4;hpb=3518df5774c759b407056a835ac5989175e3e92a;p=dpdk.git diff --git a/drivers/net/ngbe/ngbe_ethdev.c b/drivers/net/ngbe/ngbe_ethdev.c index 31c1ebd3d6..d369cfb6dd 100644 --- a/drivers/net/ngbe/ngbe_ethdev.c +++ b/drivers/net/ngbe/ngbe_ethdev.c @@ -17,6 +17,10 @@ static int ngbe_dev_close(struct rte_eth_dev *dev); static int ngbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete); +static int ngbe_dev_stats_reset(struct rte_eth_dev *dev); +static void ngbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue); +static void ngbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, + uint16_t queue); static void ngbe_dev_link_status_print(struct rte_eth_dev *dev); static int ngbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on); @@ -27,6 +31,24 @@ static void ngbe_dev_interrupt_handler(void *param); static void ngbe_dev_interrupt_delayed_handler(void *param); static void ngbe_configure_msix(struct rte_eth_dev *dev); +#define NGBE_SET_HWSTRIP(h, q) do {\ + uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \ + uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \ + (h)->bitmap[idx] |= 1 << bit;\ + } while (0) + +#define NGBE_CLEAR_HWSTRIP(h, q) do {\ + uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \ + uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \ + (h)->bitmap[idx] &= ~(1 << bit);\ + } while (0) + +#define NGBE_GET_HWSTRIP(h, q, r) do {\ + uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \ + uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \ + (r) = (h)->bitmap[idx] >> bit & 1;\ + } while (0) + /* * The set of PCI devices this driver supports */ @@ -62,6 +84,104 @@ static const struct rte_eth_desc_lim tx_desc_lim = { static const struct eth_dev_ops ngbe_eth_dev_ops; +#define HW_XSTAT(m) {#m, offsetof(struct ngbe_hw_stats, m)} +#define HW_XSTAT_NAME(m, n) {n, offsetof(struct ngbe_hw_stats, m)} +static const struct rte_ngbe_xstats_name_off rte_ngbe_stats_strings[] = { + /* MNG RxTx */ + HW_XSTAT(mng_bmc2host_packets), + HW_XSTAT(mng_host2bmc_packets), + /* Basic RxTx */ + HW_XSTAT(rx_packets), + HW_XSTAT(tx_packets), + HW_XSTAT(rx_bytes), + HW_XSTAT(tx_bytes), + HW_XSTAT(rx_total_bytes), + HW_XSTAT(rx_total_packets), + HW_XSTAT(tx_total_packets), + HW_XSTAT(rx_total_missed_packets), + HW_XSTAT(rx_broadcast_packets), + HW_XSTAT(rx_multicast_packets), + HW_XSTAT(rx_management_packets), + HW_XSTAT(tx_management_packets), + HW_XSTAT(rx_management_dropped), + + /* Basic Error */ + HW_XSTAT(rx_crc_errors), + HW_XSTAT(rx_illegal_byte_errors), + HW_XSTAT(rx_error_bytes), + HW_XSTAT(rx_mac_short_packet_dropped), + HW_XSTAT(rx_length_errors), + HW_XSTAT(rx_undersize_errors), + HW_XSTAT(rx_fragment_errors), + HW_XSTAT(rx_oversize_errors), + HW_XSTAT(rx_jabber_errors), + HW_XSTAT(rx_l3_l4_xsum_error), + HW_XSTAT(mac_local_errors), + HW_XSTAT(mac_remote_errors), + + /* MACSEC */ + HW_XSTAT(tx_macsec_pkts_untagged), + HW_XSTAT(tx_macsec_pkts_encrypted), + HW_XSTAT(tx_macsec_pkts_protected), + HW_XSTAT(tx_macsec_octets_encrypted), + HW_XSTAT(tx_macsec_octets_protected), + HW_XSTAT(rx_macsec_pkts_untagged), + HW_XSTAT(rx_macsec_pkts_badtag), + HW_XSTAT(rx_macsec_pkts_nosci), + HW_XSTAT(rx_macsec_pkts_unknownsci), + HW_XSTAT(rx_macsec_octets_decrypted), + HW_XSTAT(rx_macsec_octets_validated), + HW_XSTAT(rx_macsec_sc_pkts_unchecked), 
+ HW_XSTAT(rx_macsec_sc_pkts_delayed),
+ HW_XSTAT(rx_macsec_sc_pkts_late),
+ HW_XSTAT(rx_macsec_sa_pkts_ok),
+ HW_XSTAT(rx_macsec_sa_pkts_invalid),
+ HW_XSTAT(rx_macsec_sa_pkts_notvalid),
+ HW_XSTAT(rx_macsec_sa_pkts_unusedsa),
+ HW_XSTAT(rx_macsec_sa_pkts_notusingsa),
+
+ /* MAC RxTx */
+ HW_XSTAT(rx_size_64_packets),
+ HW_XSTAT(rx_size_65_to_127_packets),
+ HW_XSTAT(rx_size_128_to_255_packets),
+ HW_XSTAT(rx_size_256_to_511_packets),
+ HW_XSTAT(rx_size_512_to_1023_packets),
+ HW_XSTAT(rx_size_1024_to_max_packets),
+ HW_XSTAT(tx_size_64_packets),
+ HW_XSTAT(tx_size_65_to_127_packets),
+ HW_XSTAT(tx_size_128_to_255_packets),
+ HW_XSTAT(tx_size_256_to_511_packets),
+ HW_XSTAT(tx_size_512_to_1023_packets),
+ HW_XSTAT(tx_size_1024_to_max_packets),
+
+ /* Flow Control */
+ HW_XSTAT(tx_xon_packets),
+ HW_XSTAT(rx_xon_packets),
+ HW_XSTAT(tx_xoff_packets),
+ HW_XSTAT(rx_xoff_packets),
+
+ HW_XSTAT_NAME(tx_xon_packets, "tx_flow_control_xon_packets"),
+ HW_XSTAT_NAME(rx_xon_packets, "rx_flow_control_xon_packets"),
+ HW_XSTAT_NAME(tx_xoff_packets, "tx_flow_control_xoff_packets"),
+ HW_XSTAT_NAME(rx_xoff_packets, "rx_flow_control_xoff_packets"),
+};
+
+#define NGBE_NB_HW_STATS (sizeof(rte_ngbe_stats_strings) / \
+ sizeof(rte_ngbe_stats_strings[0]))
+
+/* Per-queue statistics */
+#define QP_XSTAT(m) {#m, offsetof(struct ngbe_hw_stats, qp[0].m)}
+static const struct rte_ngbe_xstats_name_off rte_ngbe_qp_strings[] = {
+ QP_XSTAT(rx_qp_packets),
+ QP_XSTAT(tx_qp_packets),
+ QP_XSTAT(rx_qp_bytes),
+ QP_XSTAT(tx_qp_bytes),
+ QP_XSTAT(rx_qp_mc_packets),
+};
+
+#define NGBE_NB_QP_STATS (sizeof(rte_ngbe_qp_strings) / \
+ sizeof(rte_ngbe_qp_strings[0]))
+
 static inline int32_t
 ngbe_pf_reset_hw(struct ngbe_hw *hw)
 {
@@ -129,7 +249,9 @@ eth_ngbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
 {
 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
 struct ngbe_hw *hw = ngbe_dev_hw(eth_dev);
- struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(eth_dev);
+ struct ngbe_hwstrip *hwstrip = NGBE_DEV_HWSTRIP(eth_dev);
+ struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
 const struct rte_memzone *mz;
 uint32_t ctrl_ext;
 int err;
@@ -137,11 +259,37 @@ eth_ngbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
 PMD_INIT_FUNC_TRACE();
 
 eth_dev->dev_ops = &ngbe_eth_dev_ops;
+ eth_dev->rx_pkt_burst = &ngbe_recv_pkts;
+ eth_dev->tx_pkt_burst = &ngbe_xmit_pkts;
+ eth_dev->tx_pkt_prepare = &ngbe_prep_pkts;
+
+ /*
+ * For secondary processes, we don't initialise any further as primary
+ * has already done this work. Only check we don't need a different
+ * Rx and Tx function.
+ */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ struct ngbe_tx_queue *txq;
+ /* Tx queue function in primary, set by last queue initialized
+ * Tx queue may not be initialized by primary process
+ */
+ if (eth_dev->data->tx_queues) {
+ uint16_t nb_tx_queues = eth_dev->data->nb_tx_queues;
+ txq = eth_dev->data->tx_queues[nb_tx_queues - 1];
+ ngbe_set_tx_function(eth_dev, txq);
+ } else {
+ /* Use default Tx function if we get here */
+ PMD_INIT_LOG(NOTICE,
+ "No Tx queues configured yet. Using default Tx function.");
+ }
+
+ ngbe_set_rx_function(eth_dev);
- if (rte_eal_process_type() != RTE_PROC_PRIMARY)
 return 0;
+ }
 
 rte_eth_copy_pci_info(eth_dev, pci_dev);
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 
 /* Vendor and Device ID need to be set before init of shared code */
 hw->device_id = pci_dev->id.device_id;
@@ -188,6 +336,9 @@ eth_ngbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
 return -EIO;
 }
 
+ /* Reset the hw statistics */
+ ngbe_dev_stats_reset(eth_dev);
+
 /* disable interrupt */
 ngbe_disable_intr(hw);
 
@@ -217,6 +368,12 @@ eth_ngbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
 return -ENOMEM;
 }
 
+ /* initialize the vfta */
+ memset(shadow_vfta, 0, sizeof(*shadow_vfta));
+
+ /* initialize the hw strip bitmap */
+ memset(hwstrip, 0, sizeof(*hwstrip));
+
 ctrl_ext = rd32(hw, NGBE_PORTCTL);
 /* let hardware know driver is loaded */
 ctrl_ext |= NGBE_PORTCTL_DRVLOAD;
@@ -254,7 +411,7 @@ eth_ngbe_dev_uninit(struct rte_eth_dev *eth_dev)
 
 ngbe_dev_close(eth_dev);
 
- return -EINVAL;
+ return 0;
 }
 
 static int
@@ -286,6 +443,362 @@ static struct rte_pci_driver rte_ngbe_pmd = {
 .remove = eth_ngbe_pci_remove,
 };
 
+static int
+ngbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
+{
+ struct ngbe_hw *hw = ngbe_dev_hw(dev);
+ struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(dev);
+ uint32_t vfta;
+ uint32_t vid_idx;
+ uint32_t vid_bit;
+
+ vid_idx = (uint32_t)((vlan_id >> 5) & 0x7F);
+ vid_bit = (uint32_t)(1 << (vlan_id & 0x1F));
+ vfta = rd32(hw, NGBE_VLANTBL(vid_idx));
+ if (on)
+ vfta |= vid_bit;
+ else
+ vfta &= ~vid_bit;
+ wr32(hw, NGBE_VLANTBL(vid_idx), vfta);
+
+ /* update local VFTA copy */
+ shadow_vfta->vfta[vid_idx] = vfta;
+
+ return 0;
+}
+
+static void
+ngbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
+{
+ struct ngbe_hw *hw = ngbe_dev_hw(dev);
+ struct ngbe_rx_queue *rxq;
+ bool restart;
+ uint32_t rxcfg, rxbal, rxbah;
+
+ if (on)
+ ngbe_vlan_hw_strip_enable(dev, queue);
+ else
+ ngbe_vlan_hw_strip_disable(dev, queue);
+
+ rxq = dev->data->rx_queues[queue];
+ rxbal = rd32(hw, NGBE_RXBAL(rxq->reg_idx));
+ rxbah = rd32(hw, NGBE_RXBAH(rxq->reg_idx));
+ rxcfg = rd32(hw, NGBE_RXCFG(rxq->reg_idx));
+ if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
+ restart = (rxcfg & NGBE_RXCFG_ENA) &&
+ !(rxcfg & NGBE_RXCFG_VLAN);
+ rxcfg |= NGBE_RXCFG_VLAN;
+ } else {
+ restart = (rxcfg & NGBE_RXCFG_ENA) &&
+ (rxcfg & NGBE_RXCFG_VLAN);
+ rxcfg &= ~NGBE_RXCFG_VLAN;
+ }
+ rxcfg &= ~NGBE_RXCFG_ENA;
+
+ if (restart) {
+ /* set vlan strip for ring */
+ ngbe_dev_rx_queue_stop(dev, queue);
+ wr32(hw, NGBE_RXBAL(rxq->reg_idx), rxbal);
+ wr32(hw, NGBE_RXBAH(rxq->reg_idx), rxbah);
+ wr32(hw, NGBE_RXCFG(rxq->reg_idx), rxcfg);
+ ngbe_dev_rx_queue_start(dev, queue);
+ }
+}
+
+static int
+ngbe_vlan_tpid_set(struct rte_eth_dev *dev,
+ enum rte_vlan_type vlan_type,
+ uint16_t tpid)
+{
+ struct ngbe_hw *hw = ngbe_dev_hw(dev);
+ int ret = 0;
+ uint32_t portctrl, vlan_ext, qinq;
+
+ portctrl = rd32(hw, NGBE_PORTCTL);
+
+ vlan_ext = (portctrl & NGBE_PORTCTL_VLANEXT);
+ qinq = vlan_ext && (portctrl & NGBE_PORTCTL_QINQ);
+ switch (vlan_type) {
+ case RTE_ETH_VLAN_TYPE_INNER:
+ if (vlan_ext) {
+ wr32m(hw, NGBE_VLANCTL,
+ NGBE_VLANCTL_TPID_MASK,
+ NGBE_VLANCTL_TPID(tpid));
+ wr32m(hw, NGBE_DMATXCTRL,
+ NGBE_DMATXCTRL_TPID_MASK,
+ NGBE_DMATXCTRL_TPID(tpid));
+ } else {
+ ret = -ENOTSUP;
+ PMD_DRV_LOG(ERR,
+ "Inner type is not supported by single VLAN");
+ }
+
+ if (qinq) {
+ wr32m(hw, NGBE_TAGTPID(0),
+ NGBE_TAGTPID_LSB_MASK,
+ NGBE_TAGTPID_LSB(tpid));
+ }
+ break;
+ case RTE_ETH_VLAN_TYPE_OUTER:
+ if (vlan_ext) {
+ /* Only the high 16 bits are valid */
+ wr32m(hw, NGBE_EXTAG,
+ NGBE_EXTAG_VLAN_MASK,
+ NGBE_EXTAG_VLAN(tpid));
+ } else {
+ wr32m(hw, NGBE_VLANCTL,
+ NGBE_VLANCTL_TPID_MASK,
+ NGBE_VLANCTL_TPID(tpid));
+ wr32m(hw, NGBE_DMATXCTRL,
+ NGBE_DMATXCTRL_TPID_MASK,
+ NGBE_DMATXCTRL_TPID(tpid));
+ }
+
+ if (qinq) {
+ wr32m(hw, NGBE_TAGTPID(0),
+ NGBE_TAGTPID_MSB_MASK,
+ NGBE_TAGTPID_MSB(tpid));
+ }
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type);
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+void
+ngbe_vlan_hw_filter_disable(struct rte_eth_dev *dev)
+{
+ struct ngbe_hw *hw = ngbe_dev_hw(dev);
+ uint32_t vlnctrl;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Filter Table Disable */
+ vlnctrl = rd32(hw, NGBE_VLANCTL);
+ vlnctrl &= ~NGBE_VLANCTL_VFE;
+ wr32(hw, NGBE_VLANCTL, vlnctrl);
+}
+
+void
+ngbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
+{
+ struct ngbe_hw *hw = ngbe_dev_hw(dev);
+ struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(dev);
+ uint32_t vlnctrl;
+ uint16_t i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Filter Table Enable */
+ vlnctrl = rd32(hw, NGBE_VLANCTL);
+ vlnctrl &= ~NGBE_VLANCTL_CFIENA;
+ vlnctrl |= NGBE_VLANCTL_VFE;
+ wr32(hw, NGBE_VLANCTL, vlnctrl);
+
+ /* write whatever is in local vfta copy */
+ for (i = 0; i < NGBE_VFTA_SIZE; i++)
+ wr32(hw, NGBE_VLANTBL(i), shadow_vfta->vfta[i]);
+}
+
+void
+ngbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
+{
+ struct ngbe_hwstrip *hwstrip = NGBE_DEV_HWSTRIP(dev);
+ struct ngbe_rx_queue *rxq;
+
+ if (queue >= NGBE_MAX_RX_QUEUE_NUM)
+ return;
+
+ if (on)
+ NGBE_SET_HWSTRIP(hwstrip, queue);
+ else
+ NGBE_CLEAR_HWSTRIP(hwstrip, queue);
+
+ if (queue >= dev->data->nb_rx_queues)
+ return;
+
+ rxq = dev->data->rx_queues[queue];
+
+ if (on) {
+ rxq->vlan_flags = RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
+ rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
+ } else {
+ rxq->vlan_flags = RTE_MBUF_F_RX_VLAN;
+ rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
+ }
+}
+
+static void
+ngbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue)
+{
+ struct ngbe_hw *hw = ngbe_dev_hw(dev);
+ uint32_t ctrl;
+
+ PMD_INIT_FUNC_TRACE();
+
+ ctrl = rd32(hw, NGBE_RXCFG(queue));
+ ctrl &= ~NGBE_RXCFG_VLAN;
+ wr32(hw, NGBE_RXCFG(queue), ctrl);
+
+ /* record this setting for HW strip per queue */
+ ngbe_vlan_hw_strip_bitmap_set(dev, queue, 0);
+}
+
+static void
+ngbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
+{
+ struct ngbe_hw *hw = ngbe_dev_hw(dev);
+ uint32_t ctrl;
+
+ PMD_INIT_FUNC_TRACE();
+
+ ctrl = rd32(hw, NGBE_RXCFG(queue));
+ ctrl |= NGBE_RXCFG_VLAN;
+ wr32(hw, NGBE_RXCFG(queue), ctrl);
+
+ /* record this setting for HW strip per queue */
+ ngbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
+}
+
+static void
+ngbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
+{
+ struct ngbe_hw *hw = ngbe_dev_hw(dev);
+ uint32_t ctrl;
+
+ PMD_INIT_FUNC_TRACE();
+
+ ctrl = rd32(hw, NGBE_PORTCTL);
+ ctrl &= ~NGBE_PORTCTL_VLANEXT;
+ ctrl &= ~NGBE_PORTCTL_QINQ;
+ wr32(hw, NGBE_PORTCTL, ctrl);
+}
+
+static void
+ngbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
+{
+ struct ngbe_hw *hw = ngbe_dev_hw(dev);
+ uint32_t ctrl;
+
+ PMD_INIT_FUNC_TRACE();
+
+ ctrl = rd32(hw, NGBE_PORTCTL);
+ ctrl |= NGBE_PORTCTL_VLANEXT | NGBE_PORTCTL_QINQ;
+ wr32(hw, NGBE_PORTCTL, ctrl);
+}
+
+static void
+ngbe_qinq_hw_strip_disable(struct rte_eth_dev *dev)
+{
+ struct ngbe_hw *hw = ngbe_dev_hw(dev);
+ uint32_t 
ctrl; + + PMD_INIT_FUNC_TRACE(); + + ctrl = rd32(hw, NGBE_PORTCTL); + ctrl &= ~NGBE_PORTCTL_QINQ; + wr32(hw, NGBE_PORTCTL, ctrl); +} + +static void +ngbe_qinq_hw_strip_enable(struct rte_eth_dev *dev) +{ + struct ngbe_hw *hw = ngbe_dev_hw(dev); + uint32_t ctrl; + + PMD_INIT_FUNC_TRACE(); + + ctrl = rd32(hw, NGBE_PORTCTL); + ctrl |= NGBE_PORTCTL_QINQ | NGBE_PORTCTL_VLANEXT; + wr32(hw, NGBE_PORTCTL, ctrl); +} + +void +ngbe_vlan_hw_strip_config(struct rte_eth_dev *dev) +{ + struct ngbe_rx_queue *rxq; + uint16_t i; + + PMD_INIT_FUNC_TRACE(); + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev->data->rx_queues[i]; + + if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) + ngbe_vlan_hw_strip_enable(dev, i); + else + ngbe_vlan_hw_strip_disable(dev, i); + } +} + +void +ngbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask) +{ + uint16_t i; + struct rte_eth_rxmode *rxmode; + struct ngbe_rx_queue *rxq; + + if (mask & RTE_ETH_VLAN_STRIP_MASK) { + rxmode = &dev->data->dev_conf.rxmode; + if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev->data->rx_queues[i]; + rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP; + } + else + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev->data->rx_queues[i]; + rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP; + } + } +} + +static int +ngbe_vlan_offload_config(struct rte_eth_dev *dev, int mask) +{ + struct rte_eth_rxmode *rxmode; + rxmode = &dev->data->dev_conf.rxmode; + + if (mask & RTE_ETH_VLAN_STRIP_MASK) + ngbe_vlan_hw_strip_config(dev); + + if (mask & RTE_ETH_VLAN_FILTER_MASK) { + if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) + ngbe_vlan_hw_filter_enable(dev); + else + ngbe_vlan_hw_filter_disable(dev); + } + + if (mask & RTE_ETH_VLAN_EXTEND_MASK) { + if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND) + ngbe_vlan_hw_extend_enable(dev); + else + ngbe_vlan_hw_extend_disable(dev); + } + + if (mask & RTE_ETH_QINQ_STRIP_MASK) { + if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP) + ngbe_qinq_hw_strip_enable(dev); + else + ngbe_qinq_hw_strip_disable(dev); + } + + return 0; +} + +static int +ngbe_vlan_offload_set(struct rte_eth_dev *dev, int mask) +{ + ngbe_config_vlan_strip_on_all_queues(dev, mask); + + ngbe_vlan_offload_config(dev, mask); + + return 0; +} + static int ngbe_dev_configure(struct rte_eth_dev *dev) { @@ -331,13 +844,15 @@ static int ngbe_dev_start(struct rte_eth_dev *dev) { struct ngbe_hw *hw = ngbe_dev_hw(dev); + struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev); struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); - struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + struct rte_intr_handle *intr_handle = pci_dev->intr_handle; uint32_t intr_vector = 0; int err; bool link_up = false, negotiate = false; uint32_t speed = 0; uint32_t allowed_speeds = 0; + int mask = 0; int status; uint32_t *link_speeds; @@ -370,11 +885,9 @@ ngbe_dev_start(struct rte_eth_dev *dev) return -1; } - if (rte_intr_dp_is_en(intr_handle) && intr_handle->intr_vec == NULL) { - intr_handle->intr_vec = - rte_zmalloc("intr_vec", - dev->data->nb_rx_queues * sizeof(int), 0); - if (intr_handle->intr_vec == NULL) { + if (rte_intr_dp_is_en(intr_handle)) { + if (rte_intr_vec_list_alloc(intr_handle, "intr_vec", + dev->data->nb_rx_queues)) { PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues intr_vec", dev->data->nb_rx_queues); @@ -395,6 +908,16 @@ ngbe_dev_start(struct rte_eth_dev *dev) goto error; } + mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK | + 
RTE_ETH_VLAN_EXTEND_MASK; + err = ngbe_vlan_offload_config(dev, mask); + if (err != 0) { + PMD_INIT_LOG(ERR, "Unable to set VLAN offload"); + goto error; + } + + ngbe_configure_port(dev); + err = ngbe_dev_rxtx_start(dev); if (err < 0) { PMD_INIT_LOG(ERR, "Unable to start rxtx queues"); @@ -407,7 +930,7 @@ ngbe_dev_start(struct rte_eth_dev *dev) dev->data->dev_link.link_status = link_up; link_speeds = &dev->data->dev_conf.link_speeds; - if (*link_speeds == ETH_LINK_SPEED_AUTONEG) + if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) negotiate = true; err = hw->mac.get_link_capabilities(hw, &speed, &negotiate); @@ -416,11 +939,11 @@ ngbe_dev_start(struct rte_eth_dev *dev) allowed_speeds = 0; if (hw->mac.default_speeds & NGBE_LINK_SPEED_1GB_FULL) - allowed_speeds |= ETH_LINK_SPEED_1G; + allowed_speeds |= RTE_ETH_LINK_SPEED_1G; if (hw->mac.default_speeds & NGBE_LINK_SPEED_100M_FULL) - allowed_speeds |= ETH_LINK_SPEED_100M; + allowed_speeds |= RTE_ETH_LINK_SPEED_100M; if (hw->mac.default_speeds & NGBE_LINK_SPEED_10M_FULL) - allowed_speeds |= ETH_LINK_SPEED_10M; + allowed_speeds |= RTE_ETH_LINK_SPEED_10M; if (*link_speeds & ~allowed_speeds) { PMD_INIT_LOG(ERR, "Invalid link setting"); @@ -428,14 +951,14 @@ ngbe_dev_start(struct rte_eth_dev *dev) } speed = 0x0; - if (*link_speeds == ETH_LINK_SPEED_AUTONEG) { + if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) { speed = hw->mac.default_speeds; } else { - if (*link_speeds & ETH_LINK_SPEED_1G) + if (*link_speeds & RTE_ETH_LINK_SPEED_1G) speed |= NGBE_LINK_SPEED_1GB_FULL; - if (*link_speeds & ETH_LINK_SPEED_100M) + if (*link_speeds & RTE_ETH_LINK_SPEED_100M) speed |= NGBE_LINK_SPEED_100M_FULL; - if (*link_speeds & ETH_LINK_SPEED_10M) + if (*link_speeds & RTE_ETH_LINK_SPEED_10M) speed |= NGBE_LINK_SPEED_10M_FULL; } @@ -484,6 +1007,9 @@ ngbe_dev_start(struct rte_eth_dev *dev) */ ngbe_dev_link_update(dev, 0); + ngbe_read_stats_registers(hw, hw_stats); + hw->offset_loaded = 1; + return 0; error: @@ -501,7 +1027,7 @@ ngbe_dev_stop(struct rte_eth_dev *dev) struct rte_eth_link link; struct ngbe_hw *hw = ngbe_dev_hw(dev); struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); - struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + struct rte_intr_handle *intr_handle = pci_dev->intr_handle; if (hw->adapter_stopped) return 0; @@ -526,6 +1052,9 @@ ngbe_dev_stop(struct rte_eth_dev *dev) ngbe_dev_clear_queues(dev); + /* Clear stored conf */ + dev->data->scattered_rx = 0; + /* Clear recorded link status */ memset(&link, 0, sizeof(link)); rte_eth_linkstatus_set(dev, &link); @@ -538,10 +1067,7 @@ ngbe_dev_stop(struct rte_eth_dev *dev) /* Clean datapath event and queue/vec mapping */ rte_intr_efd_disable(intr_handle); - if (intr_handle->intr_vec != NULL) { - rte_free(intr_handle->intr_vec); - intr_handle->intr_vec = NULL; - } + rte_intr_vec_list_free(intr_handle); hw->adapter_stopped = true; dev->data->dev_started = 0; @@ -555,11 +1081,518 @@ ngbe_dev_stop(struct rte_eth_dev *dev) static int ngbe_dev_close(struct rte_eth_dev *dev) { + struct ngbe_hw *hw = ngbe_dev_hw(dev); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = pci_dev->intr_handle; + int retries = 0; + int ret; + PMD_INIT_FUNC_TRACE(); - RTE_SET_USED(dev); + ngbe_pf_reset_hw(hw); + + ngbe_dev_stop(dev); + + ngbe_dev_free_queues(dev); + + /* reprogram the RAR[0] in case user changed it. 
*/ + ngbe_set_rar(hw, 0, hw->mac.addr, 0, true); + + /* Unlock any pending hardware semaphore */ + ngbe_swfw_lock_reset(hw); + + /* disable uio intr before callback unregister */ + rte_intr_disable(intr_handle); + + do { + ret = rte_intr_callback_unregister(intr_handle, + ngbe_dev_interrupt_handler, dev); + if (ret >= 0 || ret == -ENOENT) { + break; + } else if (ret != -EAGAIN) { + PMD_INIT_LOG(ERR, + "intr callback unregister failed: %d", + ret); + } + rte_delay_ms(100); + } while (retries++ < (10 + NGBE_LINK_UP_TIME)); + + rte_free(dev->data->mac_addrs); + dev->data->mac_addrs = NULL; + + rte_free(dev->data->hash_mac_addrs); + dev->data->hash_mac_addrs = NULL; + + return ret; +} + +/* + * Reset PF device. + */ +static int +ngbe_dev_reset(struct rte_eth_dev *dev) +{ + int ret; + + ret = eth_ngbe_dev_uninit(dev); + if (ret != 0) + return ret; + + ret = eth_ngbe_dev_init(dev, NULL); + + return ret; +} + +#define UPDATE_QP_COUNTER_32bit(reg, last_counter, counter) \ + { \ + uint32_t current_counter = rd32(hw, reg); \ + if (current_counter < last_counter) \ + current_counter += 0x100000000LL; \ + if (!hw->offset_loaded) \ + last_counter = current_counter; \ + counter = current_counter - last_counter; \ + counter &= 0xFFFFFFFFLL; \ + } + +#define UPDATE_QP_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \ + { \ + uint64_t current_counter_lsb = rd32(hw, reg_lsb); \ + uint64_t current_counter_msb = rd32(hw, reg_msb); \ + uint64_t current_counter = (current_counter_msb << 32) | \ + current_counter_lsb; \ + if (current_counter < last_counter) \ + current_counter += 0x1000000000LL; \ + if (!hw->offset_loaded) \ + last_counter = current_counter; \ + counter = current_counter - last_counter; \ + counter &= 0xFFFFFFFFFLL; \ + } + +void +ngbe_read_stats_registers(struct ngbe_hw *hw, + struct ngbe_hw_stats *hw_stats) +{ + unsigned int i; + + /* QP Stats */ + for (i = 0; i < hw->nb_rx_queues; i++) { + UPDATE_QP_COUNTER_32bit(NGBE_QPRXPKT(i), + hw->qp_last[i].rx_qp_packets, + hw_stats->qp[i].rx_qp_packets); + UPDATE_QP_COUNTER_36bit(NGBE_QPRXOCTL(i), NGBE_QPRXOCTH(i), + hw->qp_last[i].rx_qp_bytes, + hw_stats->qp[i].rx_qp_bytes); + UPDATE_QP_COUNTER_32bit(NGBE_QPRXMPKT(i), + hw->qp_last[i].rx_qp_mc_packets, + hw_stats->qp[i].rx_qp_mc_packets); + UPDATE_QP_COUNTER_32bit(NGBE_QPRXBPKT(i), + hw->qp_last[i].rx_qp_bc_packets, + hw_stats->qp[i].rx_qp_bc_packets); + } + + for (i = 0; i < hw->nb_tx_queues; i++) { + UPDATE_QP_COUNTER_32bit(NGBE_QPTXPKT(i), + hw->qp_last[i].tx_qp_packets, + hw_stats->qp[i].tx_qp_packets); + UPDATE_QP_COUNTER_36bit(NGBE_QPTXOCTL(i), NGBE_QPTXOCTH(i), + hw->qp_last[i].tx_qp_bytes, + hw_stats->qp[i].tx_qp_bytes); + UPDATE_QP_COUNTER_32bit(NGBE_QPTXMPKT(i), + hw->qp_last[i].tx_qp_mc_packets, + hw_stats->qp[i].tx_qp_mc_packets); + UPDATE_QP_COUNTER_32bit(NGBE_QPTXBPKT(i), + hw->qp_last[i].tx_qp_bc_packets, + hw_stats->qp[i].tx_qp_bc_packets); + } + + /* PB Stats */ + hw_stats->rx_up_dropped += rd32(hw, NGBE_PBRXMISS); + hw_stats->rdb_pkt_cnt += rd32(hw, NGBE_PBRXPKT); + hw_stats->rdb_repli_cnt += rd32(hw, NGBE_PBRXREP); + hw_stats->rdb_drp_cnt += rd32(hw, NGBE_PBRXDROP); + hw_stats->tx_xoff_packets += rd32(hw, NGBE_PBTXLNKXOFF); + hw_stats->tx_xon_packets += rd32(hw, NGBE_PBTXLNKXON); + + hw_stats->rx_xon_packets += rd32(hw, NGBE_PBRXLNKXON); + hw_stats->rx_xoff_packets += rd32(hw, NGBE_PBRXLNKXOFF); + + /* DMA Stats */ + hw_stats->rx_drop_packets += rd32(hw, NGBE_DMARXDROP); + hw_stats->tx_drop_packets += rd32(hw, NGBE_DMATXDROP); + hw_stats->rx_dma_drop += rd32(hw, 
NGBE_DMARXDROP); + hw_stats->tx_secdrp_packets += rd32(hw, NGBE_DMATXSECDROP); + hw_stats->rx_packets += rd32(hw, NGBE_DMARXPKT); + hw_stats->tx_packets += rd32(hw, NGBE_DMATXPKT); + hw_stats->rx_bytes += rd64(hw, NGBE_DMARXOCTL); + hw_stats->tx_bytes += rd64(hw, NGBE_DMATXOCTL); + + /* MAC Stats */ + hw_stats->rx_crc_errors += rd64(hw, NGBE_MACRXERRCRCL); + hw_stats->rx_multicast_packets += rd64(hw, NGBE_MACRXMPKTL); + hw_stats->tx_multicast_packets += rd64(hw, NGBE_MACTXMPKTL); + + hw_stats->rx_total_packets += rd64(hw, NGBE_MACRXPKTL); + hw_stats->tx_total_packets += rd64(hw, NGBE_MACTXPKTL); + hw_stats->rx_total_bytes += rd64(hw, NGBE_MACRXGBOCTL); + + hw_stats->rx_broadcast_packets += rd64(hw, NGBE_MACRXOCTL); + hw_stats->tx_broadcast_packets += rd32(hw, NGBE_MACTXOCTL); + + hw_stats->rx_size_64_packets += rd64(hw, NGBE_MACRX1TO64L); + hw_stats->rx_size_65_to_127_packets += rd64(hw, NGBE_MACRX65TO127L); + hw_stats->rx_size_128_to_255_packets += rd64(hw, NGBE_MACRX128TO255L); + hw_stats->rx_size_256_to_511_packets += rd64(hw, NGBE_MACRX256TO511L); + hw_stats->rx_size_512_to_1023_packets += + rd64(hw, NGBE_MACRX512TO1023L); + hw_stats->rx_size_1024_to_max_packets += + rd64(hw, NGBE_MACRX1024TOMAXL); + hw_stats->tx_size_64_packets += rd64(hw, NGBE_MACTX1TO64L); + hw_stats->tx_size_65_to_127_packets += rd64(hw, NGBE_MACTX65TO127L); + hw_stats->tx_size_128_to_255_packets += rd64(hw, NGBE_MACTX128TO255L); + hw_stats->tx_size_256_to_511_packets += rd64(hw, NGBE_MACTX256TO511L); + hw_stats->tx_size_512_to_1023_packets += + rd64(hw, NGBE_MACTX512TO1023L); + hw_stats->tx_size_1024_to_max_packets += + rd64(hw, NGBE_MACTX1024TOMAXL); + + hw_stats->rx_undersize_errors += rd64(hw, NGBE_MACRXERRLENL); + hw_stats->rx_oversize_errors += rd32(hw, NGBE_MACRXOVERSIZE); + hw_stats->rx_jabber_errors += rd32(hw, NGBE_MACRXJABBER); + + /* MNG Stats */ + hw_stats->mng_bmc2host_packets = rd32(hw, NGBE_MNGBMC2OS); + hw_stats->mng_host2bmc_packets = rd32(hw, NGBE_MNGOS2BMC); + hw_stats->rx_management_packets = rd32(hw, NGBE_DMARXMNG); + hw_stats->tx_management_packets = rd32(hw, NGBE_DMATXMNG); + + /* MACsec Stats */ + hw_stats->tx_macsec_pkts_untagged += rd32(hw, NGBE_LSECTX_UTPKT); + hw_stats->tx_macsec_pkts_encrypted += + rd32(hw, NGBE_LSECTX_ENCPKT); + hw_stats->tx_macsec_pkts_protected += + rd32(hw, NGBE_LSECTX_PROTPKT); + hw_stats->tx_macsec_octets_encrypted += + rd32(hw, NGBE_LSECTX_ENCOCT); + hw_stats->tx_macsec_octets_protected += + rd32(hw, NGBE_LSECTX_PROTOCT); + hw_stats->rx_macsec_pkts_untagged += rd32(hw, NGBE_LSECRX_UTPKT); + hw_stats->rx_macsec_pkts_badtag += rd32(hw, NGBE_LSECRX_BTPKT); + hw_stats->rx_macsec_pkts_nosci += rd32(hw, NGBE_LSECRX_NOSCIPKT); + hw_stats->rx_macsec_pkts_unknownsci += rd32(hw, NGBE_LSECRX_UNSCIPKT); + hw_stats->rx_macsec_octets_decrypted += rd32(hw, NGBE_LSECRX_DECOCT); + hw_stats->rx_macsec_octets_validated += rd32(hw, NGBE_LSECRX_VLDOCT); + hw_stats->rx_macsec_sc_pkts_unchecked += + rd32(hw, NGBE_LSECRX_UNCHKPKT); + hw_stats->rx_macsec_sc_pkts_delayed += rd32(hw, NGBE_LSECRX_DLYPKT); + hw_stats->rx_macsec_sc_pkts_late += rd32(hw, NGBE_LSECRX_LATEPKT); + for (i = 0; i < 2; i++) { + hw_stats->rx_macsec_sa_pkts_ok += + rd32(hw, NGBE_LSECRX_OKPKT(i)); + hw_stats->rx_macsec_sa_pkts_invalid += + rd32(hw, NGBE_LSECRX_INVPKT(i)); + hw_stats->rx_macsec_sa_pkts_notvalid += + rd32(hw, NGBE_LSECRX_BADPKT(i)); + } + for (i = 0; i < 4; i++) { + hw_stats->rx_macsec_sa_pkts_unusedsa += + rd32(hw, NGBE_LSECRX_INVSAPKT(i)); + hw_stats->rx_macsec_sa_pkts_notusingsa += + rd32(hw, 
NGBE_LSECRX_BADSAPKT(i)); + } + hw_stats->rx_total_missed_packets = + hw_stats->rx_up_dropped; +} + +static int +ngbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) +{ + struct ngbe_hw *hw = ngbe_dev_hw(dev); + struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev); + struct ngbe_stat_mappings *stat_mappings = + NGBE_DEV_STAT_MAPPINGS(dev); + uint32_t i, j; + + ngbe_read_stats_registers(hw, hw_stats); + + if (stats == NULL) + return -EINVAL; + + /* Fill out the rte_eth_stats statistics structure */ + stats->ipackets = hw_stats->rx_packets; + stats->ibytes = hw_stats->rx_bytes; + stats->opackets = hw_stats->tx_packets; + stats->obytes = hw_stats->tx_bytes; + + memset(&stats->q_ipackets, 0, sizeof(stats->q_ipackets)); + memset(&stats->q_opackets, 0, sizeof(stats->q_opackets)); + memset(&stats->q_ibytes, 0, sizeof(stats->q_ibytes)); + memset(&stats->q_obytes, 0, sizeof(stats->q_obytes)); + memset(&stats->q_errors, 0, sizeof(stats->q_errors)); + for (i = 0; i < NGBE_MAX_QP; i++) { + uint32_t n = i / NB_QMAP_FIELDS_PER_QSM_REG; + uint32_t offset = (i % NB_QMAP_FIELDS_PER_QSM_REG) * 8; + uint32_t q_map; + + q_map = (stat_mappings->rqsm[n] >> offset) + & QMAP_FIELD_RESERVED_BITS_MASK; + j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS + ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS); + stats->q_ipackets[j] += hw_stats->qp[i].rx_qp_packets; + stats->q_ibytes[j] += hw_stats->qp[i].rx_qp_bytes; + + q_map = (stat_mappings->tqsm[n] >> offset) + & QMAP_FIELD_RESERVED_BITS_MASK; + j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS + ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS); + stats->q_opackets[j] += hw_stats->qp[i].tx_qp_packets; + stats->q_obytes[j] += hw_stats->qp[i].tx_qp_bytes; + } + + /* Rx Errors */ + stats->imissed = hw_stats->rx_total_missed_packets + + hw_stats->rx_dma_drop; + stats->ierrors = hw_stats->rx_crc_errors + + hw_stats->rx_mac_short_packet_dropped + + hw_stats->rx_length_errors + + hw_stats->rx_undersize_errors + + hw_stats->rx_oversize_errors + + hw_stats->rx_illegal_byte_errors + + hw_stats->rx_error_bytes + + hw_stats->rx_fragment_errors; + + /* Tx Errors */ + stats->oerrors = 0; + return 0; +} + +static int +ngbe_dev_stats_reset(struct rte_eth_dev *dev) +{ + struct ngbe_hw *hw = ngbe_dev_hw(dev); + struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev); + + /* HW registers are cleared on read */ + hw->offset_loaded = 0; + ngbe_dev_stats_get(dev, NULL); + hw->offset_loaded = 1; + + /* Reset software totals */ + memset(hw_stats, 0, sizeof(*hw_stats)); + + return 0; +} + +/* This function calculates the number of xstats based on the current config */ +static unsigned +ngbe_xstats_calc_num(struct rte_eth_dev *dev) +{ + int nb_queues = max(dev->data->nb_rx_queues, dev->data->nb_tx_queues); + return NGBE_NB_HW_STATS + + NGBE_NB_QP_STATS * nb_queues; +} + +static inline int +ngbe_get_name_by_id(uint32_t id, char *name, uint32_t size) +{ + int nb, st; + + /* Extended stats from ngbe_hw_stats */ + if (id < NGBE_NB_HW_STATS) { + snprintf(name, size, "[hw]%s", + rte_ngbe_stats_strings[id].name); + return 0; + } + id -= NGBE_NB_HW_STATS; + + /* Queue Stats */ + if (id < NGBE_NB_QP_STATS * NGBE_MAX_QP) { + nb = id / NGBE_NB_QP_STATS; + st = id % NGBE_NB_QP_STATS; + snprintf(name, size, "[q%u]%s", nb, + rte_ngbe_qp_strings[st].name); + return 0; + } + id -= NGBE_NB_QP_STATS * NGBE_MAX_QP; + + return -(int)(id + 1); +} + +static inline int +ngbe_get_offset_by_id(uint32_t id, uint32_t *offset) +{ + int nb, st; + + /* Extended stats from ngbe_hw_stats */ + if (id < NGBE_NB_HW_STATS) { + 
*offset = rte_ngbe_stats_strings[id].offset;
+ return 0;
+ }
+ id -= NGBE_NB_HW_STATS;
+
+ /* Queue Stats */
+ if (id < NGBE_NB_QP_STATS * NGBE_MAX_QP) {
+ nb = id / NGBE_NB_QP_STATS;
+ st = id % NGBE_NB_QP_STATS;
+ *offset = rte_ngbe_qp_strings[st].offset +
+ nb * (NGBE_NB_QP_STATS * sizeof(uint64_t));
+ return 0;
+ }
+
+ return -1;
+}
+
+static int ngbe_dev_xstats_get_names(struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names, unsigned int limit)
+{
+ unsigned int i, count;
+
+ count = ngbe_xstats_calc_num(dev);
+ if (xstats_names == NULL)
+ return count;
+
+ /* Note: limit >= cnt_stats checked upstream
+ * in rte_eth_xstats_names()
+ */
+ limit = min(limit, count);
+
+ /* Extended stats from ngbe_hw_stats */
+ for (i = 0; i < limit; i++) {
+ if (ngbe_get_name_by_id(i, xstats_names[i].name,
+ sizeof(xstats_names[i].name))) {
+ PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
+ break;
+ }
+ }
+
+ return i;
+}
+
+static int ngbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
+ const uint64_t *ids,
+ struct rte_eth_xstat_name *xstats_names,
+ unsigned int limit)
+{
+ unsigned int i;
+
+ if (ids == NULL)
+ return ngbe_dev_xstats_get_names(dev, xstats_names, limit);
+
+ for (i = 0; i < limit; i++) {
+ if (ngbe_get_name_by_id(ids[i], xstats_names[i].name,
+ sizeof(xstats_names[i].name))) {
+ PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
+ return -1;
+ }
+ }
+
+ return i;
+}
+
+static int
+ngbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
+ unsigned int limit)
+{
+ struct ngbe_hw *hw = ngbe_dev_hw(dev);
+ struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
+ unsigned int i, count;
+
+ ngbe_read_stats_registers(hw, hw_stats);
+
+ /* If this is a reset, xstats is NULL, and we have cleared the
+ * registers by reading them.
+ */
+ count = ngbe_xstats_calc_num(dev);
+ if (xstats == NULL)
+ return count;
+
+ limit = min(limit, ngbe_xstats_calc_num(dev));
+
+ /* Extended stats from ngbe_hw_stats */
+ for (i = 0; i < limit; i++) {
+ uint32_t offset = 0;
+
+ if (ngbe_get_offset_by_id(i, &offset)) {
+ PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
+ break;
+ }
+ xstats[i].value = *(uint64_t *)(((char *)hw_stats) + offset);
+ xstats[i].id = i;
+ }
+
+ return i;
+}
+
+static int
+ngbe_dev_xstats_get_(struct rte_eth_dev *dev, uint64_t *values,
+ unsigned int limit)
+{
+ struct ngbe_hw *hw = ngbe_dev_hw(dev);
+ struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
+ unsigned int i, count;
+
+ ngbe_read_stats_registers(hw, hw_stats);
+
+ /* If this is a reset, xstats is NULL, and we have cleared the
+ * registers by reading them.
+ */ + count = ngbe_xstats_calc_num(dev); + if (values == NULL) + return count; + + limit = min(limit, ngbe_xstats_calc_num(dev)); + + /* Extended stats from ngbe_hw_stats */ + for (i = 0; i < limit; i++) { + uint32_t offset; + + if (ngbe_get_offset_by_id(i, &offset)) { + PMD_INIT_LOG(WARNING, "id value %d isn't valid", i); + break; + } + values[i] = *(uint64_t *)(((char *)hw_stats) + offset); + } + + return i; +} + +static int +ngbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids, + uint64_t *values, unsigned int limit) +{ + struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev); + unsigned int i; + + if (ids == NULL) + return ngbe_dev_xstats_get_(dev, values, limit); - return -EINVAL; + for (i = 0; i < limit; i++) { + uint32_t offset; + + if (ngbe_get_offset_by_id(ids[i], &offset)) { + PMD_INIT_LOG(WARNING, "id value %d isn't valid", i); + break; + } + values[i] = *(uint64_t *)(((char *)hw_stats) + offset); + } + + return i; +} + +static int +ngbe_dev_xstats_reset(struct rte_eth_dev *dev) +{ + struct ngbe_hw *hw = ngbe_dev_hw(dev); + struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev); + + /* HW registers are cleared on read */ + hw->offset_loaded = 0; + ngbe_read_stats_registers(hw, hw_stats); + hw->offset_loaded = 1; + + /* Reset software totals */ + memset(hw_stats, 0, sizeof(*hw_stats)); + + return 0; } static int @@ -569,6 +1602,13 @@ ngbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues; dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues; + dev_info->min_rx_bufsize = 1024; + dev_info->max_rx_pktlen = 15872; + dev_info->rx_queue_offload_capa = ngbe_get_rx_queue_offloads(dev); + dev_info->rx_offload_capa = (ngbe_get_rx_port_offloads(dev) | + dev_info->rx_queue_offload_capa); + dev_info->tx_queue_offload_capa = 0; + dev_info->tx_offload_capa = ngbe_get_tx_port_offloads(dev); dev_info->default_rxconf = (struct rte_eth_rxconf) { .rx_thresh = { @@ -594,10 +1634,12 @@ ngbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) dev_info->rx_desc_lim = rx_desc_lim; dev_info->tx_desc_lim = tx_desc_lim; - dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_100M | - ETH_LINK_SPEED_10M; + dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_100M | + RTE_ETH_LINK_SPEED_10M; /* Driver-preferred Rx/Tx parameters */ + dev_info->default_rxportconf.burst_size = 32; + dev_info->default_txportconf.burst_size = 32; dev_info->default_rxportconf.nb_queues = 1; dev_info->default_txportconf.nb_queues = 1; dev_info->default_rxportconf.ring_size = 256; @@ -606,6 +1648,18 @@ ngbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) return 0; } +const uint32_t * +ngbe_dev_supported_ptypes_get(struct rte_eth_dev *dev) +{ + if (dev->rx_pkt_burst == ngbe_recv_pkts || + dev->rx_pkt_burst == ngbe_recv_pkts_sc_single_alloc || + dev->rx_pkt_burst == ngbe_recv_pkts_sc_bulk_alloc || + dev->rx_pkt_burst == ngbe_recv_pkts_bulk_alloc) + return ngbe_get_supported_ptypes(); + + return NULL; +} + /* return 0 means link status changed, -1 means not changed */ int ngbe_dev_link_update_share(struct rte_eth_dev *dev, @@ -621,11 +1675,11 @@ ngbe_dev_link_update_share(struct rte_eth_dev *dev, int wait = 1; memset(&link, 0, sizeof(link)); - link.link_status = ETH_LINK_DOWN; - link.link_speed = ETH_SPEED_NUM_NONE; - link.link_duplex = ETH_LINK_HALF_DUPLEX; + link.link_status = RTE_ETH_LINK_DOWN; + link.link_speed = RTE_ETH_SPEED_NUM_NONE; + link.link_duplex = 
RTE_ETH_LINK_HALF_DUPLEX; link.link_autoneg = !(dev->data->dev_conf.link_speeds & - ~ETH_LINK_SPEED_AUTONEG); + ~RTE_ETH_LINK_SPEED_AUTONEG); hw->mac.get_link_status = true; @@ -638,8 +1692,8 @@ ngbe_dev_link_update_share(struct rte_eth_dev *dev, err = hw->mac.check_link(hw, &link_speed, &link_up, wait); if (err != 0) { - link.link_speed = ETH_SPEED_NUM_NONE; - link.link_duplex = ETH_LINK_FULL_DUPLEX; + link.link_speed = RTE_ETH_SPEED_NUM_NONE; + link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX; return rte_eth_linkstatus_set(dev, &link); } @@ -647,27 +1701,27 @@ ngbe_dev_link_update_share(struct rte_eth_dev *dev, return rte_eth_linkstatus_set(dev, &link); intr->flags &= ~NGBE_FLAG_NEED_LINK_CONFIG; - link.link_status = ETH_LINK_UP; - link.link_duplex = ETH_LINK_FULL_DUPLEX; + link.link_status = RTE_ETH_LINK_UP; + link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX; switch (link_speed) { default: case NGBE_LINK_SPEED_UNKNOWN: - link.link_speed = ETH_SPEED_NUM_NONE; + link.link_speed = RTE_ETH_SPEED_NUM_NONE; break; case NGBE_LINK_SPEED_10M_FULL: - link.link_speed = ETH_SPEED_NUM_10M; + link.link_speed = RTE_ETH_SPEED_NUM_10M; lan_speed = 0; break; case NGBE_LINK_SPEED_100M_FULL: - link.link_speed = ETH_SPEED_NUM_100M; + link.link_speed = RTE_ETH_SPEED_NUM_100M; lan_speed = 1; break; case NGBE_LINK_SPEED_1GB_FULL: - link.link_speed = ETH_SPEED_NUM_1G; + link.link_speed = RTE_ETH_SPEED_NUM_1G; lan_speed = 2; break; } @@ -851,11 +1905,11 @@ ngbe_dev_link_status_print(struct rte_eth_dev *dev) rte_eth_linkstatus_get(dev, &link); - if (link.link_status == ETH_LINK_UP) { + if (link.link_status == RTE_ETH_LINK_UP) { PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s", (int)(dev->data->port_id), (unsigned int)link.link_speed, - link.link_duplex == ETH_LINK_FULL_DUPLEX ? + link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ? "full-duplex" : "half-duplex"); } else { PMD_INIT_LOG(INFO, " Port %d: Link Down", @@ -895,7 +1949,7 @@ ngbe_dev_interrupt_action(struct rte_eth_dev *dev) ngbe_dev_link_update(dev, 0); /* likely to up */ - if (link.link_status != ETH_LINK_UP) + if (link.link_status != RTE_ETH_LINK_UP) /* handle it 1 sec later, wait it being stable */ timeout = NGBE_LINK_UP_CHECK_TIMEOUT; /* likely to down */ @@ -987,6 +2041,33 @@ ngbe_dev_interrupt_handler(void *param) ngbe_dev_interrupt_action(dev); } +static int +ngbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) +{ + struct ngbe_hw *hw = ngbe_dev_hw(dev); + uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + 4; + struct rte_eth_dev_data *dev_data = dev->data; + + /* If device is started, refuse mtu that requires the support of + * scattered packets when this feature has not been enabled before. 
+ */ + if (dev_data->dev_started && !dev_data->scattered_rx && + (frame_size + 2 * NGBE_VLAN_TAG_SIZE > + dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) { + PMD_INIT_LOG(ERR, "Stop port first."); + return -EINVAL; + } + + if (hw->mode) + wr32m(hw, NGBE_FRMSZ, NGBE_FRMSZ_MAX_MASK, + NGBE_FRAME_SIZE_MAX); + else + wr32m(hw, NGBE_FRMSZ, NGBE_FRMSZ_MAX_MASK, + NGBE_FRMSZ_MAX(frame_size)); + + return 0; +} + /** * Set the IVAR registers, mapping interrupt causes to vectors * @param hw @@ -1032,7 +2113,7 @@ static void ngbe_configure_msix(struct rte_eth_dev *dev) { struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); - struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + struct rte_intr_handle *intr_handle = pci_dev->intr_handle; struct ngbe_hw *hw = ngbe_dev_hw(dev); uint32_t queue_id, base = NGBE_MISC_VEC_ID; uint32_t vec = NGBE_MISC_VEC_ID; @@ -1067,8 +2148,10 @@ ngbe_configure_msix(struct rte_eth_dev *dev) queue_id++) { /* by default, 1:1 mapping */ ngbe_set_ivar_map(hw, 0, queue_id, vec); - intr_handle->intr_vec[queue_id] = vec; - if (vec < base + intr_handle->nb_efd - 1) + rte_intr_vec_list_index_set(intr_handle, + queue_id, vec); + if (vec < base + rte_intr_nb_efd_get(intr_handle) + - 1) vec++; } @@ -1084,11 +2167,32 @@ static const struct eth_dev_ops ngbe_eth_dev_ops = { .dev_infos_get = ngbe_dev_info_get, .dev_start = ngbe_dev_start, .dev_stop = ngbe_dev_stop, + .dev_close = ngbe_dev_close, + .dev_reset = ngbe_dev_reset, .link_update = ngbe_dev_link_update, + .stats_get = ngbe_dev_stats_get, + .xstats_get = ngbe_dev_xstats_get, + .xstats_get_by_id = ngbe_dev_xstats_get_by_id, + .stats_reset = ngbe_dev_stats_reset, + .xstats_reset = ngbe_dev_xstats_reset, + .xstats_get_names = ngbe_dev_xstats_get_names, + .xstats_get_names_by_id = ngbe_dev_xstats_get_names_by_id, + .dev_supported_ptypes_get = ngbe_dev_supported_ptypes_get, + .mtu_set = ngbe_dev_mtu_set, + .vlan_filter_set = ngbe_vlan_filter_set, + .vlan_tpid_set = ngbe_vlan_tpid_set, + .vlan_offload_set = ngbe_vlan_offload_set, + .vlan_strip_queue_set = ngbe_vlan_strip_queue_set, + .rx_queue_start = ngbe_dev_rx_queue_start, + .rx_queue_stop = ngbe_dev_rx_queue_stop, + .tx_queue_start = ngbe_dev_tx_queue_start, + .tx_queue_stop = ngbe_dev_tx_queue_stop, .rx_queue_setup = ngbe_dev_rx_queue_setup, .rx_queue_release = ngbe_dev_rx_queue_release, .tx_queue_setup = ngbe_dev_tx_queue_setup, .tx_queue_release = ngbe_dev_tx_queue_release, + .rx_burst_mode_get = ngbe_rx_burst_mode_get, + .tx_burst_mode_get = ngbe_tx_burst_mode_get, }; RTE_PMD_REGISTER_PCI(net_ngbe, rte_ngbe_pmd);
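Reviewer note (appended after the patch; not part of it): the UPDATE_QP_COUNTER_32bit macro in this patch reports the delta against a baseline captured when hw->offset_loaded is first set, widening the value to 64 bits so that a single wrap of the 32-bit hardware counter is not lost. A minimal standalone sketch of the same logic follows; the names (demo_hw, demo_update_counter_32bit) are hypothetical stand-ins for the driver's rd32()/ngbe_hw plumbing.

#include <stdint.h>

struct demo_hw {
	int offset_loaded;	/* mirrors hw->offset_loaded */
};

/* Return the count accumulated since the stats baseline was captured.
 * 'current_reg' is the raw 32-bit register value; '*last' holds the
 * baseline taken on the first read after a stats reset.
 */
static uint64_t
demo_update_counter_32bit(struct demo_hw *hw, uint32_t current_reg,
			  uint64_t *last)
{
	uint64_t current = current_reg;

	/* the 32-bit counter wrapped past the baseline since the last reset */
	if (current < *last)
		current += 0x100000000ULL;
	/* first read after a reset: capture the baseline, report zero */
	if (!hw->offset_loaded)
		*last = current;
	return (current - *last) & 0xFFFFFFFFULL;
}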
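Reviewer note (appended after the patch; not part of it): the NGBE_SET_HWSTRIP / NGBE_CLEAR_HWSTRIP / NGBE_GET_HWSTRIP macros near the top of the patch implement a conventional word-addressed bitmap over Rx queue indices, with NBBY the number of bits per byte (8). A self-contained equivalent, with hypothetical demo_* names and a fixed 128-queue capacity:

#include <stdint.h>

#define DEMO_NBBY 8	/* bits per byte, as NBBY */

struct demo_hwstrip {
	uint32_t bitmap[4];	/* 4 x 32 bits = 128 queues */
};

/* Mark queue 'q' as having VLAN strip enabled. */
static void demo_set(struct demo_hwstrip *h, uint16_t q)
{
	uint32_t idx = q / (sizeof(h->bitmap[0]) * DEMO_NBBY);	/* word index */
	uint32_t bit = q % (sizeof(h->bitmap[0]) * DEMO_NBBY);	/* bit in word */

	h->bitmap[idx] |= UINT32_C(1) << bit;
}

/* Query whether queue 'q' has VLAN strip enabled (1) or not (0). */
static int demo_get(const struct demo_hwstrip *h, uint16_t q)
{
	uint32_t idx = q / (sizeof(h->bitmap[0]) * DEMO_NBBY);
	uint32_t bit = q % (sizeof(h->bitmap[0]) * DEMO_NBBY);

	return (h->bitmap[idx] >> bit) & 1;
}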