diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c
index 5f7dec086d..47a263a1fb 100644
--- a/drivers/net/ena/ena_ethdev.c
+++ b/drivers/net/ena/ena_ethdev.c
@@ -31,6 +31,7 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
+#include <rte_string_fns.h>
 #include
 #include
 #include
@@ -53,9 +54,9 @@
 #include
 #include
 
-#define DRV_MODULE_VER_MAJOR	1
-#define DRV_MODULE_VER_MINOR	1
-#define DRV_MODULE_VER_SUBMINOR	1
+#define DRV_MODULE_VER_MAJOR	2
+#define DRV_MODULE_VER_MINOR	0
+#define DRV_MODULE_VER_SUBMINOR	0
 
 #define ENA_IO_TXQ_IDX(q)	(2 * (q))
 #define ENA_IO_RXQ_IDX(q)	(2 * (q) + 1)
@@ -80,7 +81,6 @@
 #define ENA_RX_RSS_TABLE_LOG_SIZE	7
 #define ENA_RX_RSS_TABLE_SIZE	(1 << ENA_RX_RSS_TABLE_LOG_SIZE)
 #define ENA_HASH_KEY_SIZE	40
-#define ENA_ETH_SS_STATS	0xFF
 #define ETH_GSTRING_LEN	32
 
 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
@@ -97,11 +97,6 @@ struct ena_stats {
 	int stat_offset;
 };
 
-#define ENA_STAT_ENA_COM_ENTRY(stat) { \
-	.name = #stat, \
-	.stat_offset = offsetof(struct ena_com_stats_admin, stat) \
-}
-
 #define ENA_STAT_ENTRY(stat, stat_type) { \
 	.name = #stat, \
 	.stat_offset = offsetof(struct ena_stats_##stat_type, stat) \
@@ -126,54 +121,36 @@ uint32_t ena_alloc_cnt;
 
 static const struct ena_stats ena_stats_global_strings[] = {
-	ENA_STAT_GLOBAL_ENTRY(tx_timeout),
-	ENA_STAT_GLOBAL_ENTRY(io_suspend),
-	ENA_STAT_GLOBAL_ENTRY(io_resume),
 	ENA_STAT_GLOBAL_ENTRY(wd_expired),
-	ENA_STAT_GLOBAL_ENTRY(interface_up),
-	ENA_STAT_GLOBAL_ENTRY(interface_down),
-	ENA_STAT_GLOBAL_ENTRY(admin_q_pause),
+	ENA_STAT_GLOBAL_ENTRY(dev_start),
+	ENA_STAT_GLOBAL_ENTRY(dev_stop),
 };
 
 static const struct ena_stats ena_stats_tx_strings[] = {
 	ENA_STAT_TX_ENTRY(cnt),
 	ENA_STAT_TX_ENTRY(bytes),
-	ENA_STAT_TX_ENTRY(queue_stop),
-	ENA_STAT_TX_ENTRY(queue_wakeup),
-	ENA_STAT_TX_ENTRY(dma_mapping_err),
+	ENA_STAT_TX_ENTRY(prepare_ctx_err),
 	ENA_STAT_TX_ENTRY(linearize),
 	ENA_STAT_TX_ENTRY(linearize_failed),
 	ENA_STAT_TX_ENTRY(tx_poll),
 	ENA_STAT_TX_ENTRY(doorbells),
-	ENA_STAT_TX_ENTRY(prepare_ctx_err),
-	ENA_STAT_TX_ENTRY(missing_tx_comp),
 	ENA_STAT_TX_ENTRY(bad_req_id),
+	ENA_STAT_TX_ENTRY(available_desc),
 };
 
 static const struct ena_stats ena_stats_rx_strings[] = {
 	ENA_STAT_RX_ENTRY(cnt),
 	ENA_STAT_RX_ENTRY(bytes),
-	ENA_STAT_RX_ENTRY(refil_partial),
+	ENA_STAT_RX_ENTRY(refill_partial),
 	ENA_STAT_RX_ENTRY(bad_csum),
-	ENA_STAT_RX_ENTRY(page_alloc_fail),
-	ENA_STAT_RX_ENTRY(skb_alloc_fail),
-	ENA_STAT_RX_ENTRY(dma_mapping_err),
+	ENA_STAT_RX_ENTRY(mbuf_alloc_fail),
 	ENA_STAT_RX_ENTRY(bad_desc_num),
-	ENA_STAT_RX_ENTRY(small_copy_len_pkt),
-};
-
-static const struct ena_stats ena_stats_ena_com_strings[] = {
-	ENA_STAT_ENA_COM_ENTRY(aborted_cmd),
-	ENA_STAT_ENA_COM_ENTRY(submitted_cmd),
-	ENA_STAT_ENA_COM_ENTRY(completed_cmd),
-	ENA_STAT_ENA_COM_ENTRY(out_of_space),
-	ENA_STAT_ENA_COM_ENTRY(no_completion),
+	ENA_STAT_RX_ENTRY(bad_req_id),
 };
 
 #define ENA_STATS_ARRAY_GLOBAL	ARRAY_SIZE(ena_stats_global_strings)
 #define ENA_STATS_ARRAY_TX	ARRAY_SIZE(ena_stats_tx_strings)
 #define ENA_STATS_ARRAY_RX	ARRAY_SIZE(ena_stats_rx_strings)
-#define ENA_STATS_ARRAY_ENA_COM	ARRAY_SIZE(ena_stats_ena_com_strings)
 
 #define	QUEUE_OFFLOADS	(DEV_TX_OFFLOAD_TCP_CKSUM |\
 			DEV_TX_OFFLOAD_UDP_CKSUM |\
@@ -244,10 +221,12 @@
 static void ena_tx_queue_release_bufs(struct ena_ring *ring);
 static int ena_link_update(struct rte_eth_dev *dev,
 			   int wait_to_complete);
 static int ena_create_io_queue(struct ena_ring *ring);
-static void ena_free_io_queues_all(struct ena_adapter *adapter);
-static int ena_queue_restart(struct ena_ring *ring);
-static int ena_queue_restart_all(struct rte_eth_dev *dev,
-				 enum ena_ring_type ring_type);
+static void ena_queue_stop(struct ena_ring *ring);
+static void ena_queue_stop_all(struct rte_eth_dev *dev,
+			       enum ena_ring_type ring_type);
+static int ena_queue_start(struct ena_ring *ring);
+static int ena_queue_start_all(struct rte_eth_dev *dev,
+			       enum ena_ring_type ring_type);
 static void ena_stats_restart(struct rte_eth_dev *dev);
 static void ena_infos_get(struct rte_eth_dev *dev,
 			  struct rte_eth_dev_info *dev_info);
@@ -257,9 +236,20 @@ static int ena_rss_reta_update(struct rte_eth_dev *dev,
 static int ena_rss_reta_query(struct rte_eth_dev *dev,
 			      struct rte_eth_rss_reta_entry64 *reta_conf,
 			      uint16_t reta_size);
-static int ena_get_sset_count(struct rte_eth_dev *dev, int sset);
 static void ena_interrupt_handler_rte(void *cb_arg);
 static void ena_timer_wd_callback(struct rte_timer *timer, void *arg);
+static void ena_destroy_device(struct rte_eth_dev *eth_dev);
+static int eth_ena_dev_init(struct rte_eth_dev *eth_dev);
+static int ena_xstats_get_names(struct rte_eth_dev *dev,
+				struct rte_eth_xstat_name *xstats_names,
+				unsigned int n);
+static int ena_xstats_get(struct rte_eth_dev *dev,
+			  struct rte_eth_xstat *stats,
+			  unsigned int n);
+static int ena_xstats_get_by_id(struct rte_eth_dev *dev,
+				const uint64_t *ids,
+				uint64_t *values,
+				unsigned int n);
 
 static const struct eth_dev_ops ena_dev_ops = {
 	.dev_configure        = ena_dev_configure,
@@ -270,6 +260,9 @@ static const struct eth_dev_ops ena_dev_ops = {
 	.dev_stop             = ena_stop,
 	.link_update          = ena_link_update,
 	.stats_get            = ena_stats_get,
+	.xstats_get_names     = ena_xstats_get_names,
+	.xstats_get           = ena_xstats_get,
+	.xstats_get_by_id     = ena_xstats_get_by_id,
 	.mtu_set              = ena_mtu_set,
 	.rx_queue_release     = ena_rx_queue_release,
 	.tx_queue_release     = ena_tx_queue_release,
@@ -386,6 +379,7 @@ static inline int validate_rx_req_id(struct ena_ring *rx_ring, uint16_t req_id)
 
 	rx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID;
 	rx_ring->adapter->trigger_reset = true;
+	++rx_ring->rx_stats.bad_req_id;
 	return -EFAULT;
 }
 
@@ -406,6 +400,7 @@ static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id)
 	RTE_LOG(ERR, PMD, "Invalid req_id: %hu\n", req_id);
 
 	/* Trigger device reset */
+	++tx_ring->tx_stats.bad_req_id;
 	tx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_TX_REQ_ID;
 	tx_ring->adapter->trigger_reset = true;
 	return -EFAULT;
@@ -427,13 +422,11 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev)
 
 	host_info->os_type = ENA_ADMIN_OS_DPDK;
 	host_info->kernel_ver = RTE_VERSION;
-	snprintf((char *)host_info->kernel_ver_str,
-		 sizeof(host_info->kernel_ver_str),
-		 "%s", rte_version());
+	strlcpy((char *)host_info->kernel_ver_str, rte_version(),
+		sizeof(host_info->kernel_ver_str));
 	host_info->os_dist = RTE_VERSION;
-	snprintf((char *)host_info->os_dist_str,
-		 sizeof(host_info->os_dist_str),
-		 "%s", rte_version());
+	strlcpy((char *)host_info->os_dist_str, rte_version(),
+		sizeof(host_info->os_dist_str));
 	host_info->driver_version =
 		(DRV_MODULE_VER_MAJOR) |
 		(DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) |
@@ -457,24 +450,12 @@ err:
 	ena_com_delete_host_info(ena_dev);
 }
 
-static int
-ena_get_sset_count(struct rte_eth_dev *dev, int sset)
+/* This function calculates the number of xstats based on the current config */
+static unsigned int ena_xstats_calc_num(struct rte_eth_dev *dev)
 {
-	if (sset != ETH_SS_STATS)
-		return -EOPNOTSUPP;
-
-	/* Workaround for clang:
-	 * touch internal structures to prevent
-	 * compiler error
-	 */
-	ENA_TOUCH(ena_stats_global_strings);
-	ENA_TOUCH(ena_stats_tx_strings);
-	ENA_TOUCH(ena_stats_rx_strings);
-	ENA_TOUCH(ena_stats_ena_com_strings);
-
-	return dev->data->nb_tx_queues *
-		(ENA_STATS_ARRAY_TX + ENA_STATS_ARRAY_RX) +
-		ENA_STATS_ARRAY_GLOBAL + ENA_STATS_ARRAY_ENA_COM;
+	return ENA_STATS_ARRAY_GLOBAL +
+		(dev->data->nb_tx_queues * ENA_STATS_ARRAY_TX) +
+		(dev->data->nb_rx_queues * ENA_STATS_ARRAY_RX);
 }
 
 static void ena_config_debug_area(struct ena_adapter *adapter)
@@ -482,11 +463,7 @@ static void ena_config_debug_area(struct ena_adapter *adapter)
 	u32 debug_area_size;
 	int rc, ss_count;
 
-	ss_count = ena_get_sset_count(adapter->rte_dev, ETH_SS_STATS);
-	if (ss_count <= 0) {
-		RTE_LOG(ERR, PMD, "SS count is negative\n");
-		return;
-	}
+	ss_count = ena_xstats_calc_num(adapter->rte_dev);
 
 	/* allocate 32 bytes for each string and 64bit for the value */
 	debug_area_size = ss_count * ETH_GSTRING_LEN + sizeof(u64) * ss_count;
@@ -544,65 +521,14 @@ static void ena_close(struct rte_eth_dev *dev)
 
 static int ena_dev_reset(struct rte_eth_dev *dev)
 {
-	struct rte_mempool *mb_pool_rx[ENA_MAX_NUM_QUEUES];
-	struct rte_eth_dev *eth_dev;
-	struct rte_pci_device *pci_dev;
-	struct rte_intr_handle *intr_handle;
-	struct ena_com_dev *ena_dev;
-	struct ena_com_dev_get_features_ctx get_feat_ctx;
-	struct ena_adapter *adapter;
-	int nb_queues;
-	int rc, i;
-	bool wd_state;
-
-	adapter = (struct ena_adapter *)(dev->data->dev_private);
-	ena_dev = &adapter->ena_dev;
-	eth_dev = adapter->rte_dev;
-	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
-	intr_handle = &pci_dev->intr_handle;
-	nb_queues = eth_dev->data->nb_rx_queues;
-
-	ena_com_set_admin_running_state(ena_dev, false);
+	int rc = 0;
 
-	rc = ena_com_dev_reset(ena_dev, adapter->reset_reason);
+	ena_destroy_device(dev);
+	rc = eth_ena_dev_init(dev);
 	if (rc)
-		RTE_LOG(ERR, PMD, "Device reset failed\n");
-
-	for (i = 0; i < nb_queues; i++)
-		mb_pool_rx[i] = adapter->rx_ring[i].mb_pool;
-
-	ena_rx_queue_release_all(eth_dev);
-	ena_tx_queue_release_all(eth_dev);
-
-	rte_intr_disable(intr_handle);
-
-	ena_com_abort_admin_commands(ena_dev);
-	ena_com_wait_for_abort_completion(ena_dev);
-	ena_com_admin_destroy(ena_dev);
-	ena_com_mmio_reg_read_request_destroy(ena_dev);
-
-	rc = ena_device_init(ena_dev, &get_feat_ctx, &wd_state);
-	if (rc) {
-		PMD_INIT_LOG(CRIT, "Cannot initialize device\n");
-		return rc;
-	}
-	adapter->wd_state = wd_state;
-
-	rte_intr_enable(intr_handle);
-	ena_com_set_admin_polling_mode(ena_dev, false);
-	ena_com_admin_aenq_enable(ena_dev);
-
-	for (i = 0; i < nb_queues; ++i)
-		ena_rx_queue_setup(eth_dev, i, adapter->rx_ring[i].ring_size, 0,
-				   NULL, mb_pool_rx[i]);
-
-	for (i = 0; i < nb_queues; ++i)
-		ena_tx_queue_setup(eth_dev, i, adapter->tx_ring[i].ring_size, 0,
-				   NULL);
+		PMD_INIT_LOG(CRIT, "Cannot initialize device");
 
-	adapter->trigger_reset = false;
-
-	return 0;
+	return rc;
 }
 
 static int ena_rss_reta_update(struct rte_eth_dev *dev,
@@ -772,11 +698,6 @@ static void ena_rx_queue_release(void *queue)
 {
 	struct ena_ring *ring = (struct ena_ring *)queue;
 
-	ena_assert_msg(ring->configured,
-		       "API violation - releasing not configured queue");
-	ena_assert_msg(ring->adapter->state != ENA_ADAPTER_STATE_RUNNING,
-		       "API violation");
-
 	/* Free ring resources */
 	if (ring->rx_buffer_info)
 		rte_free(ring->rx_buffer_info);
@@ -800,14 +721,6 @@ static void ena_tx_queue_release(void *queue)
 {
 	struct ena_ring *ring = (struct ena_ring *)queue;
 
-	ena_assert_msg(ring->configured,
-		       "API violation. Releasing not configured queue");
-	ena_assert_msg(ring->adapter->state != ENA_ADAPTER_STATE_RUNNING,
-		       "API violation");
-
-	/* Free all bufs */
-	ena_tx_queue_release_bufs(ring);
-
 	/* Free ring resources */
 	if (ring->push_buf_intermediate_buf)
 		rte_free(ring->push_buf_intermediate_buf);
@@ -830,17 +743,13 @@ static void ena_tx_queue_release(void *queue)
 
 static void ena_rx_queue_release_bufs(struct ena_ring *ring)
 {
-	unsigned int ring_mask = ring->ring_size - 1;
-
-	while (ring->next_to_clean != ring->next_to_use) {
-		struct rte_mbuf *m =
-			ring->rx_buffer_info[ring->next_to_clean & ring_mask];
-
-		if (m)
-			rte_mbuf_raw_free(m);
+	unsigned int i;
 
-		ring->next_to_clean++;
-	}
+	for (i = 0; i < ring->ring_size; ++i)
+		if (ring->rx_buffer_info[i]) {
+			rte_mbuf_raw_free(ring->rx_buffer_info[i]);
+			ring->rx_buffer_info[i] = NULL;
+		}
 }
 
 static void ena_tx_queue_release_bufs(struct ena_ring *ring)
@@ -852,8 +761,6 @@ static void ena_tx_queue_release_bufs(struct ena_ring *ring)
 
 		if (tx_buf->mbuf)
 			rte_pktmbuf_free(tx_buf->mbuf);
-
-		ring->next_to_clean++;
 	}
 }
 
@@ -872,8 +779,8 @@ static int ena_link_update(struct rte_eth_dev *dev,
 	return 0;
 }
 
-static int ena_queue_restart_all(struct rte_eth_dev *dev,
-				 enum ena_ring_type ring_type)
+static int ena_queue_start_all(struct rte_eth_dev *dev,
+			       enum ena_ring_type ring_type)
 {
 	struct ena_adapter *adapter =
 		(struct ena_adapter *)(dev->data->dev_private);
@@ -901,18 +808,25 @@
 					      "Inconsistent state of tx queues\n");
 		}
 
-		rc = ena_queue_restart(&queues[i]);
+		rc = ena_queue_start(&queues[i]);
 		if (rc) {
 			PMD_INIT_LOG(ERR,
-				     "failed to restart queue %d type(%d)",
+				     "failed to start queue %d type(%d)",
 				     i, ring_type);
-			return rc;
+			goto err;
 		}
 	}
 
 	return 0;
+
+err:
+	while (i--)
+		if (queues[i].configured)
+			ena_queue_stop(&queues[i]);
+
+	return rc;
 }
 
 static uint32_t ena_get_mtu_conf(struct ena_adapter *adapter)
@@ -933,7 +847,7 @@ static int ena_check_valid_conf(struct ena_adapter *adapter)
 
 	if (max_frame_len > adapter->max_mtu || max_frame_len < ENA_MIN_MTU) {
 		PMD_INIT_LOG(ERR, "Unsupported MTU of %d. "
" - "max mtu: %d, min mtu: %d\n", + "max mtu: %d, min mtu: %d", max_frame_len, adapter->max_mtu, ENA_MIN_MTU); return ENA_COM_UNSUPPORTED; } @@ -1020,6 +934,7 @@ static void ena_stats_restart(struct rte_eth_dev *dev) rte_atomic64_init(&adapter->drv_stats->ierrors); rte_atomic64_init(&adapter->drv_stats->oerrors); rte_atomic64_init(&adapter->drv_stats->rx_nombuf); + rte_atomic64_init(&adapter->drv_stats->rx_drops); } static int ena_stats_get(struct rte_eth_dev *dev, @@ -1030,6 +945,8 @@ static int ena_stats_get(struct rte_eth_dev *dev, (struct ena_adapter *)(dev->data->dev_private); struct ena_com_dev *ena_dev = &adapter->ena_dev; int rc; + int i; + int max_rings_stats; if (rte_eal_process_type() != RTE_PROC_PRIMARY) return -ENOTSUP; @@ -1037,7 +954,7 @@ static int ena_stats_get(struct rte_eth_dev *dev, memset(&ena_stats, 0, sizeof(ena_stats)); rc = ena_com_get_dev_basic_stats(ena_dev, &ena_stats); if (unlikely(rc)) { - RTE_LOG(ERR, PMD, "Could not retrieve statistics from ENA"); + RTE_LOG(ERR, PMD, "Could not retrieve statistics from ENA\n"); return rc; } @@ -1050,13 +967,33 @@ static int ena_stats_get(struct rte_eth_dev *dev, ena_stats.rx_bytes_low); stats->obytes = __MERGE_64B_H_L(ena_stats.tx_bytes_high, ena_stats.tx_bytes_low); - stats->imissed = __MERGE_64B_H_L(ena_stats.rx_drops_high, - ena_stats.rx_drops_low); /* Driver related stats */ + stats->imissed = rte_atomic64_read(&adapter->drv_stats->rx_drops); stats->ierrors = rte_atomic64_read(&adapter->drv_stats->ierrors); stats->oerrors = rte_atomic64_read(&adapter->drv_stats->oerrors); stats->rx_nombuf = rte_atomic64_read(&adapter->drv_stats->rx_nombuf); + + max_rings_stats = RTE_MIN(dev->data->nb_rx_queues, + RTE_ETHDEV_QUEUE_STAT_CNTRS); + for (i = 0; i < max_rings_stats; ++i) { + struct ena_stats_rx *rx_stats = &adapter->rx_ring[i].rx_stats; + + stats->q_ibytes[i] = rx_stats->bytes; + stats->q_ipackets[i] = rx_stats->cnt; + stats->q_errors[i] = rx_stats->bad_desc_num + + rx_stats->bad_req_id; + } + + max_rings_stats = RTE_MIN(dev->data->nb_tx_queues, + RTE_ETHDEV_QUEUE_STAT_CNTRS); + for (i = 0; i < max_rings_stats; ++i) { + struct ena_stats_tx *tx_stats = &adapter->tx_ring[i].tx_stats; + + stats->q_obytes[i] = tx_stats->bytes; + stats->q_opackets[i] = tx_stats->cnt; + } + return 0; } @@ -1066,12 +1003,12 @@ static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) struct ena_com_dev *ena_dev; int rc = 0; - ena_assert_msg(dev->data != NULL, "Uninitialized device"); - ena_assert_msg(dev->data->dev_private != NULL, "Uninitialized device"); + ena_assert_msg(dev->data != NULL, "Uninitialized device\n"); + ena_assert_msg(dev->data->dev_private != NULL, "Uninitialized device\n"); adapter = (struct ena_adapter *)(dev->data->dev_private); ena_dev = &adapter->ena_dev; - ena_assert_msg(ena_dev != NULL, "Uninitialized device"); + ena_assert_msg(ena_dev != NULL, "Uninitialized device\n"); if (mtu > ena_get_mtu_conf(adapter) || mtu < ENA_MIN_MTU) { RTE_LOG(ERR, PMD, @@ -1101,19 +1038,19 @@ static int ena_start(struct rte_eth_dev *dev) if (rc) return rc; - rc = ena_queue_restart_all(dev, ENA_RING_TYPE_RX); + rc = ena_queue_start_all(dev, ENA_RING_TYPE_RX); if (rc) return rc; - rc = ena_queue_restart_all(dev, ENA_RING_TYPE_TX); + rc = ena_queue_start_all(dev, ENA_RING_TYPE_TX); if (rc) - return rc; + goto err_start_tx; if (adapter->rte_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG && adapter->rte_dev->data->nb_rx_queues > 0) { rc = ena_rss_init_default(adapter); if (rc) - return rc; + goto err_rss_init; } ena_stats_restart(dev); @@ 
 	rte_timer_reset(&adapter->timer_wd, ticks, PERIODICAL, rte_lcore_id(),
 			ena_timer_wd_callback, adapter);
 
+	++adapter->dev_stats.dev_start;
 	adapter->state = ENA_ADAPTER_STATE_RUNNING;
 
 	return 0;
+
+err_rss_init:
+	ena_queue_stop_all(dev, ENA_RING_TYPE_TX);
+err_start_tx:
+	ena_queue_stop_all(dev, ENA_RING_TYPE_RX);
+	return rc;
 }
 
 static void ena_stop(struct rte_eth_dev *dev)
 {
 	struct ena_adapter *adapter =
 		(struct ena_adapter *)(dev->data->dev_private);
+	struct ena_com_dev *ena_dev = &adapter->ena_dev;
+	int rc;
 
 	rte_timer_stop_sync(&adapter->timer_wd);
-	ena_free_io_queues_all(adapter);
+	ena_queue_stop_all(dev, ENA_RING_TYPE_TX);
+	ena_queue_stop_all(dev, ENA_RING_TYPE_RX);
+
+	if (adapter->trigger_reset) {
+		rc = ena_com_dev_reset(ena_dev, adapter->reset_reason);
+		if (rc)
+			RTE_LOG(ERR, PMD, "Device reset failed rc=%d\n", rc);
+	}
 
+	++adapter->dev_stats.dev_stop;
 	adapter->state = ENA_ADAPTER_STATE_STOPPED;
 }
 
@@ -1199,52 +1153,67 @@ static int ena_create_io_queue(struct ena_ring *ring)
 	return 0;
 }
 
-static void ena_free_io_queues_all(struct ena_adapter *adapter)
+static void ena_queue_stop(struct ena_ring *ring)
 {
-	struct rte_eth_dev *eth_dev = adapter->rte_dev;
-	struct ena_com_dev *ena_dev = &adapter->ena_dev;
-	int i;
-	uint16_t ena_qid;
-	uint16_t nb_rxq = eth_dev->data->nb_rx_queues;
-	uint16_t nb_txq = eth_dev->data->nb_tx_queues;
-
-	for (i = 0; i < nb_txq; ++i) {
-		ena_qid = ENA_IO_TXQ_IDX(i);
-		ena_com_destroy_io_queue(ena_dev, ena_qid);
+	struct ena_com_dev *ena_dev = &ring->adapter->ena_dev;
 
-		ena_tx_queue_release_bufs(&adapter->tx_ring[i]);
+	if (ring->type == ENA_RING_TYPE_RX) {
+		ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(ring->id));
+		ena_rx_queue_release_bufs(ring);
+	} else {
+		ena_com_destroy_io_queue(ena_dev, ENA_IO_TXQ_IDX(ring->id));
+		ena_tx_queue_release_bufs(ring);
 	}
+}
 
-	for (i = 0; i < nb_rxq; ++i) {
-		ena_qid = ENA_IO_RXQ_IDX(i);
-		ena_com_destroy_io_queue(ena_dev, ena_qid);
+static void ena_queue_stop_all(struct rte_eth_dev *dev,
+			       enum ena_ring_type ring_type)
+{
+	struct ena_adapter *adapter =
+		(struct ena_adapter *)(dev->data->dev_private);
+	struct ena_ring *queues = NULL;
+	uint16_t nb_queues, i;
 
-		ena_rx_queue_release_bufs(&adapter->rx_ring[i]);
+	if (ring_type == ENA_RING_TYPE_RX) {
+		queues = adapter->rx_ring;
+		nb_queues = dev->data->nb_rx_queues;
+	} else {
+		queues = adapter->tx_ring;
+		nb_queues = dev->data->nb_tx_queues;
 	}
+
+	for (i = 0; i < nb_queues; ++i)
+		if (queues[i].configured)
+			ena_queue_stop(&queues[i]);
 }
 
-static int ena_queue_restart(struct ena_ring *ring)
+static int ena_queue_start(struct ena_ring *ring)
 {
 	int rc, bufs_num;
 
 	ena_assert_msg(ring->configured == 1,
-		       "Trying to restart unconfigured queue\n");
+		       "Trying to start unconfigured queue\n");
 
 	rc = ena_create_io_queue(ring);
 	if (rc) {
-		PMD_INIT_LOG(ERR, "Failed to create IO queue!\n");
+		PMD_INIT_LOG(ERR, "Failed to create IO queue!");
 		return rc;
 	}
 
 	ring->next_to_clean = 0;
 	ring->next_to_use = 0;
 
-	if (ring->type == ENA_RING_TYPE_TX)
+	if (ring->type == ENA_RING_TYPE_TX) {
+		ring->tx_stats.available_desc =
+			ena_com_free_desc(ring->ena_com_io_sq);
 		return 0;
+	}
 
 	bufs_num = ring->ring_size - 1;
 	rc = ena_populate_rx_queue(ring, bufs_num);
 	if (rc != bufs_num) {
+		ena_com_destroy_io_queue(&ring->adapter->ena_dev,
+					 ENA_IO_RXQ_IDX(ring->id));
 		PMD_INIT_LOG(ERR, "Failed to populate rx ring!");
 		return ENA_COM_FAULT;
 	}
@@ -1274,7 +1243,7 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
 
 	if (!rte_is_power_of_2(nb_desc)) {
 		RTE_LOG(ERR, PMD,
-			"Unsupported size of TX queue: %d is not a power of 2.",
+			"Unsupported size of TX queue: %d is not a power of 2.\n",
 			nb_desc);
 		return -EINVAL;
 	}
@@ -1330,7 +1299,6 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
 		txq->offloads = tx_conf->offloads |
 			dev->data->dev_conf.txmode.offloads;
 	}
-
 	/* Store pointer to this queue in upper layer */
 	txq->configured = 1;
 	dev->data->tx_queues[queue_idx] = txq;
@@ -1363,7 +1331,7 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
 
 	if (!rte_is_power_of_2(nb_desc)) {
 		RTE_LOG(ERR, PMD,
-			"Unsupported size of RX queue: %d is not a power of 2.",
+			"Unsupported size of RX queue: %d is not a power of 2.\n",
 			nb_desc);
 		return -EINVAL;
 	}
@@ -1413,7 +1381,7 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
 	}
 
 	for (i = 0; i < nb_desc; i++)
-		rxq->empty_tx_reqs[i] = i;
+		rxq->empty_rx_reqs[i] = i;
 
 	/* Store pointer to this queue in upper layer */
 	rxq->configured = 1;
@@ -1436,12 +1404,13 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
 		return 0;
 
 	in_use = rxq->next_to_use - rxq->next_to_clean;
-	ena_assert_msg(((in_use + count) < ring_size), "bad ring state");
+	ena_assert_msg(((in_use + count) < ring_size), "bad ring state\n");
 
 	/* get resources for incoming packets */
 	rc = rte_mempool_get_bulk(rxq->mb_pool, (void **)mbufs, count);
 	if (unlikely(rc < 0)) {
 		rte_atomic64_inc(&rxq->adapter->drv_stats->rx_nombuf);
+		++rxq->rx_stats.mbuf_alloc_fail;
 		PMD_RX_LOG(DEBUG, "there are not enough free buffers");
 		return 0;
 	}
@@ -1479,6 +1448,7 @@
 			"buffers (from %d)\n", rxq->id, i, count);
 		rte_mempool_put_bulk(rxq->mb_pool, (void **)(&mbufs[i]),
 				     count - i);
+		++rxq->rx_stats.refill_partial;
 	}
 
 	/* When we submitted free resources to device... */
@@ -1609,6 +1579,7 @@ static void check_for_missing_keep_alive(struct ena_adapter *adapter)
 		RTE_LOG(ERR, PMD, "Keep alive timeout\n");
 		adapter->reset_reason = ENA_REGS_RESET_KEEP_ALIVE_TO;
 		adapter->trigger_reset = true;
+		++adapter->dev_stats.wd_expired;
 	}
 }
 
@@ -1669,7 +1640,7 @@ ena_set_queues_placement_policy(struct ena_adapter *adapter,
 	rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations);
 	if (unlikely(rc)) {
 		PMD_INIT_LOG(WARNING, "Failed to config dev mode. "
" - "Fallback to host mode policy.\n"); + "Fallback to host mode policy."); ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; return 0; } @@ -1715,8 +1686,7 @@ static int ena_calc_io_queue_num(struct ena_com_dev *ena_dev, if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) io_tx_sq_num = get_feat_ctx->llq.max_llq_num; - io_queue_num = RTE_MIN(rte_lcore_count(), ENA_MAX_NUM_IO_QUEUES); - io_queue_num = RTE_MIN(io_queue_num, io_rx_num); + io_queue_num = RTE_MIN(ENA_MAX_NUM_IO_QUEUES, io_rx_num); io_queue_num = RTE_MIN(io_queue_num, io_tx_sq_num); io_queue_num = RTE_MIN(io_queue_num, io_tx_cq_num); @@ -1744,19 +1714,20 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev) static int adapters_found; bool wd_state; - memset(adapter, 0, sizeof(struct ena_adapter)); - ena_dev = &adapter->ena_dev; - eth_dev->dev_ops = &ena_dev_ops; eth_dev->rx_pkt_burst = ð_ena_recv_pkts; eth_dev->tx_pkt_burst = ð_ena_xmit_pkts; eth_dev->tx_pkt_prepare = ð_ena_prep_pkts; - adapter->rte_eth_dev_data = eth_dev->data; - adapter->rte_dev = eth_dev; if (rte_eal_process_type() != RTE_PROC_PRIMARY) return 0; + memset(adapter, 0, sizeof(struct ena_adapter)); + ena_dev = &adapter->ena_dev; + + adapter->rte_eth_dev_data = eth_dev->data; + adapter->rte_dev = eth_dev; + pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); adapter->pdev = pci_dev; @@ -1832,14 +1803,20 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev) /* Set max MTU for this device */ adapter->max_mtu = get_feat_ctx.dev_attr.max_mtu; - /* set device support for TSO */ - adapter->tso4_supported = get_feat_ctx.offload.tx & - ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK; + /* set device support for offloads */ + adapter->offloads.tso4_supported = (get_feat_ctx.offload.tx & + ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK) != 0; + adapter->offloads.tx_csum_supported = (get_feat_ctx.offload.tx & + ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK) != 0; + adapter->offloads.rx_csum_supported = + (get_feat_ctx.offload.rx_supported & + ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK) != 0; /* Copy MAC address and point DPDK to it */ - eth_dev->data->mac_addrs = (struct ether_addr *)adapter->mac_addr; - ether_addr_copy((struct ether_addr *)get_feat_ctx.dev_attr.mac_addr, - (struct ether_addr *)adapter->mac_addr); + eth_dev->data->mac_addrs = (struct rte_ether_addr *)adapter->mac_addr; + rte_ether_addr_copy((struct rte_ether_addr *) + get_feat_ctx.dev_attr.mac_addr, + (struct rte_ether_addr *)adapter->mac_addr); /* * Pass the information to the rte_eth_dev_close() that it should also @@ -1883,24 +1860,43 @@ err: return rc; } -static int eth_ena_dev_uninit(struct rte_eth_dev *eth_dev) +static void ena_destroy_device(struct rte_eth_dev *eth_dev) { struct ena_adapter *adapter = (struct ena_adapter *)(eth_dev->data->dev_private); + struct ena_com_dev *ena_dev = &adapter->ena_dev; - if (rte_eal_process_type() != RTE_PROC_PRIMARY) - return 0; + if (adapter->state == ENA_ADAPTER_STATE_FREE) + return; + + ena_com_set_admin_running_state(ena_dev, false); if (adapter->state != ENA_ADAPTER_STATE_CLOSED) ena_close(eth_dev); + ena_com_delete_debug_area(ena_dev); + ena_com_delete_host_info(ena_dev); + + ena_com_abort_admin_commands(ena_dev); + ena_com_wait_for_abort_completion(ena_dev); + ena_com_admin_destroy(ena_dev); + ena_com_mmio_reg_read_request_destroy(ena_dev); + + adapter->state = ENA_ADAPTER_STATE_FREE; +} + +static int eth_ena_dev_uninit(struct rte_eth_dev *eth_dev) +{ + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; + + 
+	ena_destroy_device(eth_dev);
+
 	eth_dev->dev_ops = NULL;
 	eth_dev->rx_pkt_burst = NULL;
 	eth_dev->tx_pkt_burst = NULL;
 	eth_dev->tx_pkt_prepare = NULL;
 
-	adapter->state = ENA_ADAPTER_STATE_FREE;
-
 	return 0;
 }
 
@@ -1948,16 +1944,14 @@ static void ena_infos_get(struct rte_eth_dev *dev,
 {
 	struct ena_adapter *adapter;
 	struct ena_com_dev *ena_dev;
-	struct ena_com_dev_get_features_ctx feat;
 	uint64_t rx_feat = 0, tx_feat = 0;
-	int rc = 0;
 
-	ena_assert_msg(dev->data != NULL, "Uninitialized device");
-	ena_assert_msg(dev->data->dev_private != NULL, "Uninitialized device");
+	ena_assert_msg(dev->data != NULL, "Uninitialized device\n");
+	ena_assert_msg(dev->data->dev_private != NULL, "Uninitialized device\n");
 	adapter = (struct ena_adapter *)(dev->data->dev_private);
 
 	ena_dev = &adapter->ena_dev;
-	ena_assert_msg(ena_dev != NULL, "Uninitialized device");
+	ena_assert_msg(ena_dev != NULL, "Uninitialized device\n");
 
 	dev_info->speed_capa =
 			ETH_LINK_SPEED_1G   |
@@ -1969,26 +1963,16 @@ static void ena_infos_get(struct rte_eth_dev *dev,
 			ETH_LINK_SPEED_50G  |
 			ETH_LINK_SPEED_100G;
 
-	/* Get supported features from HW */
-	rc = ena_com_get_dev_attr_feat(ena_dev, &feat);
-	if (unlikely(rc)) {
-		RTE_LOG(ERR, PMD,
-			"Cannot get attribute for ena device rc= %d\n", rc);
-		return;
-	}
-
 	/* Set Tx & Rx features available for device */
-	if (feat.offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK)
+	if (adapter->offloads.tso4_supported)
 		tx_feat	|= DEV_TX_OFFLOAD_TCP_TSO;
 
-	if (feat.offload.tx &
-	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK)
+	if (adapter->offloads.tx_csum_supported)
 		tx_feat |= DEV_TX_OFFLOAD_IPV4_CKSUM |
 			DEV_TX_OFFLOAD_UDP_CKSUM |
 			DEV_TX_OFFLOAD_TCP_CKSUM;
 
-	if (feat.offload.rx_supported &
-	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK)
+	if (adapter->offloads.rx_csum_supported)
 		rx_feat |= DEV_RX_OFFLOAD_IPV4_CKSUM |
 			DEV_RX_OFFLOAD_UDP_CKSUM  |
 			DEV_RX_OFFLOAD_TCP_CKSUM;
@@ -2001,6 +1985,9 @@ static void ena_infos_get(struct rte_eth_dev *dev,
 	dev_info->tx_offload_capa = tx_feat;
 	dev_info->tx_queue_offload_capa = tx_feat;
 
+	dev_info->flow_type_rss_offloads = ETH_RSS_IP | ETH_RSS_TCP |
+					   ETH_RSS_UDP;
+
 	dev_info->min_rx_bufsize = ENA_MIN_FRAME_LEN;
 	dev_info->max_rx_pktlen  = adapter->max_mtu;
 	dev_info->max_mac_addrs = 1;
@@ -2072,6 +2059,7 @@ static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 			rx_ring->adapter->reset_reason =
 				ENA_REGS_RESET_TOO_MANY_RX_DESCS;
 			rx_ring->adapter->trigger_reset = true;
+			++rx_ring->rx_stats.bad_desc_num;
 			return 0;
 		}
 
@@ -2081,10 +2069,14 @@ static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		while (segments < ena_rx_ctx.descs) {
 			req_id = ena_rx_ctx.ena_bufs[segments].req_id;
 			rc = validate_rx_req_id(rx_ring, req_id);
-			if (unlikely(rc))
+			if (unlikely(rc)) {
+				if (segments != 0)
+					rte_mbuf_raw_free(mbuf_head);
 				break;
+			}
 
 			mbuf = rx_buff_info[req_id];
+			rx_buff_info[req_id] = NULL;
 			mbuf->data_len = ena_rx_ctx.ena_bufs[segments].len;
 			mbuf->data_off = RTE_PKTMBUF_HEADROOM;
 			mbuf->refcnt = 1;
@@ -2111,19 +2103,28 @@ static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 
 		/* fill mbuf attributes if any */
 		ena_rx_mbuf_prepare(mbuf_head, &ena_rx_ctx);
+
+		if (unlikely(mbuf_head->ol_flags &
+			     (PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD)))
+			++rx_ring->rx_stats.bad_csum;
+
 		mbuf_head->hash.rss = ena_rx_ctx.hash;
 
 		/* pass to DPDK application head mbuf */
 		rx_pkts[recv_idx] = mbuf_head;
 		recv_idx++;
+		rx_ring->rx_stats.bytes += mbuf_head->pkt_len;
 	}
 
+	rx_ring->rx_stats.cnt += recv_idx;
 	rx_ring->next_to_clean = next_to_clean;
 
 	desc_in_use = desc_in_use - completed + 1;
 	/* Burst refill to save doorbells, memory barriers, const interval */
-	if (ring_size - desc_in_use > ENA_RING_DESCS_RATIO(ring_size))
+	if (ring_size - desc_in_use > ENA_RING_DESCS_RATIO(ring_size)) {
+		ena_com_update_dev_comp_head(rx_ring->ena_com_io_cq);
 		ena_populate_rx_queue(rx_ring, ring_size - desc_in_use);
+	}
 
 	return recv_idx;
 }
 
@@ -2136,7 +2137,7 @@ eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 	uint32_t i;
 	struct rte_mbuf *m;
 	struct ena_ring *tx_ring = (struct ena_ring *)(tx_queue);
-	struct ipv4_hdr *ip_hdr;
+	struct rte_ipv4_hdr *ip_hdr;
 	uint64_t ol_flags;
 	uint16_t frag_field;
 
@@ -2151,33 +2152,33 @@ eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		 * length of the ethernet header.
 		 */
 		if (unlikely(m->l2_len == 0))
-			m->l2_len = sizeof(struct ether_hdr);
+			m->l2_len = sizeof(struct rte_ether_hdr);
 
-		ip_hdr = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *,
+		ip_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
 						 m->l2_len);
 		frag_field = rte_be_to_cpu_16(ip_hdr->fragment_offset);
 
-		if ((frag_field & IPV4_HDR_DF_FLAG) != 0) {
+		if ((frag_field & RTE_IPV4_HDR_DF_FLAG) != 0) {
 			m->packet_type |= RTE_PTYPE_L4_NONFRAG;
 
 			/* If IPv4 header has DF flag enabled and TSO support is
 			 * disabled, partial checksum should not be calculated.
 			 */
-			if (!tx_ring->adapter->tso4_supported)
+			if (!tx_ring->adapter->offloads.tso4_supported)
 				continue;
 		}
 
 		if ((ol_flags & ENA_TX_OFFLOAD_NOTSUP_MASK) != 0 ||
 				(ol_flags & PKT_TX_L4_MASK) ==
 				PKT_TX_SCTP_CKSUM) {
-			rte_errno = -ENOTSUP;
+			rte_errno = ENOTSUP;
 			return i;
 		}
 
 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
 		ret = rte_validate_tx_offload(m);
 		if (ret != 0) {
-			rte_errno = ret;
+			rte_errno = -ret;
 			return i;
 		}
 #endif
@@ -2190,7 +2191,7 @@ eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		ret = rte_net_intel_cksum_flags_prepare(m,
 			ol_flags & ~PKT_TX_TCP_SEG);
 		if (ret != 0) {
-			rte_errno = ret;
+			rte_errno = -ret;
 			return i;
 		}
 	}
@@ -2239,9 +2240,14 @@ static int ena_check_and_linearize_mbuf(struct ena_ring *tx_ring,
 	    (header_len < tx_ring->tx_max_header_size))
 		return 0;
 
+	++tx_ring->tx_stats.linearize;
 	rc = rte_pktmbuf_linearize(mbuf);
-	if (unlikely(rc))
+	if (unlikely(rc)) {
 		RTE_LOG(WARNING, PMD, "Mbuf linearize failed\n");
+		rte_atomic64_inc(&tx_ring->adapter->drv_stats->ierrors);
+		++tx_ring->tx_stats.linearize_failed;
+		return rc;
+	}
 
 	return rc;
 }
@@ -2264,6 +2270,7 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 	uint16_t push_len = 0;
 	uint16_t delta = 0;
 	int nb_hw_desc;
+	uint32_t total_length;
 
 	/* Check adapter state */
 	if (unlikely(tx_ring->adapter->state != ENA_ADAPTER_STATE_RUNNING)) {
@@ -2278,6 +2285,7 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 
 	for (sent_idx = 0; sent_idx < nb_pkts; sent_idx++) {
 		mbuf = tx_pkts[sent_idx];
+		total_length = 0;
 
 		rc = ena_check_and_linearize_mbuf(tx_ring, mbuf);
 		if (unlikely(rc))
@@ -2343,6 +2351,7 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 			ebuf++;
 			tx_info->num_of_bufs++;
 		}
+		total_length += mbuf->data_len;
 
 		while ((mbuf = mbuf->next) != NULL) {
 			seg_len = mbuf->data_len;
@@ -2355,6 +2364,7 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 			ebuf->paddr = mbuf->buf_iova + mbuf->data_off + delta;
 			ebuf->len = seg_len - delta;
+			total_length += ebuf->len;
 
 			ebuf++;
 			tx_info->num_of_bufs++;
@@ -2375,20 +2385,25 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		/* prepare the packet's descriptors to dma engine */
 		rc = ena_com_prepare_tx(tx_ring->ena_com_io_sq,
 					&ena_tx_ctx, &nb_hw_desc);
-		if (unlikely(rc))
+		if (unlikely(rc)) {
+			++tx_ring->tx_stats.prepare_ctx_err;
 			break;
-
+		}
 		tx_info->tx_descs = nb_hw_desc;
 
 		next_to_use++;
+		tx_ring->tx_stats.cnt += tx_info->num_of_bufs;
+		tx_ring->tx_stats.bytes += total_length;
 	}
+	tx_ring->tx_stats.available_desc =
+		ena_com_free_desc(tx_ring->ena_com_io_sq);
 
 	/* If there are ready packets to be xmitted... */
 	if (sent_idx > 0) {
 		/* ...let HW do its best :-) */
 		rte_wmb();
 		ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
-
+		tx_ring->tx_stats.doorbells++;
 		tx_ring->next_to_use = next_to_use;
 	}
 
@@ -2415,16 +2430,177 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		if (unlikely(total_tx_descs > ENA_RING_DESCS_RATIO(ring_size)))
 			break;
 	}
+	tx_ring->tx_stats.available_desc =
+		ena_com_free_desc(tx_ring->ena_com_io_sq);
 
 	if (total_tx_descs > 0) {
 		/* acknowledge completion of sent packets */
-		ena_com_comp_ack(tx_ring->ena_com_io_sq, total_tx_descs);
 		tx_ring->next_to_clean = next_to_clean;
+		ena_com_comp_ack(tx_ring->ena_com_io_sq, total_tx_descs);
+		ena_com_update_dev_comp_head(tx_ring->ena_com_io_cq);
 	}
 
+	tx_ring->tx_stats.tx_poll++;
+
 	return sent_idx;
 }
 
+/**
+ * DPDK callback to retrieve names of extended device statistics
+ *
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ * @param[out] xstats_names
+ *   Buffer to insert names into.
+ * @param n
+ *   Number of names.
+ *
+ * @return
+ *   Number of xstats names.
+ */
+static int ena_xstats_get_names(struct rte_eth_dev *dev,
+				struct rte_eth_xstat_name *xstats_names,
+				unsigned int n)
+{
+	unsigned int xstats_count = ena_xstats_calc_num(dev);
+	unsigned int stat, i, count = 0;
+
+	if (n < xstats_count || !xstats_names)
+		return xstats_count;
+
+	for (stat = 0; stat < ENA_STATS_ARRAY_GLOBAL; stat++, count++)
+		strcpy(xstats_names[count].name,
+			ena_stats_global_strings[stat].name);
+
+	for (stat = 0; stat < ENA_STATS_ARRAY_RX; stat++)
+		for (i = 0; i < dev->data->nb_rx_queues; i++, count++)
+			snprintf(xstats_names[count].name,
+				sizeof(xstats_names[count].name),
+				"rx_q%d_%s", i,
+				ena_stats_rx_strings[stat].name);
+
+	for (stat = 0; stat < ENA_STATS_ARRAY_TX; stat++)
+		for (i = 0; i < dev->data->nb_tx_queues; i++, count++)
+			snprintf(xstats_names[count].name,
+				sizeof(xstats_names[count].name),
+				"tx_q%d_%s", i,
+				ena_stats_tx_strings[stat].name);
+
+	return xstats_count;
+}
+
+/**
+ * DPDK callback to get extended device statistics.
+ *
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ * @param[out] stats
+ *   Stats table output buffer.
+ * @param n
+ *   The size of the stats table.
+ *
+ * @return
+ *   Number of xstats on success, negative on failure.
+ */
+static int ena_xstats_get(struct rte_eth_dev *dev,
+			  struct rte_eth_xstat *xstats,
+			  unsigned int n)
+{
+	struct ena_adapter *adapter =
+		(struct ena_adapter *)(dev->data->dev_private);
+	unsigned int xstats_count = ena_xstats_calc_num(dev);
+	unsigned int stat, i, count = 0;
+	int stat_offset;
+	void *stats_begin;
+
+	if (n < xstats_count)
+		return xstats_count;
+
+	if (!xstats)
+		return 0;
+
+	for (stat = 0; stat < ENA_STATS_ARRAY_GLOBAL; stat++, count++) {
+		stat_offset = ena_stats_global_strings[stat].stat_offset;
+		stats_begin = &adapter->dev_stats;
+
+		xstats[count].id = count;
+		xstats[count].value = *((uint64_t *)
+			((char *)stats_begin + stat_offset));
+	}
+
+	for (stat = 0; stat < ENA_STATS_ARRAY_RX; stat++) {
+		for (i = 0; i < dev->data->nb_rx_queues; i++, count++) {
+			stat_offset = ena_stats_rx_strings[stat].stat_offset;
+			stats_begin = &adapter->rx_ring[i].rx_stats;
+
+			xstats[count].id = count;
+			xstats[count].value = *((uint64_t *)
+				((char *)stats_begin + stat_offset));
+		}
+	}
+
+	for (stat = 0; stat < ENA_STATS_ARRAY_TX; stat++) {
+		for (i = 0; i < dev->data->nb_tx_queues; i++, count++) {
+			stat_offset = ena_stats_tx_strings[stat].stat_offset;
+			stats_begin = &adapter->tx_ring[i].tx_stats;
+
+			xstats[count].id = count;
+			xstats[count].value = *((uint64_t *)
+				((char *)stats_begin + stat_offset));
+		}
+	}
+
+	return count;
+}
+
+static int ena_xstats_get_by_id(struct rte_eth_dev *dev,
+				const uint64_t *ids,
+				uint64_t *values,
+				unsigned int n)
+{
+	struct ena_adapter *adapter =
+		(struct ena_adapter *)(dev->data->dev_private);
+	uint64_t id;
+	uint64_t rx_entries, tx_entries;
+	unsigned int i;
+	int qid;
+	int valid = 0;
+	for (i = 0; i < n; ++i) {
+		id = ids[i];
+		/* Check if id belongs to global statistics */
+		if (id < ENA_STATS_ARRAY_GLOBAL) {
+			values[i] = *((uint64_t *)&adapter->dev_stats + id);
+			++valid;
+			continue;
+		}
+
+		/* Check if id belongs to rx queue statistics */
+		id -= ENA_STATS_ARRAY_GLOBAL;
+		rx_entries = ENA_STATS_ARRAY_RX * dev->data->nb_rx_queues;
+		if (id < rx_entries) {
+			qid = id % dev->data->nb_rx_queues;
+			id /= dev->data->nb_rx_queues;
+			values[i] = *((uint64_t *)
+				&adapter->rx_ring[qid].rx_stats + id);
+			++valid;
+			continue;
+		}
+		/* Check if id belongs to tx queue statistics */
+		id -= rx_entries;
+		tx_entries = ENA_STATS_ARRAY_TX * dev->data->nb_tx_queues;
+		if (id < tx_entries) {
+			qid = id % dev->data->nb_tx_queues;
+			id /= dev->data->nb_tx_queues;
+			values[i] = *((uint64_t *)
+				&adapter->tx_ring[qid].tx_stats + id);
+			++valid;
+			continue;
+		}
+	}
+
+	return valid;
+}
+
 /*********************************************************************
  *  PMD configuration
  *********************************************************************/
@@ -2511,8 +2687,14 @@ static void ena_keep_alive(void *adapter_data,
 			   __rte_unused struct ena_admin_aenq_entry *aenq_e)
 {
 	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
+	struct ena_admin_aenq_keep_alive_desc *desc;
+	uint64_t rx_drops;
 
 	adapter->timestamp_wd = rte_get_timer_cycles();
+
+	desc = (struct ena_admin_aenq_keep_alive_desc *)aenq_e;
+	rx_drops = ((uint64_t)desc->rx_drops_high << 32) | desc->rx_drops_low;
+	rte_atomic64_set(&adapter->drv_stats->rx_drops, rx_drops);
 }
 
 /**
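
For context on how the xstats callbacks added by this patch are consumed, below is a minimal sketch (not part of the patch) of a DPDK application reading them back through the public ethdev API. dump_port_xstats() is a hypothetical helper; it relies only on rte_eth_xstats_get_names() and rte_eth_xstats_get(), which dispatch to the xstats_get_names/xstats_get ops registered in ena_dev_ops above.

#include <stdio.h>
#include <stdlib.h>
#include <inttypes.h>
#include <rte_ethdev.h>

/* Illustrative helper (not part of the driver): print every extended
 * statistic of a port, e.g. "tx_q0_doorbells" or "rx_q1_refill_partial"
 * as named by ena_xstats_get_names() in the patch above.
 */
static void dump_port_xstats(uint16_t port_id)
{
	struct rte_eth_xstat *xstats = NULL;
	struct rte_eth_xstat_name *names = NULL;
	int i, len;

	/* Calling with a NULL buffer returns only the required count. */
	len = rte_eth_xstats_get(port_id, NULL, 0);
	if (len <= 0)
		return;

	xstats = malloc(sizeof(*xstats) * len);
	names = malloc(sizeof(*names) * len);
	if (xstats == NULL || names == NULL)
		goto out;

	/* Both calls end up in the PMD's xstats_get{,_names} dev ops. */
	if (rte_eth_xstats_get_names(port_id, names, len) != len ||
	    rte_eth_xstats_get(port_id, xstats, len) != len)
		goto out;

	for (i = 0; i < len; i++)
		printf("%s: %" PRIu64 "\n",
		       names[xstats[i].id].name, xstats[i].value);
out:
	free(xstats);
	free(names);
}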