diff --git a/drivers/net/ena/base/ena_com.c b/drivers/net/ena/base/ena_com.c
index e9b9be28dc..aae68721fb 100644
--- a/drivers/net/ena/base/ena_com.c
+++ b/drivers/net/ena/base/ena_com.c
@@ -1,35 +1,7 @@
-/*-
-* BSD LICENSE
-*
-* Copyright (c) 2015-2016 Amazon.com, Inc. or its affiliates.
-* All rights reserved.
-*
-* Redistribution and use in source and binary forms, with or without
-* modification, are permitted provided that the following conditions
-* are met:
-*
-* * Redistributions of source code must retain the above copyright
-* notice, this list of conditions and the following disclaimer.
-* * Redistributions in binary form must reproduce the above copyright
-* notice, this list of conditions and the following disclaimer in
-* the documentation and/or other materials provided with the
-* distribution.
-* * Neither the name of copyright holder nor the names of its
-* contributors may be used to endorse or promote products derived
-* from this software without specific prior written permission.
-*
-* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates.
+ * All rights reserved.
+ */
 
 #include "ena_com.h"
 
@@ -42,7 +14,6 @@
 
 #define ENA_ASYNC_QUEUE_DEPTH 16
 #define ENA_ADMIN_QUEUE_DEPTH 32
-
 #define ENA_CTRL_MAJOR		0
 #define ENA_CTRL_MINOR		0
 #define ENA_CTRL_SUB_MINOR	1
@@ -63,7 +34,9 @@
 
 #define ENA_REGS_ADMIN_INTR_MASK 1
 
-#define ENA_POLL_MS	5
+#define ENA_MIN_ADMIN_POLL_US 100
+
+#define ENA_MAX_ADMIN_POLL_US 5000
 
 /*****************************************************************************/
 /*****************************************************************************/
@@ -92,7 +65,7 @@ struct ena_com_stats_ctx {
 	struct ena_admin_acq_get_stats_resp get_resp;
 };
 
-static inline int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
+static int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
 				       struct ena_common_mem_addr *ena_addr,
 				       dma_addr_t addr)
 {
@@ -116,7 +89,7 @@ static int ena_com_admin_init_sq(struct ena_com_admin_queue *queue)
 			       sq->mem_handle);
 
 	if (!sq->entries) {
-		ena_trc_err("memory allocation failed");
+		ena_trc_err("memory allocation failed\n");
 		return ENA_COM_NO_MEM;
 	}
 
@@ -138,7 +111,7 @@ static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue)
 			       cq->mem_handle);
 
	if (!cq->entries) {
-		ena_trc_err("memory allocation failed");
+		ena_trc_err("memory allocation failed\n");
 		return ENA_COM_NO_MEM;
 	}
 
@@ -163,7 +136,7 @@ static int ena_com_admin_init_aenq(struct ena_com_dev *dev,
 			aenq->mem_handle);
 
 	if (!aenq->entries) {
-		ena_trc_err("memory allocation failed");
+		ena_trc_err("memory allocation failed\n");
 		return ENA_COM_NO_MEM;
 	}
 
@@ -193,7 +166,7 @@ static int ena_com_admin_init_aenq(struct ena_com_dev *dev,
 	return 0;
 }
 
-static inline void comp_ctxt_release(struct ena_com_admin_queue *queue,
+static void comp_ctxt_release(struct ena_com_admin_queue *queue,
 				     struct ena_comp_ctx *comp_ctx)
 {
 	comp_ctx->occupied = false;
@@ -209,6 +182,11 @@ static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *queue,
 		return NULL;
 	}
 
+	if (unlikely(!queue->comp_ctx)) {
+		ena_trc_err("Completion context is NULL\n");
+		return NULL;
+	}
+
 	if (unlikely(queue->comp_ctx[command_id].occupied && capture)) {
 		ena_trc_err("Completion context is occupied\n");
 		return NULL;
@@ -282,7 +260,7 @@ static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
 	return comp_ctx;
 }
 
-static inline int ena_com_init_comp_ctxt(struct ena_com_admin_queue *queue)
+static int ena_com_init_comp_ctxt(struct ena_com_admin_queue *queue)
 {
 	size_t size = queue->q_depth * sizeof(struct ena_comp_ctx);
 	struct ena_comp_ctx *comp_ctx;
@@ -290,7 +268,7 @@ static inline int ena_com_init_comp_ctxt(struct ena_com_admin_queue *queue)
 
 	queue->comp_ctx = ENA_MEM_ALLOC(queue->q_dmadev, size);
 	if (unlikely(!queue->comp_ctx)) {
-		ena_trc_err("memory allocation failed");
+		ena_trc_err("memory allocation failed\n");
 		return ENA_COM_NO_MEM;
 	}
 
@@ -363,18 +341,21 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
 		}
 
 		if (!io_sq->desc_addr.virt_addr) {
-			ena_trc_err("memory allocation failed");
+			ena_trc_err("memory allocation failed\n");
 			return ENA_COM_NO_MEM;
 		}
 	}
 
 	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
 		/* Allocate bounce buffers */
-		io_sq->bounce_buf_ctrl.buffer_size = ena_dev->llq_info.desc_list_entry_size;
-		io_sq->bounce_buf_ctrl.buffers_num = ENA_COM_BOUNCE_BUFFER_CNTRL_CNT;
+		io_sq->bounce_buf_ctrl.buffer_size =
+			ena_dev->llq_info.desc_list_entry_size;
+		io_sq->bounce_buf_ctrl.buffers_num =
+			ENA_COM_BOUNCE_BUFFER_CNTRL_CNT;
 		io_sq->bounce_buf_ctrl.next_to_use = 0;
 
-		size = io_sq->bounce_buf_ctrl.buffer_size * io_sq->bounce_buf_ctrl.buffers_num;
+		size = io_sq->bounce_buf_ctrl.buffer_size *
+			io_sq->bounce_buf_ctrl.buffers_num;
 
 		ENA_MEM_ALLOC_NODE(ena_dev->dmadev,
 				   size,
@@ -385,11 +366,12 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
 			io_sq->bounce_buf_ctrl.base_buffer =
 				ENA_MEM_ALLOC(ena_dev->dmadev, size);
 		if (!io_sq->bounce_buf_ctrl.base_buffer) {
-			ena_trc_err("bounce buffer memory allocation failed");
+			ena_trc_err("bounce buffer memory allocation failed\n");
 			return ENA_COM_NO_MEM;
 		}
 
-		memcpy(&io_sq->llq_info, &ena_dev->llq_info, sizeof(io_sq->llq_info));
+		memcpy(&io_sq->llq_info, &ena_dev->llq_info,
+		       sizeof(io_sq->llq_info));
 
 		/* Initiate the first bounce buffer */
 		io_sq->llq_buf_ctrl.curr_bounce_buf =
@@ -398,6 +380,8 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
 		       0x0, io_sq->llq_info.desc_list_entry_size);
 		io_sq->llq_buf_ctrl.descs_left_in_line =
 			io_sq->llq_info.descs_num_before_header;
+		io_sq->disable_meta_caching =
+			io_sq->llq_info.disable_meta_caching;
 
 		if (io_sq->llq_info.max_entries_in_tx_burst > 0)
 			io_sq->entries_in_tx_burst_left =
@@ -429,23 +413,25 @@ static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
 	size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
 	io_cq->bus = ena_dev->bus;
 
-	ENA_MEM_ALLOC_COHERENT_NODE(ena_dev->dmadev,
-				    size,
-				    io_cq->cdesc_addr.virt_addr,
-				    io_cq->cdesc_addr.phys_addr,
-				    io_cq->cdesc_addr.mem_handle,
-				    ctx->numa_node,
-				    prev_node);
+	ENA_MEM_ALLOC_COHERENT_NODE_ALIGNED(ena_dev->dmadev,
+					    size,
+					    io_cq->cdesc_addr.virt_addr,
+					    io_cq->cdesc_addr.phys_addr,
+					    io_cq->cdesc_addr.mem_handle,
+					    ctx->numa_node,
+					    prev_node,
+					    ENA_CDESC_RING_SIZE_ALIGNMENT);
 	if (!io_cq->cdesc_addr.virt_addr) {
-		ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
-				       size,
-				       io_cq->cdesc_addr.virt_addr,
-				       io_cq->cdesc_addr.phys_addr,
-				       io_cq->cdesc_addr.mem_handle);
+		ENA_MEM_ALLOC_COHERENT_ALIGNED(ena_dev->dmadev,
+					       size,
+					       io_cq->cdesc_addr.virt_addr,
+					       io_cq->cdesc_addr.phys_addr,
+					       io_cq->cdesc_addr.mem_handle,
+					       ENA_CDESC_RING_SIZE_ALIGNMENT);
 	}
 
 	if (!io_cq->cdesc_addr.virt_addr) {
-		ena_trc_err("memory allocation failed");
+		ena_trc_err("memory allocation failed\n");
 		return ENA_COM_NO_MEM;
 	}
 
@@ -523,12 +509,9 @@ static int ena_com_comp_status_to_errno(u8 comp_status)
 	if (unlikely(comp_status != 0))
 		ena_trc_err("admin command failed[%u]\n", comp_status);
 
-	if (unlikely(comp_status > ENA_ADMIN_UNKNOWN_ERROR))
-		return ENA_COM_INVAL;
-
 	switch (comp_status) {
 	case ENA_ADMIN_SUCCESS:
-		return 0;
+		return ENA_COM_OK;
 	case ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE:
 		return ENA_COM_NO_MEM;
 	case ENA_ADMIN_UNSUPPORTED_OPCODE:
@@ -538,26 +521,36 @@ static int ena_com_comp_status_to_errno(u8 comp_status)
 	case ENA_ADMIN_ILLEGAL_PARAMETER:
 	case ENA_ADMIN_UNKNOWN_ERROR:
 		return ENA_COM_INVAL;
+	case ENA_ADMIN_RESOURCE_BUSY:
+		return ENA_COM_TRY_AGAIN;
 	}
 
-	return 0;
+	return ENA_COM_INVAL;
+}
+
+static void ena_delay_exponential_backoff_us(u32 exp, u32 delay_us)
+{
+	delay_us = ENA_MAX32(ENA_MIN_ADMIN_POLL_US, delay_us);
+	delay_us = ENA_MIN32(delay_us * (1U << exp), ENA_MAX_ADMIN_POLL_US);
+	ENA_USLEEP(delay_us);
 }
 
 static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx,
 						     struct ena_com_admin_queue *admin_queue)
 {
 	unsigned long flags = 0;
-	uint64_t timeout;
+	ena_time_t timeout;
 	int ret;
+	u32 exp = 0;
 
 	timeout = ENA_GET_SYSTEM_TIMEOUT(admin_queue->completion_timeout);
 
 	while (1) {
-                ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
-                ena_com_handle_admin_completion(admin_queue);
-                ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
+		ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
+		ena_com_handle_admin_completion(admin_queue);
+		ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
 
-                if (comp_ctx->status != ENA_CMD_SUBMITTED)
+		if (comp_ctx->status != ENA_CMD_SUBMITTED)
 			break;
 
 		if (ENA_TIME_EXPIRE(timeout)) {
@@ -572,7 +565,8 @@ static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx,
 			goto err;
 		}
 
-		ENA_MSLEEP(ENA_POLL_MS);
+		ena_delay_exponential_backoff_us(exp++,
+			admin_queue->ena_dev->ena_min_poll_delay_us);
 	}
 
 	if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
@@ -596,7 +590,7 @@ err:
 /**
  * Set the LLQ configurations of the firmware
  *
- * The driver provides only the enabled feature values to the FW,
+ * The driver provides only the enabled feature values to the device,
  * which in turn, checks if they are supported.
  */
 static int ena_com_set_llq(struct ena_com_dev *ena_dev)
@@ -618,6 +612,10 @@ static int ena_com_set_llq(struct ena_com_dev *ena_dev)
 	cmd.u.llq.desc_num_before_header_enabled = llq_info->descs_num_before_header;
 	cmd.u.llq.descriptors_stride_ctrl_enabled = llq_info->desc_stride_ctrl;
 
+	cmd.u.llq.accel_mode.u.set.enabled_flags =
+		BIT(ENA_ADMIN_DISABLE_META_CACHING) |
+		BIT(ENA_ADMIN_LIMIT_TX_BURST);
+
 	ret = ena_com_execute_admin_command(admin_queue,
 					    (struct ena_admin_aq_entry *)&cmd,
 					    sizeof(cmd),
@@ -635,6 +633,7 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
 				   struct ena_llq_configurations *llq_default_cfg)
 {
 	struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
+	struct ena_admin_accel_mode_get llq_accel_mode_get;
 	u16 supported_feat;
 	int rc;
 
@@ -643,7 +642,8 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
 	supported_feat = llq_features->header_location_ctrl_supported;
 
 	if (likely(supported_feat & llq_default_cfg->llq_header_location)) {
-		llq_info->header_location_ctrl = llq_default_cfg->llq_header_location;
+		llq_info->header_location_ctrl =
+			llq_default_cfg->llq_header_location;
 	} else {
 		ena_trc_err("Invalid header location control, supported: 0x%x\n",
 			    supported_feat);
@@ -651,8 +651,6 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
 	}
 
 	if (likely(llq_info->header_location_ctrl == ENA_ADMIN_INLINE_HEADER)) {
-		llq_info->inline_header = true;
-
 		supported_feat = llq_features->descriptors_stride_ctrl_supported;
 		if (likely(supported_feat & llq_default_cfg->llq_stride_ctrl)) {
 			llq_info->desc_stride_ctrl = llq_default_cfg->llq_stride_ctrl;
@@ -667,14 +665,12 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
 				return -EINVAL;
 			}
 
-			ena_trc_err("Default llq stride ctrl is not supported, performing fallback,"
-				    "default: 0x%x, supported: 0x%x, used: 0x%x\n",
+			ena_trc_err("Default llq stride ctrl is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
 				    llq_default_cfg->llq_stride_ctrl,
 				    supported_feat,
 				    llq_info->desc_stride_ctrl);
 		}
 	} else {
-		llq_info->inline_header = false;
 		llq_info->desc_stride_ctrl = 0;
 	}
 
@@ -697,8 +693,7 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
 			return -EINVAL;
 		}
 
-		ena_trc_err("Default llq ring entry size is not supported, performing fallback,"
-			    "default: 0x%x, supported: 0x%x, used: 0x%x\n",
+		ena_trc_err("Default llq ring entry size is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
 			    llq_default_cfg->llq_ring_entry_size,
 			    supported_feat,
 			    llq_info->desc_list_entry_size);
@@ -736,25 +731,30 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
 			return -EINVAL;
 		}
 
-		ena_trc_err("Default llq num descs before header is not supported, performing fallback,"
-			    "default: 0x%x, supported: 0x%x, used: 0x%x\n",
+		ena_trc_err("Default llq num descs before header is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
 			    llq_default_cfg->llq_num_decs_before_header,
 			    supported_feat,
 			    llq_info->descs_num_before_header);
 	}
 
+	/* Check for accelerated queue supported */
+	llq_accel_mode_get = llq_features->accel_mode.u.get;
 
-	llq_info->max_entries_in_tx_burst =
-		(u16)(llq_features->max_tx_burst_size / llq_default_cfg->llq_ring_entry_size_value);
+	llq_info->disable_meta_caching =
+		!!(llq_accel_mode_get.supported_flags &
+		   BIT(ENA_ADMIN_DISABLE_META_CACHING));
+
+	if (llq_accel_mode_get.supported_flags & BIT(ENA_ADMIN_LIMIT_TX_BURST))
+		llq_info->max_entries_in_tx_burst =
+			llq_accel_mode_get.max_tx_burst_size /
+			llq_default_cfg->llq_ring_entry_size_value;
 
 	rc = ena_com_set_llq(ena_dev);
 	if (rc)
 		ena_trc_err("Cannot set LLQ configuration: %d\n", rc);
 
-	return 0;
+	return rc;
 }
 
-
-
 static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,
 							struct ena_com_admin_queue *admin_queue)
 {
@@ -775,16 +775,25 @@ static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,
 		admin_queue->stats.no_completion++;
 		ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
 
-		if (comp_ctx->status == ENA_CMD_COMPLETED)
-			ena_trc_err("The ena device have completion but the driver didn't receive any MSI-X interrupt (cmd %d)\n",
-				    comp_ctx->cmd_opcode);
-		else
-			ena_trc_err("The ena device doesn't send any completion for the admin cmd %d status %d\n",
+		if (comp_ctx->status == ENA_CMD_COMPLETED) {
+			ena_trc_err("The ena device sent a completion but the driver didn't receive a MSI-X interrupt (cmd %d), autopolling mode is %s\n",
+				    comp_ctx->cmd_opcode, admin_queue->auto_polling ? "ON" : "OFF");
+			/* Check if fallback to polling is enabled */
+			if (admin_queue->auto_polling)
+				admin_queue->polling = true;
+		} else {
+			ena_trc_err("The ena device didn't send a completion for the admin cmd %d status %d\n",
 				    comp_ctx->cmd_opcode, comp_ctx->status);
-
-		admin_queue->running_state = false;
-		ret = ENA_COM_TIMER_EXPIRED;
-		goto err;
+		}
+		/* Check if shifted to polling mode.
+		 * This will happen if there is a completion without an interrupt
+		 * and autopolling mode is enabled. Continuing normal execution in such case
+		 */
+		if (!admin_queue->polling) {
+			admin_queue->running_state = false;
+			ret = ENA_COM_TIMER_EXPIRED;
+			goto err;
+		}
 	}
 
 	ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
@@ -845,7 +854,7 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
 	}
 
 	if (read_resp->reg_off != offset) {
-		ena_trc_err("Read failure: wrong offset provided");
+		ena_trc_err("Read failure: wrong offset provided\n");
 		ret = ENA_MMIO_READ_TIMEOUT;
 	} else {
 		ret = read_resp->reg_val;
@@ -940,8 +949,9 @@ static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
 	}
 
 	if (io_sq->bounce_buf_ctrl.base_buffer) {
-		size = io_sq->llq_info.desc_list_entry_size * ENA_COM_BOUNCE_BUFFER_CNTRL_CNT;
-		ENA_MEM_FREE(ena_dev->dmadev, io_sq->bounce_buf_ctrl.base_buffer);
+		ENA_MEM_FREE(ena_dev->dmadev,
+			     io_sq->bounce_buf_ctrl.base_buffer,
+			     (io_sq->llq_info.desc_list_entry_size * ENA_COM_BOUNCE_BUFFER_CNTRL_CNT));
 		io_sq->bounce_buf_ctrl.base_buffer = NULL;
 	}
 }
@@ -949,12 +959,13 @@ static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
 static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
 				u16 exp_state)
 {
-	u32 val, i;
+	u32 val, exp = 0;
+	ena_time_t timeout_stamp;
 
-	/* Convert timeout from resolution of 100ms to ENA_POLL_MS */
-	timeout = (timeout * 100) / ENA_POLL_MS;
+	/* Convert timeout from resolution of 100ms to us resolution. */
+	timeout_stamp = ENA_GET_SYSTEM_TIMEOUT(100 * 1000 * timeout);
 
-	for (i = 0; i < timeout; i++) {
+	while (1) {
 		val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
 
 		if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) {
@@ -966,10 +977,11 @@ static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
 			exp_state)
 			return 0;
 
-		ENA_MSLEEP(ENA_POLL_MS);
-	}
+		if (ENA_TIME_EXPIRE(timeout_stamp))
+			return ENA_COM_TIMER_EXPIRED;
 
-	return ENA_COM_TIMER_EXPIRED;
+		ena_delay_exponential_backoff_us(exp++, ena_dev->ena_min_poll_delay_us);
+	}
 }
 
 static bool ena_com_check_supported_feature_id(struct ena_com_dev *ena_dev,
@@ -1052,10 +1064,30 @@ static int ena_com_get_feature(struct ena_com_dev *ena_dev,
 				      feature_ver);
 }
 
+int ena_com_get_current_hash_function(struct ena_com_dev *ena_dev)
+{
+	return ena_dev->rss.hash_func;
+}
+
+static void ena_com_hash_key_fill_default_key(struct ena_com_dev *ena_dev)
+{
+	struct ena_admin_feature_rss_flow_hash_control *hash_key =
+		(ena_dev->rss).hash_key;
+
+	ENA_RSS_FILL_KEY(&hash_key->key, sizeof(hash_key->key));
+	/* The key buffer is stored in the device in an array of
+	 * uint32 elements.
+	 */
+	hash_key->keys_num = ENA_ADMIN_RSS_KEY_PARTS;
+}
+
 static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
 {
 	struct ena_rss *rss = &ena_dev->rss;
 
+	if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_RSS_HASH_FUNCTION))
+		return ENA_COM_UNSUPPORTED;
+
 	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
 			       sizeof(*rss->hash_key),
 			       rss->hash_key,
@@ -1183,7 +1215,9 @@ static void ena_com_indirect_table_destroy(struct ena_com_dev *ena_dev)
 	rss->rss_ind_tbl = NULL;
 
 	if (rss->host_rss_ind_tbl)
-		ENA_MEM_FREE(ena_dev->dmadev, rss->host_rss_ind_tbl);
+		ENA_MEM_FREE(ena_dev->dmadev,
+			     rss->host_rss_ind_tbl,
+			     ((1ULL << rss->tbl_log_size) * sizeof(u16)));
 	rss->host_rss_ind_tbl = NULL;
 }
 
@@ -1284,63 +1318,29 @@ static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev)
 	return 0;
 }
 
-static int ena_com_ind_tbl_convert_from_device(struct ena_com_dev *ena_dev)
-{
-	u16 dev_idx_to_host_tbl[ENA_TOTAL_NUM_QUEUES] = { (u16)-1 };
-	struct ena_rss *rss = &ena_dev->rss;
-	u8 idx;
-	u16 i;
-
-	for (i = 0; i < ENA_TOTAL_NUM_QUEUES; i++)
-		dev_idx_to_host_tbl[ena_dev->io_sq_queues[i].idx] = i;
-
-	for (i = 0; i < 1 << rss->tbl_log_size; i++) {
-		if (rss->rss_ind_tbl[i].cq_idx > ENA_TOTAL_NUM_QUEUES)
-			return ENA_COM_INVAL;
-		idx = (u8)rss->rss_ind_tbl[i].cq_idx;
-
-		if (dev_idx_to_host_tbl[idx] > ENA_TOTAL_NUM_QUEUES)
-			return ENA_COM_INVAL;
-
-		rss->host_rss_ind_tbl[i] = dev_idx_to_host_tbl[idx];
-	}
-
-	return 0;
-}
-
-static int ena_com_init_interrupt_moderation_table(struct ena_com_dev *ena_dev)
-{
-	size_t size;
-
-	size = sizeof(struct ena_intr_moder_entry) * ENA_INTR_MAX_NUM_OF_LEVELS;
-
-	ena_dev->intr_moder_tbl = ENA_MEM_ALLOC(ena_dev->dmadev, size);
-	if (!ena_dev->intr_moder_tbl)
-		return ENA_COM_NO_MEM;
-
-	ena_com_config_default_interrupt_moderation_table(ena_dev);
-
-	return 0;
-}
-
 static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev,
 						 u16 intr_delay_resolution)
 {
-	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
-	unsigned int i;
+	u16 prev_intr_delay_resolution = ena_dev->intr_delay_resolution;
 
-	if (!intr_delay_resolution) {
+	if (unlikely(!intr_delay_resolution)) {
 		ena_trc_err("Illegal intr_delay_resolution provided. Going to use default 1 usec resolution\n");
-		intr_delay_resolution = 1;
+		intr_delay_resolution = ENA_DEFAULT_INTR_DELAY_RESOLUTION;
 	}
-	ena_dev->intr_delay_resolution = intr_delay_resolution;
 
 	/* update Rx */
-	for (i = 0; i < ENA_INTR_MAX_NUM_OF_LEVELS; i++)
-		intr_moder_tbl[i].intr_moder_interval /= intr_delay_resolution;
+	ena_dev->intr_moder_rx_interval =
+		ena_dev->intr_moder_rx_interval *
+		prev_intr_delay_resolution /
+		intr_delay_resolution;
 
 	/* update Tx */
-	ena_dev->intr_moder_tx_interval /= intr_delay_resolution;
+	ena_dev->intr_moder_tx_interval =
+		ena_dev->intr_moder_tx_interval *
+		prev_intr_delay_resolution /
+		intr_delay_resolution;
+
+	ena_dev->intr_delay_resolution = intr_delay_resolution;
 }
 
 /*****************************************************************************/
@@ -1479,11 +1479,12 @@ void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
 {
 	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
 	unsigned long flags = 0;
+	u32 exp = 0;
 
 	ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
 	while (ATOMIC32_READ(&admin_queue->outstanding_cmds) != 0) {
 		ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
-		ENA_MSLEEP(ENA_POLL_MS);
+		ena_delay_exponential_backoff_us(exp++, ena_dev->ena_min_poll_delay_us);
 		ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
 	}
 	ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
@@ -1662,9 +1663,13 @@ void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
 	struct ena_com_aenq *aenq = &ena_dev->aenq;
 	u16 size;
 
-	ENA_WAIT_EVENT_DESTROY(admin_queue->comp_ctx->wait_event);
-	if (admin_queue->comp_ctx)
-		ENA_MEM_FREE(ena_dev->dmadev, admin_queue->comp_ctx);
+	if (admin_queue->comp_ctx) {
+		ENA_WAIT_EVENT_DESTROY(admin_queue->comp_ctx->wait_event);
+		ENA_MEM_FREE(ena_dev->dmadev,
+			     admin_queue->comp_ctx,
+			     (admin_queue->q_depth * sizeof(struct ena_comp_ctx)));
+	}
+
 	admin_queue->comp_ctx = NULL;
 	size = ADMIN_SQ_SIZE(admin_queue->q_depth);
 	if (sq->entries)
@@ -1698,6 +1703,17 @@ void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
 	ena_dev->admin_queue.polling = polling;
 }
 
+bool ena_com_get_admin_polling_mode(struct ena_com_dev *ena_dev)
+{
+	return ena_dev->admin_queue.polling;
+}
+
+void ena_com_set_admin_auto_polling_mode(struct ena_com_dev *ena_dev,
+					 bool polling)
+{
+	ena_dev->admin_queue.auto_polling = polling;
+}
+
 int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
 {
 	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
@@ -1835,6 +1851,7 @@ int ena_com_admin_init(struct ena_com_dev *ena_dev,
 	if (ret)
 		goto error;
 
+	admin_queue->ena_dev = ena_dev;
 	admin_queue->running_state = true;
 
 	return 0;
@@ -1931,62 +1948,6 @@ int ena_com_get_link_params(struct ena_com_dev *ena_dev,
 	return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG, 0);
 }
 
-int ena_com_extra_properties_strings_init(struct ena_com_dev *ena_dev)
-{
-	struct ena_admin_get_feat_resp resp;
-	struct ena_extra_properties_strings *extra_properties_strings =
-			&ena_dev->extra_properties_strings;
-	u32 rc;
-
-	extra_properties_strings->size = ENA_ADMIN_EXTRA_PROPERTIES_COUNT *
-		ENA_ADMIN_EXTRA_PROPERTIES_STRING_LEN;
-
-	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
-			       extra_properties_strings->size,
-			       extra_properties_strings->virt_addr,
-			       extra_properties_strings->dma_addr,
-			       extra_properties_strings->dma_handle);
-	if (unlikely(!extra_properties_strings->virt_addr)) {
-		ena_trc_err("Failed to allocate extra properties strings\n");
-		return 0;
-	}
-
-	rc = ena_com_get_feature_ex(ena_dev, &resp,
-				    ENA_ADMIN_EXTRA_PROPERTIES_STRINGS,
-				    extra_properties_strings->dma_addr,
-				    extra_properties_strings->size, 0);
-	if (rc) {
-		ena_trc_dbg("Failed to get extra properties strings\n");
-		goto err;
-	}
-
-	return resp.u.extra_properties_strings.count;
-err:
-	ena_com_delete_extra_properties_strings(ena_dev);
-	return 0;
-}
-
-void ena_com_delete_extra_properties_strings(struct ena_com_dev *ena_dev)
-{
-	struct ena_extra_properties_strings *extra_properties_strings =
-			&ena_dev->extra_properties_strings;
-
-	if (extra_properties_strings->virt_addr) {
-		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
-				      extra_properties_strings->size,
-				      extra_properties_strings->virt_addr,
-				      extra_properties_strings->dma_addr,
-				      extra_properties_strings->dma_handle);
-		extra_properties_strings->virt_addr = NULL;
-	}
-}
-
-int ena_com_get_extra_properties_flags(struct ena_com_dev *ena_dev,
-				       struct ena_admin_get_feat_resp *resp)
-{
-	return ena_com_get_feature(ena_dev, resp,
-				   ENA_ADMIN_EXTRA_PROPERTIES_FLAGS, 0);
-}
-
 int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
 			      struct ena_com_dev_get_features_ctx *get_feat_ctx)
 {
@@ -2108,7 +2069,7 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
 	struct ena_admin_aenq_entry *aenq_e;
 	struct ena_admin_aenq_common_desc *aenq_common;
 	struct ena_com_aenq *aenq = &dev->aenq;
-	unsigned long long timestamp;
+	u64 timestamp;
 	ena_aenq_handler handler_cb;
 	u16 masked_head, processed = 0;
 	u8 phase;
@@ -2126,10 +2087,10 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
 		 */
 		dma_rmb();
 
-		timestamp = (unsigned long long)aenq_common->timestamp_low |
-			((unsigned long long)aenq_common->timestamp_high << 32);
+		timestamp = (u64)aenq_common->timestamp_low |
+			((u64)aenq_common->timestamp_high << 32);
 		ENA_TOUCH(timestamp); /* In case debug is disabled */
-		ena_trc_dbg("AENQ! Group[%x] Syndrom[%x] timestamp: [%llus]\n",
+		ena_trc_dbg("AENQ! Group[%x] Syndrom[%x] timestamp: [%" ENA_PRIu64 "s]\n",
 			    aenq_common->group,
 			    aenq_common->syndrom,
 			    timestamp);
@@ -2254,6 +2215,21 @@ static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
 	return ret;
 }
 
+int ena_com_get_eni_stats(struct ena_com_dev *ena_dev,
+			  struct ena_admin_eni_stats *stats)
+{
+	struct ena_com_stats_ctx ctx;
+	int ret;
+
+	memset(&ctx, 0x0, sizeof(ctx));
+	ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_ENI);
+	if (likely(ret == 0))
+		memcpy(stats, &ctx.get_resp.u.eni_stats,
+		       sizeof(ctx.get_resp.u.eni_stats));
+
+	return ret;
+}
+
 int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
 				struct ena_admin_basic_stats *stats)
 {
@@ -2263,8 +2239,8 @@ int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
 	memset(&ctx, 0x0, sizeof(ctx));
 	ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_BASIC);
 	if (likely(ret == 0))
-		memcpy(stats, &ctx.get_resp.basic_stats,
-		       sizeof(ctx.get_resp.basic_stats));
+		memcpy(stats, &ctx.get_resp.u.basic_stats,
+		       sizeof(ctx.get_resp.u.basic_stats));
 
 	return ret;
 }
@@ -2341,7 +2317,7 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
 	if (unlikely(ret))
 		return ret;
 
-	if (get_resp.u.flow_hash_func.supported_func & (1 << rss->hash_func)) {
+	if (!(get_resp.u.flow_hash_func.supported_func & BIT(rss->hash_func))) {
 		ena_trc_err("Func hash %d isn't supported by device, abort\n",
 			    rss->hash_func);
 		return ENA_COM_UNSUPPORTED;
@@ -2384,12 +2360,14 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
 			       enum ena_admin_hash_functions func,
 			       const u8 *key, u16 key_len, u32 init_val)
 {
-	struct ena_rss *rss = &ena_dev->rss;
+	struct ena_admin_feature_rss_flow_hash_control *hash_key;
 	struct ena_admin_get_feat_resp get_resp;
-	struct ena_admin_feature_rss_flow_hash_control *hash_key =
-		rss->hash_key;
+	enum ena_admin_hash_functions old_func;
+	struct ena_rss *rss = &ena_dev->rss;
 	int rc;
 
+	hash_key = rss->hash_key;
+
 	/* Make sure size is a mult of DWs */
 	if (unlikely(key_len & 0x3))
 		return ENA_COM_INVAL;
@@ -2401,22 +2379,23 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
 	if (unlikely(rc))
 		return rc;
 
-	if (!((1 << func) & get_resp.u.flow_hash_func.supported_func)) {
+	if (!(BIT(func) & get_resp.u.flow_hash_func.supported_func)) {
 		ena_trc_err("Flow hash function %d isn't supported\n", func);
 		return ENA_COM_UNSUPPORTED;
 	}
 
 	switch (func) {
 	case ENA_ADMIN_TOEPLITZ:
-		if (key_len > sizeof(hash_key->key)) {
-			ena_trc_err("key len (%hu) is bigger than the max supported (%zu)\n",
-				    key_len, sizeof(hash_key->key));
-			return ENA_COM_INVAL;
+		if (key) {
+			if (key_len != sizeof(hash_key->key)) {
+				ena_trc_err("key len (%hu) doesn't equal the supported size (%zu)\n",
+					    key_len, sizeof(hash_key->key));
+				return ENA_COM_INVAL;
+			}
+			memcpy(hash_key->key, key, key_len);
+			rss->hash_init_val = init_val;
+			hash_key->keys_num = key_len / sizeof(u32);
 		}
-
-		memcpy(hash_key->key, key, key_len);
-		rss->hash_init_val = init_val;
-		hash_key->keys_num = key_len >> 2;
 		break;
 	case ENA_ADMIN_CRC32:
 		rss->hash_init_val = init_val;
@@ -2426,25 +2405,27 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
 		return ENA_COM_INVAL;
 	}
 
+	old_func = rss->hash_func;
+	rss->hash_func = func;
 	rc = ena_com_set_hash_function(ena_dev);
 
 	/* Restore the old function */
 	if (unlikely(rc))
-		ena_com_get_hash_function(ena_dev, NULL, NULL);
+		rss->hash_func = old_func;
 
 	return rc;
 }
 
 int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
-			      enum ena_admin_hash_functions *func,
-			      u8 *key)
+			      enum ena_admin_hash_functions *func)
 {
 	struct ena_rss *rss = &ena_dev->rss;
 	struct ena_admin_get_feat_resp get_resp;
-	struct ena_admin_feature_rss_flow_hash_control *hash_key =
-		rss->hash_key;
 	int rc;
 
+	if (unlikely(!func))
+		return ENA_COM_INVAL;
+
 	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
 				    ENA_ADMIN_RSS_HASH_FUNCTION,
 				    rss->hash_key_dma_addr,
@@ -2452,9 +2433,20 @@ int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
 	if (unlikely(rc))
 		return rc;
 
-	rss->hash_func = get_resp.u.flow_hash_func.selected_func;
-	if (func)
-		*func = rss->hash_func;
+	/* ENA_FFS() returns 1 in case the lsb is set */
+	rss->hash_func = ENA_FFS(get_resp.u.flow_hash_func.selected_func);
+	if (rss->hash_func)
+		rss->hash_func--;
+
+	*func = rss->hash_func;
+
+	return 0;
+}
+
+int ena_com_get_hash_key(struct ena_com_dev *ena_dev, u8 *key)
+{
+	struct ena_admin_feature_rss_flow_hash_control *hash_key =
+		ena_dev->rss.hash_key;
 
 	if (key)
 		memcpy(key, hash_key->key, (size_t)(hash_key->keys_num) << 2);
@@ -2716,10 +2708,6 @@ int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl)
 	if (!ind_tbl)
 		return 0;
 
-	rc = ena_com_ind_tbl_convert_from_device(ena_dev);
-	if (unlikely(rc))
-		return rc;
-
 	for (i = 0; i < (1 << rss->tbl_log_size); i++)
 		ind_tbl[i] = rss->host_rss_ind_tbl[i];
 
@@ -2736,8 +2724,14 @@ int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size)
 	if (unlikely(rc))
 		goto err_indr_tbl;
 
+	/* The following function might return unsupported in case the
+	 * device doesn't support setting the key / hash function. We can safely
+	 * ignore this error and have indirection table support only.
+	 */
 	rc = ena_com_hash_key_allocate(ena_dev);
-	if (unlikely(rc))
+	if (likely(!rc))
+		ena_com_hash_key_fill_default_key(ena_dev);
+	else if (rc != ENA_COM_UNSUPPORTED)
 		goto err_hash_key;
 
 	rc = ena_com_hash_ctrl_init(ena_dev);
@@ -2887,42 +2881,35 @@ bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev)
 						  ENA_ADMIN_INTERRUPT_MODERATION);
 }
 
-int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
-						      u32 tx_coalesce_usecs)
+static int ena_com_update_nonadaptive_moderation_interval(u32 coalesce_usecs,
+							  u32 intr_delay_resolution,
+							  u32 *intr_moder_interval)
 {
-	if (!ena_dev->intr_delay_resolution) {
+	if (!intr_delay_resolution) {
 		ena_trc_err("Illegal interrupt delay granularity value\n");
 		return ENA_COM_FAULT;
 	}
 
-	ena_dev->intr_moder_tx_interval = tx_coalesce_usecs /
-		ena_dev->intr_delay_resolution;
+	*intr_moder_interval = coalesce_usecs / intr_delay_resolution;
 
 	return 0;
 }
 
-int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
-						      u32 rx_coalesce_usecs)
-{
-	if (!ena_dev->intr_delay_resolution) {
-		ena_trc_err("Illegal interrupt delay granularity value\n");
-		return ENA_COM_FAULT;
-	}
-
-	/* We use LOWEST entry of moderation table for storing
-	 * nonadaptive interrupt coalescing values
-	 */
-	ena_dev->intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval =
-		rx_coalesce_usecs / ena_dev->intr_delay_resolution;
-
-	return 0;
+int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
+						      u32 tx_coalesce_usecs)
+{
+	return ena_com_update_nonadaptive_moderation_interval(tx_coalesce_usecs,
+							      ena_dev->intr_delay_resolution,
+							      &ena_dev->intr_moder_tx_interval);
 }
 
-void ena_com_destroy_interrupt_moderation(struct ena_com_dev *ena_dev)
+int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
+						      u32 rx_coalesce_usecs)
 {
-	if (ena_dev->intr_moder_tbl)
-		ENA_MEM_FREE(ena_dev->dmadev, ena_dev->intr_moder_tbl);
-	ena_dev->intr_moder_tbl = NULL;
+	return ena_com_update_nonadaptive_moderation_interval(rx_coalesce_usecs,
+							      ena_dev->intr_delay_resolution,
+							      &ena_dev->intr_moder_rx_interval);
 }
 
 int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
@@ -2949,62 +2936,14 @@ int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
 		return rc;
 	}
 
-	rc = ena_com_init_interrupt_moderation_table(ena_dev);
-	if (rc)
-		goto err;
-
 	/* if moderation is supported by device we set adaptive moderation */
 	delay_resolution = get_resp.u.intr_moderation.intr_delay_resolution;
 	ena_com_update_intr_delay_resolution(ena_dev, delay_resolution);
-	ena_com_enable_adaptive_moderation(ena_dev);
-
-	return 0;
-err:
-	ena_com_destroy_interrupt_moderation(ena_dev);
-	return rc;
-}
+	/* Disable adaptive moderation by default - can be enabled later */
+	ena_com_disable_adaptive_moderation(ena_dev);
 
-void ena_com_config_default_interrupt_moderation_table(struct ena_com_dev *ena_dev)
-{
-	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
-
-	if (!intr_moder_tbl)
-		return;
-
-	intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval =
-		ENA_INTR_LOWEST_USECS;
-	intr_moder_tbl[ENA_INTR_MODER_LOWEST].pkts_per_interval =
-		ENA_INTR_LOWEST_PKTS;
-	intr_moder_tbl[ENA_INTR_MODER_LOWEST].bytes_per_interval =
-		ENA_INTR_LOWEST_BYTES;
-
-	intr_moder_tbl[ENA_INTR_MODER_LOW].intr_moder_interval =
-		ENA_INTR_LOW_USECS;
-	intr_moder_tbl[ENA_INTR_MODER_LOW].pkts_per_interval =
-		ENA_INTR_LOW_PKTS;
-	intr_moder_tbl[ENA_INTR_MODER_LOW].bytes_per_interval =
-		ENA_INTR_LOW_BYTES;
-
-	intr_moder_tbl[ENA_INTR_MODER_MID].intr_moder_interval =
-		ENA_INTR_MID_USECS;
-	intr_moder_tbl[ENA_INTR_MODER_MID].pkts_per_interval =
-		ENA_INTR_MID_PKTS;
-	intr_moder_tbl[ENA_INTR_MODER_MID].bytes_per_interval =
-		ENA_INTR_MID_BYTES;
-
-	intr_moder_tbl[ENA_INTR_MODER_HIGH].intr_moder_interval =
-		ENA_INTR_HIGH_USECS;
-	intr_moder_tbl[ENA_INTR_MODER_HIGH].pkts_per_interval =
-		ENA_INTR_HIGH_PKTS;
-	intr_moder_tbl[ENA_INTR_MODER_HIGH].bytes_per_interval =
-		ENA_INTR_HIGH_BYTES;
-
-	intr_moder_tbl[ENA_INTR_MODER_HIGHEST].intr_moder_interval =
-		ENA_INTR_HIGHEST_USECS;
-	intr_moder_tbl[ENA_INTR_MODER_HIGHEST].pkts_per_interval =
-		ENA_INTR_HIGHEST_PKTS;
-	intr_moder_tbl[ENA_INTR_MODER_HIGHEST].bytes_per_interval =
-		ENA_INTR_HIGHEST_BYTES;
+	return 0;
 }
 
 unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev)
@@ -3014,57 +2953,15 @@ unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev)
 
 unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev)
 {
-	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
-
-	if (intr_moder_tbl)
-		return intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval;
-
-	return 0;
-}
-
-void ena_com_init_intr_moderation_entry(struct ena_com_dev *ena_dev,
-					enum ena_intr_moder_level level,
-					struct ena_intr_moder_entry *entry)
-{
-	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
-
-	if (level >= ENA_INTR_MAX_NUM_OF_LEVELS)
-		return;
-
-	intr_moder_tbl[level].intr_moder_interval = entry->intr_moder_interval;
-	if (ena_dev->intr_delay_resolution)
-		intr_moder_tbl[level].intr_moder_interval /=
-			ena_dev->intr_delay_resolution;
-
-	intr_moder_tbl[level].pkts_per_interval = entry->pkts_per_interval;
-
-	/* use hardcoded value until ethtool supports bytecount parameter */
-	if (entry->bytes_per_interval != ENA_INTR_BYTE_COUNT_NOT_SUPPORTED)
-		intr_moder_tbl[level].bytes_per_interval = entry->bytes_per_interval;
-}
-
-void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev,
-				       enum ena_intr_moder_level level,
-				       struct ena_intr_moder_entry *entry)
-{
-	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
-
-	if (level >= ENA_INTR_MAX_NUM_OF_LEVELS)
-		return;
-
-	entry->intr_moder_interval = intr_moder_tbl[level].intr_moder_interval;
-	if (ena_dev->intr_delay_resolution)
-		entry->intr_moder_interval *= ena_dev->intr_delay_resolution;
-
-	entry->pkts_per_interval =
-		intr_moder_tbl[level].pkts_per_interval;
-	entry->bytes_per_interval = intr_moder_tbl[level].bytes_per_interval;
+	return ena_dev->intr_moder_rx_interval;
 }
 
 int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
 			    struct ena_admin_feature_llq_desc *llq_features,
 			    struct ena_llq_configurations *llq_default_cfg)
 {
+	struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
 	int rc;
-	int size;
 
 	if (!llq_features->max_llq_num) {
 		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
@@ -3075,14 +2972,12 @@ int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
 	if (rc)
 		return rc;
 
-	/* Validate the descriptor is not too big */
-	size = ena_dev->tx_max_header_size;
-	size += ena_dev->llq_info.descs_num_before_header *
-		sizeof(struct ena_eth_io_tx_desc);
+	ena_dev->tx_max_header_size = llq_info->desc_list_entry_size -
+		(llq_info->descs_num_before_header * sizeof(struct ena_eth_io_tx_desc));
 
-	if (unlikely(ena_dev->llq_info.desc_list_entry_size < size)) {
+	if (unlikely(ena_dev->tx_max_header_size == 0)) {
 		ena_trc_err("the size of the LLQ entry is smaller than needed\n");
-		return ENA_COM_INVAL;
+		return -EINVAL;
 	}
 
 	ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV;