/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates.
 * All rights reserved.
 */

#include "ena_com.h"
/*****************************************************************************/
/*****************************************************************************/
/* Timeout in micro-sec */
#define ADMIN_CMD_TIMEOUT_US (3000000)

#define ENA_ASYNC_QUEUE_DEPTH 16
#define ENA_ADMIN_QUEUE_DEPTH 32

#define ENA_CTRL_MAJOR		0
#define ENA_CTRL_MINOR		0
#define ENA_CTRL_SUB_MINOR	1

#define MIN_ENA_CTRL_VER \
	(((ENA_CTRL_MAJOR) << \
	(ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT)) | \
	((ENA_CTRL_MINOR) << \
	(ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT)) | \
	(ENA_CTRL_SUB_MINOR))

#define ENA_DMA_ADDR_TO_UINT32_LOW(x)	((u32)((u64)(x)))
#define ENA_DMA_ADDR_TO_UINT32_HIGH(x)	((u32)(((u64)(x)) >> 32))
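/* Illustrative example (not part of the driver): for a DMA address of
 * 0x0000001234ABCD00, ENA_DMA_ADDR_TO_UINT32_LOW() yields 0x34ABCD00 and
 * ENA_DMA_ADDR_TO_UINT32_HIGH() yields 0x00000012. The device takes the two
 * halves in separate registers (see the *_BASE_LO/HI writes below).
 */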
#define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF

#define ENA_COM_BOUNCE_BUFFER_CNTRL_CNT	4

#define ENA_REGS_ADMIN_INTR_MASK 1

#define ENA_MIN_ADMIN_POLL_US 100

#define ENA_MAX_ADMIN_POLL_US 5000
/*****************************************************************************/
/*****************************************************************************/
/*****************************************************************************/
enum ena_cmd_status {
	ENA_CMD_SUBMITTED,
	ENA_CMD_COMPLETED,
	/* Abort - canceled by the driver */
	ENA_CMD_ABORTED,
};

struct ena_comp_ctx {
	ena_wait_event_t wait_event;
	struct ena_admin_acq_entry *user_cqe;
	u32 comp_size;
	enum ena_cmd_status status;
	/* status from the device */
	u8 comp_status;
	u8 cmd_opcode;
	bool occupied;
};

struct ena_com_stats_ctx {
	struct ena_admin_aq_get_stats_cmd get_cmd;
	struct ena_admin_acq_get_stats_resp get_resp;
};
static int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
				struct ena_common_mem_addr *ena_addr,
				dma_addr_t addr)
{
	if ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr) {
		ena_trc_err("dma address has more bits than the device supports\n");
		return ENA_COM_INVAL;
	}

	ena_addr->mem_addr_low = lower_32_bits(addr);
	ena_addr->mem_addr_high = (u16)upper_32_bits(addr);

	return 0;
}
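/* Illustrative note (assumed example value): with dma_addr_bits == 48,
 * GENMASK_ULL(47, 0) == 0x0000FFFFFFFFFFFF, so any address that sets bits
 * 48..63 fails the check above and ena_com_mem_addr_set() returns
 * ENA_COM_INVAL rather than silently truncating the address.
 */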
static int ena_com_admin_init_sq(struct ena_com_admin_queue *queue)
{
	struct ena_com_admin_sq *sq = &queue->sq;
	u16 size = ADMIN_SQ_SIZE(queue->q_depth);

	ENA_MEM_ALLOC_COHERENT(queue->q_dmadev, size, sq->entries, sq->dma_addr,
			       sq->mem_handle);

	if (!sq->entries) {
		ena_trc_err("memory allocation failed\n");
		return ENA_COM_NO_MEM;
	}

	sq->head = 0;
	sq->tail = 0;
	sq->phase = 1;

	sq->db_addr = NULL;

	return 0;
}
static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue)
{
	struct ena_com_admin_cq *cq = &queue->cq;
	u16 size = ADMIN_CQ_SIZE(queue->q_depth);

	ENA_MEM_ALLOC_COHERENT(queue->q_dmadev, size, cq->entries, cq->dma_addr,
			       cq->mem_handle);

	if (!cq->entries) {
		ena_trc_err("memory allocation failed\n");
		return ENA_COM_NO_MEM;
	}

	cq->head = 0;
	cq->phase = 1;

	return 0;
}
static int ena_com_admin_init_aenq(struct ena_com_dev *dev,
				   struct ena_aenq_handlers *aenq_handlers)
{
	struct ena_com_aenq *aenq = &dev->aenq;
	u32 addr_low, addr_high, aenq_caps;
	u16 size;

	dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
	size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH);
	ENA_MEM_ALLOC_COHERENT(dev->dmadev, size,
			       aenq->entries,
			       aenq->dma_addr,
			       aenq->mem_handle);

	if (!aenq->entries) {
		ena_trc_err("memory allocation failed\n");
		return ENA_COM_NO_MEM;
	}

	aenq->head = aenq->q_depth;
	aenq->phase = 1;

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(aenq->dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(aenq->dma_addr);

	ENA_REG_WRITE32(dev->bus, addr_low, dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF);
	ENA_REG_WRITE32(dev->bus, addr_high, dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF);

	aenq_caps = 0;
	aenq_caps |= dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
	aenq_caps |= (sizeof(struct ena_admin_aenq_entry) <<
		      ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
		      ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
	ENA_REG_WRITE32(dev->bus, aenq_caps, dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);

	if (unlikely(!aenq_handlers)) {
		ena_trc_err("aenq handlers pointer is NULL\n");
		return ENA_COM_INVAL;
	}

	aenq->aenq_handlers = aenq_handlers;

	return 0;
}
static void comp_ctxt_release(struct ena_com_admin_queue *queue,
			      struct ena_comp_ctx *comp_ctx)
{
	comp_ctx->occupied = false;
	ATOMIC32_DEC(&queue->outstanding_cmds);
}
static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *queue,
					  u16 command_id, bool capture)
{
	if (unlikely(command_id >= queue->q_depth)) {
		ena_trc_err("command id is larger than the queue size. cmd_id: %u queue size %d\n",
			    command_id, queue->q_depth);
		return NULL;
	}

	if (unlikely(!queue->comp_ctx)) {
		ena_trc_err("Completion context is NULL\n");
		return NULL;
	}

	if (unlikely(queue->comp_ctx[command_id].occupied && capture)) {
		ena_trc_err("Completion context is occupied\n");
		return NULL;
	}

	if (capture) {
		ATOMIC32_INC(&queue->outstanding_cmds);
		queue->comp_ctx[command_id].occupied = true;
	}

	return &queue->comp_ctx[command_id];
}
static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
						       struct ena_admin_aq_entry *cmd,
						       size_t cmd_size_in_bytes,
						       struct ena_admin_acq_entry *comp,
						       size_t comp_size_in_bytes)
{
	struct ena_comp_ctx *comp_ctx;
	u16 tail_masked, cmd_id;
	u16 queue_size_mask;
	u16 cnt;

	queue_size_mask = admin_queue->q_depth - 1;

	tail_masked = admin_queue->sq.tail & queue_size_mask;

	/* In case of queue FULL */
	cnt = (u16)ATOMIC32_READ(&admin_queue->outstanding_cmds);
	if (cnt >= admin_queue->q_depth) {
		ena_trc_dbg("admin queue is full.\n");
		admin_queue->stats.out_of_space++;
		return ERR_PTR(ENA_COM_NO_SPACE);
	}

	cmd_id = admin_queue->curr_cmd_id;

	cmd->aq_common_descriptor.flags |= admin_queue->sq.phase &
		ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK;

	cmd->aq_common_descriptor.command_id |= cmd_id &
		ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;

	comp_ctx = get_comp_ctxt(admin_queue, cmd_id, true);
	if (unlikely(!comp_ctx))
		return ERR_PTR(ENA_COM_INVAL);

	comp_ctx->status = ENA_CMD_SUBMITTED;
	comp_ctx->comp_size = (u32)comp_size_in_bytes;
	comp_ctx->user_cqe = comp;
	comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode;

	ENA_WAIT_EVENT_CLEAR(comp_ctx->wait_event);

	memcpy(&admin_queue->sq.entries[tail_masked], cmd, cmd_size_in_bytes);

	admin_queue->curr_cmd_id = (admin_queue->curr_cmd_id + 1) &
		queue_size_mask;

	admin_queue->sq.tail++;
	admin_queue->stats.submitted_cmd++;

	if (unlikely((admin_queue->sq.tail & queue_size_mask) == 0))
		admin_queue->sq.phase = !admin_queue->sq.phase;

	ENA_DB_SYNC(&admin_queue->sq.mem_handle);
	ENA_REG_WRITE32(admin_queue->bus, admin_queue->sq.tail,
			admin_queue->sq.db_addr);

	return comp_ctx;
}
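/* Illustrative note (assumed example values): with q_depth == 32 the mask is
 * 0x1f, so curr_cmd_id and sq.tail wrap at 32. The phase bit written into
 * each descriptor flips every time the tail wraps, which lets the device
 * tell freshly written descriptors apart from stale ones left over from the
 * previous pass over the ring.
 */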
static int ena_com_init_comp_ctxt(struct ena_com_admin_queue *queue)
{
	size_t size = queue->q_depth * sizeof(struct ena_comp_ctx);
	struct ena_comp_ctx *comp_ctx;
	u16 i;

	queue->comp_ctx = ENA_MEM_ALLOC(queue->q_dmadev, size);
	if (unlikely(!queue->comp_ctx)) {
		ena_trc_err("memory allocation failed\n");
		return ENA_COM_NO_MEM;
	}

	for (i = 0; i < queue->q_depth; i++) {
		comp_ctx = get_comp_ctxt(queue, i, false);
		if (comp_ctx)
			ENA_WAIT_EVENT_INIT(comp_ctx->wait_event);
	}

	return 0;
}
static struct ena_comp_ctx *ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
						     struct ena_admin_aq_entry *cmd,
						     size_t cmd_size_in_bytes,
						     struct ena_admin_acq_entry *comp,
						     size_t comp_size_in_bytes)
{
	unsigned long flags = 0;
	struct ena_comp_ctx *comp_ctx;

	ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
	if (unlikely(!admin_queue->running_state)) {
		ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
		return ERR_PTR(ENA_COM_NO_DEVICE);
	}
	comp_ctx = __ena_com_submit_admin_cmd(admin_queue, cmd,
					      cmd_size_in_bytes,
					      comp,
					      comp_size_in_bytes);
	if (IS_ERR(comp_ctx))
		admin_queue->running_state = false;
	ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);

	return comp_ctx;
}
static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
			      struct ena_com_create_io_ctx *ctx,
			      struct ena_com_io_sq *io_sq)
{
	size_t size;
	int dev_node = 0;

	memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr));

	io_sq->dma_addr_bits = (u8)ena_dev->dma_addr_bits;
	io_sq->desc_entry_size =
		(io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
		sizeof(struct ena_eth_io_tx_desc) :
		sizeof(struct ena_eth_io_rx_desc);

	size = io_sq->desc_entry_size * io_sq->q_depth;
	io_sq->bus = ena_dev->bus;

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
		ENA_MEM_ALLOC_COHERENT_NODE(ena_dev->dmadev,
					    size,
					    io_sq->desc_addr.virt_addr,
					    io_sq->desc_addr.phys_addr,
					    io_sq->desc_addr.mem_handle,
					    ctx->numa_node,
					    dev_node);
		if (!io_sq->desc_addr.virt_addr) {
			ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
					       size,
					       io_sq->desc_addr.virt_addr,
					       io_sq->desc_addr.phys_addr,
					       io_sq->desc_addr.mem_handle);
		}

		if (!io_sq->desc_addr.virt_addr) {
			ena_trc_err("memory allocation failed\n");
			return ENA_COM_NO_MEM;
		}
	}

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
		/* Allocate bounce buffers */
		io_sq->bounce_buf_ctrl.buffer_size =
			ena_dev->llq_info.desc_list_entry_size;
		io_sq->bounce_buf_ctrl.buffers_num =
			ENA_COM_BOUNCE_BUFFER_CNTRL_CNT;
		io_sq->bounce_buf_ctrl.next_to_use = 0;

		size = io_sq->bounce_buf_ctrl.buffer_size *
			io_sq->bounce_buf_ctrl.buffers_num;

		ENA_MEM_ALLOC_NODE(ena_dev->dmadev,
				   size,
				   io_sq->bounce_buf_ctrl.base_buffer,
				   ctx->numa_node,
				   dev_node);
		if (!io_sq->bounce_buf_ctrl.base_buffer)
			io_sq->bounce_buf_ctrl.base_buffer = ENA_MEM_ALLOC(ena_dev->dmadev, size);

		if (!io_sq->bounce_buf_ctrl.base_buffer) {
			ena_trc_err("bounce buffer memory allocation failed\n");
			return ENA_COM_NO_MEM;
		}

		memcpy(&io_sq->llq_info, &ena_dev->llq_info,
		       sizeof(io_sq->llq_info));

		/* Initiate the first bounce buffer */
		io_sq->llq_buf_ctrl.curr_bounce_buf =
			ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
		memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
		       0x0, io_sq->llq_info.desc_list_entry_size);
		io_sq->llq_buf_ctrl.descs_left_in_line =
			io_sq->llq_info.descs_num_before_header;
		io_sq->disable_meta_caching =
			io_sq->llq_info.disable_meta_caching;

		if (io_sq->llq_info.max_entries_in_tx_burst > 0)
			io_sq->entries_in_tx_burst_left =
				io_sq->llq_info.max_entries_in_tx_burst;
	}

	io_sq->tail = 0;
	io_sq->next_to_comp = 0;
	io_sq->phase = 1;

	return 0;
}
static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
			      struct ena_com_create_io_ctx *ctx,
			      struct ena_com_io_cq *io_cq)
{
	size_t size;
	int prev_node = 0;

	memset(&io_cq->cdesc_addr, 0x0, sizeof(io_cq->cdesc_addr));

	/* Use the basic completion descriptor for Rx */
	io_cq->cdesc_entry_size_in_bytes =
		(io_cq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
		sizeof(struct ena_eth_io_tx_cdesc) :
		sizeof(struct ena_eth_io_rx_cdesc_base);

	size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
	io_cq->bus = ena_dev->bus;

	ENA_MEM_ALLOC_COHERENT_NODE(ena_dev->dmadev,
				    size,
				    io_cq->cdesc_addr.virt_addr,
				    io_cq->cdesc_addr.phys_addr,
				    io_cq->cdesc_addr.mem_handle,
				    ctx->numa_node,
				    prev_node);
	if (!io_cq->cdesc_addr.virt_addr) {
		ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
				       size,
				       io_cq->cdesc_addr.virt_addr,
				       io_cq->cdesc_addr.phys_addr,
				       io_cq->cdesc_addr.mem_handle);
	}

	if (!io_cq->cdesc_addr.virt_addr) {
		ena_trc_err("memory allocation failed\n");
		return ENA_COM_NO_MEM;
	}

	io_cq->phase = 1;
	io_cq->head = 0;

	return 0;
}
static void ena_com_handle_single_admin_completion(struct ena_com_admin_queue *admin_queue,
						   struct ena_admin_acq_entry *cqe)
{
	struct ena_comp_ctx *comp_ctx;
	u16 cmd_id;

	cmd_id = cqe->acq_common_descriptor.command &
		ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;

	comp_ctx = get_comp_ctxt(admin_queue, cmd_id, false);
	if (unlikely(!comp_ctx)) {
		ena_trc_err("comp_ctx is NULL. Changing the admin queue running state\n");
		admin_queue->running_state = false;
		return;
	}

	comp_ctx->status = ENA_CMD_COMPLETED;
	comp_ctx->comp_status = cqe->acq_common_descriptor.status;

	if (comp_ctx->user_cqe)
		memcpy(comp_ctx->user_cqe, (void *)cqe, comp_ctx->comp_size);

	if (!admin_queue->polling)
		ENA_WAIT_EVENT_SIGNAL(comp_ctx->wait_event);
}
static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_queue)
{
	struct ena_admin_acq_entry *cqe = NULL;
	u16 comp_num = 0;
	u16 head_masked;
	u8 phase;

	head_masked = admin_queue->cq.head & (admin_queue->q_depth - 1);
	phase = admin_queue->cq.phase;

	cqe = &admin_queue->cq.entries[head_masked];

	/* Go over all the completions */
	while ((READ_ONCE8(cqe->acq_common_descriptor.flags) &
		ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
		/* Do not read the rest of the completion entry before the
		 * phase bit was validated
		 */
		dma_rmb();
		ena_com_handle_single_admin_completion(admin_queue, cqe);

		head_masked++;
		comp_num++;
		if (unlikely(head_masked == admin_queue->q_depth)) {
			head_masked = 0;
			phase = !phase;
		}

		cqe = &admin_queue->cq.entries[head_masked];
	}

	admin_queue->cq.head += comp_num;
	admin_queue->cq.phase = phase;
	admin_queue->sq.head += comp_num;
	admin_queue->stats.completed_cmd += comp_num;
}
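/* Illustrative note: the consumer side mirrors the producer protocol above.
 * E.g. with q_depth == 32 and cq.phase == 1, entries written by the device
 * in the current pass carry phase 1; once head_masked wraps to 0 the driver
 * starts expecting phase 0, so a stale entry left from the previous pass can
 * never be mistaken for a fresh completion.
 */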
static int ena_com_comp_status_to_errno(u8 comp_status)
{
	if (unlikely(comp_status != 0))
		ena_trc_err("admin command failed[%u]\n", comp_status);

	switch (comp_status) {
	case ENA_ADMIN_SUCCESS:
		return 0;
	case ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE:
		return ENA_COM_NO_MEM;
	case ENA_ADMIN_UNSUPPORTED_OPCODE:
		return ENA_COM_UNSUPPORTED;
	case ENA_ADMIN_BAD_OPCODE:
	case ENA_ADMIN_MALFORMED_REQUEST:
	case ENA_ADMIN_ILLEGAL_PARAMETER:
	case ENA_ADMIN_UNKNOWN_ERROR:
		return ENA_COM_INVAL;
	case ENA_ADMIN_RESOURCE_BUSY:
		return ENA_COM_TRY_AGAIN;
	}

	return ENA_COM_INVAL;
}
static void ena_delay_exponential_backoff_us(u32 exp, u32 delay_us)
{
	delay_us = ENA_MAX32(ENA_MIN_ADMIN_POLL_US, delay_us);
	delay_us = ENA_MIN32(delay_us * (1U << exp), ENA_MAX_ADMIN_POLL_US);
	ENA_USLEEP(delay_us);
}
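/* Illustrative progression (assuming delay_us == 0, so the
 * ENA_MIN_ADMIN_POLL_US floor applies): exp 0..6 sleeps
 * 100, 200, 400, 800, 1600, 3200, 5000 us; every later iteration stays
 * clamped at ENA_MAX_ADMIN_POLL_US (5000 us).
 */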
static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx,
						     struct ena_com_admin_queue *admin_queue)
{
	unsigned long flags = 0;
	ena_time_t timeout;
	int ret;
	u32 exp = 0;

	timeout = ENA_GET_SYSTEM_TIMEOUT(admin_queue->completion_timeout);

	while (1) {
		ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
		ena_com_handle_admin_completion(admin_queue);
		ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);

		if (comp_ctx->status != ENA_CMD_SUBMITTED)
			break;

		if (ENA_TIME_EXPIRE(timeout)) {
			ena_trc_err("Wait for completion (polling) timeout\n");
			/* ENA didn't have any completion */
			ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
			admin_queue->stats.no_completion++;
			admin_queue->running_state = false;
			ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);

			ret = ENA_COM_TIMER_EXPIRED;
			goto err;
		}

		ena_delay_exponential_backoff_us(exp++,
						 admin_queue->ena_dev->ena_min_poll_delay_us);
	}

	if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
		ena_trc_err("Command was aborted\n");
		ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
		admin_queue->stats.aborted_cmd++;
		ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);

		ret = ENA_COM_NO_DEVICE;
		goto err;
	}

	ENA_WARN(comp_ctx->status != ENA_CMD_COMPLETED,
		 "Invalid comp status %d\n", comp_ctx->status);

	ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
err:
	comp_ctxt_release(admin_queue, comp_ctx);
	return ret;
}
/* Set the LLQ configurations of the firmware
 *
 * The driver provides only the enabled feature values to the device,
 * which in turn, checks if they are supported.
 */
static int ena_com_set_llq(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
	int ret;

	memset(&cmd, 0x0, sizeof(cmd));
	admin_queue = &ena_dev->admin_queue;

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.feat_common.feature_id = ENA_ADMIN_LLQ;

	cmd.u.llq.header_location_ctrl_enabled = llq_info->header_location_ctrl;
	cmd.u.llq.entry_size_ctrl_enabled = llq_info->desc_list_entry_size_ctrl;
	cmd.u.llq.desc_num_before_header_enabled = llq_info->descs_num_before_header;
	cmd.u.llq.descriptors_stride_ctrl_enabled = llq_info->desc_stride_ctrl;

	cmd.u.llq.accel_mode.u.set.enabled_flags =
		BIT(ENA_ADMIN_DISABLE_META_CACHING) |
		BIT(ENA_ADMIN_LIMIT_TX_BURST);

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));

	if (unlikely(ret))
		ena_trc_err("Failed to set LLQ configurations: %d\n", ret);

	return ret;
}
static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
				   struct ena_admin_feature_llq_desc *llq_features,
				   struct ena_llq_configurations *llq_default_cfg)
{
	struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
	struct ena_admin_accel_mode_get llq_accel_mode_get;
	u16 supported_feat;
	int rc;

	memset(llq_info, 0, sizeof(*llq_info));

	supported_feat = llq_features->header_location_ctrl_supported;

	if (likely(supported_feat & llq_default_cfg->llq_header_location)) {
		llq_info->header_location_ctrl =
			llq_default_cfg->llq_header_location;
	} else {
		ena_trc_err("Invalid header location control, supported: 0x%x\n",
			    supported_feat);
		return ENA_COM_INVAL;
	}

	if (likely(llq_info->header_location_ctrl == ENA_ADMIN_INLINE_HEADER)) {
		supported_feat = llq_features->descriptors_stride_ctrl_supported;
		if (likely(supported_feat & llq_default_cfg->llq_stride_ctrl)) {
			llq_info->desc_stride_ctrl = llq_default_cfg->llq_stride_ctrl;
		} else {
			if (supported_feat & ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY) {
				llq_info->desc_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
			} else if (supported_feat & ENA_ADMIN_SINGLE_DESC_PER_ENTRY) {
				llq_info->desc_stride_ctrl = ENA_ADMIN_SINGLE_DESC_PER_ENTRY;
			} else {
				ena_trc_err("Invalid desc_stride_ctrl, supported: 0x%x\n",
					    supported_feat);
				return ENA_COM_INVAL;
			}

			ena_trc_err("Default llq stride ctrl is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
				    llq_default_cfg->llq_stride_ctrl,
				    supported_feat,
				    llq_info->desc_stride_ctrl);
		}
	} else {
		llq_info->desc_stride_ctrl = 0;
	}

	supported_feat = llq_features->entry_size_ctrl_supported;
	if (likely(supported_feat & llq_default_cfg->llq_ring_entry_size)) {
		llq_info->desc_list_entry_size_ctrl = llq_default_cfg->llq_ring_entry_size;
		llq_info->desc_list_entry_size = llq_default_cfg->llq_ring_entry_size_value;
	} else {
		if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_128B) {
			llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
			llq_info->desc_list_entry_size = 128;
		} else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_192B) {
			llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_192B;
			llq_info->desc_list_entry_size = 192;
		} else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_256B) {
			llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_256B;
			llq_info->desc_list_entry_size = 256;
		} else {
			ena_trc_err("Invalid entry_size_ctrl, supported: 0x%x\n", supported_feat);
			return ENA_COM_INVAL;
		}

		ena_trc_err("Default llq ring entry size is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
			    llq_default_cfg->llq_ring_entry_size,
			    supported_feat,
			    llq_info->desc_list_entry_size);
	}

	if (unlikely(llq_info->desc_list_entry_size & 0x7)) {
		/* The desc list entry size should be a whole multiple of 8.
		 * This requirement comes from __iowrite64_copy()
		 */
		ena_trc_err("illegal entry size %d\n",
			    llq_info->desc_list_entry_size);
		return ENA_COM_INVAL;
	}

	if (llq_info->desc_stride_ctrl == ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY)
		llq_info->descs_per_entry = llq_info->desc_list_entry_size /
			sizeof(struct ena_eth_io_tx_desc);
	else
		llq_info->descs_per_entry = 1;

	supported_feat = llq_features->desc_num_before_header_supported;
	if (likely(supported_feat & llq_default_cfg->llq_num_decs_before_header)) {
		llq_info->descs_num_before_header = llq_default_cfg->llq_num_decs_before_header;
	} else {
		if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2) {
			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
		} else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1) {
			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1;
		} else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4) {
			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4;
		} else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8) {
			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8;
		} else {
			ena_trc_err("Invalid descs_num_before_header, supported: 0x%x\n",
				    supported_feat);
			return ENA_COM_INVAL;
		}

		ena_trc_err("Default llq num descs before header is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
			    llq_default_cfg->llq_num_decs_before_header,
			    supported_feat,
			    llq_info->descs_num_before_header);
	}
	/* Check if the accelerated queue mode is supported */
	llq_accel_mode_get = llq_features->accel_mode.u.get;

	llq_info->disable_meta_caching =
		!!(llq_accel_mode_get.supported_flags &
		   BIT(ENA_ADMIN_DISABLE_META_CACHING));

	if (llq_accel_mode_get.supported_flags & BIT(ENA_ADMIN_LIMIT_TX_BURST))
		llq_info->max_entries_in_tx_burst =
			llq_accel_mode_get.max_tx_burst_size /
			llq_default_cfg->llq_ring_entry_size_value;

	rc = ena_com_set_llq(ena_dev);
	if (rc)
		ena_trc_err("Cannot set LLQ configuration: %d\n", rc);

	return rc;
}
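/* Illustrative sketch (not part of this file; the chosen values are
 * assumptions): a caller might describe its preferred LLQ layout like this
 * and let ena_com_config_llq_info() fall back per the device's supported
 * masks:
 *
 *	struct ena_llq_configurations llq_cfg = {
 *		.llq_header_location        = ENA_ADMIN_INLINE_HEADER,
 *		.llq_stride_ctrl            = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY,
 *		.llq_ring_entry_size        = ENA_ADMIN_LIST_ENTRY_SIZE_128B,
 *		.llq_ring_entry_size_value  = 128,
 *		.llq_num_decs_before_header =
 *			ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2,
 *	};
 *	rc = ena_com_config_llq_info(ena_dev, llq_features, &llq_cfg);
 */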
static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,
							struct ena_com_admin_queue *admin_queue)
{
	unsigned long flags = 0;
	int ret;

	ENA_WAIT_EVENT_WAIT(comp_ctx->wait_event,
			    admin_queue->completion_timeout);

	/* In case the command wasn't completed, find out the root cause.
	 * There might be 2 kinds of errors:
	 * 1) No completion (timeout reached)
	 * 2) There is a completion but the driver didn't receive a MSI-X interrupt.
	 */
	if (unlikely(comp_ctx->status == ENA_CMD_SUBMITTED)) {
		ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
		ena_com_handle_admin_completion(admin_queue);
		admin_queue->stats.no_completion++;
		ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);

		if (comp_ctx->status == ENA_CMD_COMPLETED) {
			ena_trc_err("The ena device sent a completion but the driver didn't receive a MSI-X interrupt (cmd %d), autopolling mode is %s\n",
				    comp_ctx->cmd_opcode, admin_queue->auto_polling ? "ON" : "OFF");
			/* Check if fallback to polling is enabled */
			if (admin_queue->auto_polling)
				admin_queue->polling = true;
		} else {
			ena_trc_err("The ena device didn't send a completion for the admin cmd %d status %d\n",
				    comp_ctx->cmd_opcode, comp_ctx->status);
		}
		/* Check if shifted to polling mode.
		 * This will happen if there is a completion without an interrupt
		 * and autopolling mode is enabled. Continue normal execution in such a case.
		 */
		if (!admin_queue->polling) {
			admin_queue->running_state = false;
			ret = ENA_COM_TIMER_EXPIRED;
			goto err;
		}
	}

	ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
err:
	comp_ctxt_release(admin_queue, comp_ctx);
	return ret;
}
/* This method reads the hardware device register through posting writes
 * and waiting for response.
 * On timeout the function will return ENA_MMIO_READ_TIMEOUT
 */
static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
	volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp =
		mmio_read->read_resp;
	u32 mmio_read_reg, ret, i;
	unsigned long flags = 0;
	u32 timeout = mmio_read->reg_read_to;

	ENA_MIGHT_SLEEP();

	if (timeout == 0)
		timeout = ENA_REG_READ_TIMEOUT;

	/* If readless is disabled, perform regular read */
	if (!mmio_read->readless_supported)
		return ENA_REG_READ32(ena_dev->bus, ena_dev->reg_bar + offset);

	ENA_SPINLOCK_LOCK(mmio_read->lock, flags);
	mmio_read->seq_num++;

	read_resp->req_id = mmio_read->seq_num + 0xDEAD;
	mmio_read_reg = (offset << ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT) &
			ENA_REGS_MMIO_REG_READ_REG_OFF_MASK;
	mmio_read_reg |= mmio_read->seq_num &
			ENA_REGS_MMIO_REG_READ_REQ_ID_MASK;

	ENA_REG_WRITE32(ena_dev->bus, mmio_read_reg,
			ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);

	for (i = 0; i < timeout; i++) {
		if (READ_ONCE16(read_resp->req_id) == mmio_read->seq_num)
			break;

		ENA_UDELAY(1);
	}

	if (unlikely(i == timeout)) {
		ena_trc_err("reading reg failed for timeout. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n",
			    mmio_read->seq_num,
			    offset,
			    read_resp->req_id,
			    read_resp->reg_off);
		ret = ENA_MMIO_READ_TIMEOUT;
		goto err;
	}

	if (read_resp->reg_off != offset) {
		ena_trc_err("Read failure: wrong offset provided\n");
		ret = ENA_MMIO_READ_TIMEOUT;
	} else {
		ret = read_resp->reg_val;
	}
err:
	ENA_SPINLOCK_UNLOCK(mmio_read->lock, flags);

	return ret;
}
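/* Usage sketch (illustrative): readless reads are plain function calls from
 * the caller's point of view, e.g.
 *
 *	u32 ver = ena_com_reg_bar_read32(ena_dev, ENA_REGS_VERSION_OFF);
 *	if (ver == ENA_MMIO_READ_TIMEOUT)
 *		return ENA_COM_TIMER_EXPIRED;
 *
 * The 0xDEAD bias written into req_id above just guarantees that the stale
 * value can never equal the new seq_num while the device response is pending.
 */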
/* There are two ways to wait for completion.
 * Polling mode - wait until the completion is available.
 * Async mode - wait on wait queue until the completion is ready
 * (or the timeout expired).
 * It is expected that the IRQ called ena_com_handle_admin_completion
 * to mark the completions.
 */
static int ena_com_wait_and_process_admin_cq(struct ena_comp_ctx *comp_ctx,
					     struct ena_com_admin_queue *admin_queue)
{
	if (admin_queue->polling)
		return ena_com_wait_and_process_admin_cq_polling(comp_ctx,
								 admin_queue);

	return ena_com_wait_and_process_admin_cq_interrupts(comp_ctx,
							    admin_queue);
}
static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
				 struct ena_com_io_sq *io_sq)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_destroy_sq_cmd destroy_cmd;
	struct ena_admin_acq_destroy_sq_resp_desc destroy_resp;
	u8 direction;
	int ret;

	memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));

	if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
		direction = ENA_ADMIN_SQ_DIRECTION_TX;
	else
		direction = ENA_ADMIN_SQ_DIRECTION_RX;

	destroy_cmd.sq.sq_identity |= (direction <<
		ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT) &
		ENA_ADMIN_SQ_SQ_DIRECTION_MASK;

	destroy_cmd.sq.sq_idx = io_sq->idx;
	destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_SQ;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&destroy_cmd,
					    sizeof(destroy_cmd),
					    (struct ena_admin_acq_entry *)&destroy_resp,
					    sizeof(destroy_resp));

	if (unlikely(ret && (ret != ENA_COM_NO_DEVICE)))
		ena_trc_err("failed to destroy io sq error: %d\n", ret);

	return ret;
}
static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
				  struct ena_com_io_sq *io_sq,
				  struct ena_com_io_cq *io_cq)
{
	size_t size;

	if (io_cq->cdesc_addr.virt_addr) {
		size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;

		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
				      size,
				      io_cq->cdesc_addr.virt_addr,
				      io_cq->cdesc_addr.phys_addr,
				      io_cq->cdesc_addr.mem_handle);

		io_cq->cdesc_addr.virt_addr = NULL;
	}

	if (io_sq->desc_addr.virt_addr) {
		size = io_sq->desc_entry_size * io_sq->q_depth;

		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
				      size,
				      io_sq->desc_addr.virt_addr,
				      io_sq->desc_addr.phys_addr,
				      io_sq->desc_addr.mem_handle);

		io_sq->desc_addr.virt_addr = NULL;
	}

	if (io_sq->bounce_buf_ctrl.base_buffer) {
		ENA_MEM_FREE(ena_dev->dmadev,
			     io_sq->bounce_buf_ctrl.base_buffer,
			     (io_sq->llq_info.desc_list_entry_size * ENA_COM_BOUNCE_BUFFER_CNTRL_CNT));
		io_sq->bounce_buf_ctrl.base_buffer = NULL;
	}
}
static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
				u16 exp_state)
{
	u32 val, exp = 0;
	ena_time_t timeout_stamp;

	/* Convert timeout from resolution of 100ms to us resolution. */
	timeout_stamp = ENA_GET_SYSTEM_TIMEOUT(100 * 1000 * timeout);

	while (1) {
		val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);

		if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) {
			ena_trc_err("Reg read timeout occurred\n");
			return ENA_COM_TIMER_EXPIRED;
		}

		if ((val & ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK) ==
			exp_state)
			return 0;

		if (ENA_TIME_EXPIRE(timeout_stamp))
			return ENA_COM_TIMER_EXPIRED;

		ena_delay_exponential_backoff_us(exp++, ena_dev->ena_min_poll_delay_us);
	}
}
static bool ena_com_check_supported_feature_id(struct ena_com_dev *ena_dev,
					       enum ena_admin_aq_feature_id feature_id)
{
	u32 feature_mask = 1 << feature_id;

	/* The device attributes feature is always supported */
	if ((feature_id != ENA_ADMIN_DEVICE_ATTRIBUTES) &&
	    !(ena_dev->supported_features & feature_mask))
		return false;

	return true;
}
static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
				  struct ena_admin_get_feat_resp *get_resp,
				  enum ena_admin_aq_feature_id feature_id,
				  dma_addr_t control_buf_dma_addr,
				  u32 control_buff_size,
				  u8 feature_ver)
{
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_get_feat_cmd get_cmd;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
		ena_trc_dbg("Feature %d isn't supported\n", feature_id);
		return ENA_COM_UNSUPPORTED;
	}

	memset(&get_cmd, 0x0, sizeof(get_cmd));
	admin_queue = &ena_dev->admin_queue;

	get_cmd.aq_common_descriptor.opcode = ENA_ADMIN_GET_FEATURE;

	if (control_buff_size)
		get_cmd.aq_common_descriptor.flags =
			ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	else
		get_cmd.aq_common_descriptor.flags = 0;

	ret = ena_com_mem_addr_set(ena_dev,
				   &get_cmd.control_buffer.address,
				   control_buf_dma_addr);
	if (unlikely(ret)) {
		ena_trc_err("memory address set failed\n");
		return ret;
	}

	get_cmd.control_buffer.length = control_buff_size;
	get_cmd.feat_common.feature_version = feature_ver;
	get_cmd.feat_common.feature_id = feature_id;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)
					    &get_cmd,
					    sizeof(get_cmd),
					    (struct ena_admin_acq_entry *)
					    get_resp,
					    sizeof(*get_resp));

	if (unlikely(ret))
		ena_trc_err("Failed to submit get_feature command %d error: %d\n",
			    feature_id, ret);

	return ret;
}
static int ena_com_get_feature(struct ena_com_dev *ena_dev,
			       struct ena_admin_get_feat_resp *get_resp,
			       enum ena_admin_aq_feature_id feature_id,
			       u8 feature_ver)
{
	return ena_com_get_feature_ex(ena_dev,
				      get_resp,
				      feature_id,
				      0,
				      0,
				      feature_ver);
}

int ena_com_get_current_hash_function(struct ena_com_dev *ena_dev)
{
	return ena_dev->rss.hash_func;
}
static void ena_com_hash_key_fill_default_key(struct ena_com_dev *ena_dev)
{
	struct ena_admin_feature_rss_flow_hash_control *hash_key =
		(ena_dev->rss).hash_key;

	ENA_RSS_FILL_KEY(&hash_key->key, sizeof(hash_key->key));
	/* The key buffer is stored in the device in an array of
	 * u32 elements.
	 */
	hash_key->keys_num = ENA_ADMIN_RSS_KEY_PARTS;
}
static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_RSS_HASH_FUNCTION))
		return ENA_COM_UNSUPPORTED;

	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
			       sizeof(*rss->hash_key),
			       rss->hash_key,
			       rss->hash_key_dma_addr,
			       rss->hash_key_mem_handle);

	if (unlikely(!rss->hash_key))
		return ENA_COM_NO_MEM;

	return 0;
}
static void ena_com_hash_key_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	if (rss->hash_key)
		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
				      sizeof(*rss->hash_key),
				      rss->hash_key,
				      rss->hash_key_dma_addr,
				      rss->hash_key_mem_handle);
	rss->hash_key = NULL;
}
static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
			       sizeof(*rss->hash_ctrl),
			       rss->hash_ctrl,
			       rss->hash_ctrl_dma_addr,
			       rss->hash_ctrl_mem_handle);

	if (unlikely(!rss->hash_ctrl))
		return ENA_COM_NO_MEM;

	return 0;
}
static void ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	if (rss->hash_ctrl)
		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
				      sizeof(*rss->hash_ctrl),
				      rss->hash_ctrl,
				      rss->hash_ctrl_dma_addr,
				      rss->hash_ctrl_mem_handle);
	rss->hash_ctrl = NULL;
}
static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
					   u16 log_size)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	size_t tbl_size;
	int ret;

	ret = ena_com_get_feature(ena_dev, &get_resp,
				  ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG, 0);
	if (unlikely(ret))
		return ret;

	if ((get_resp.u.ind_table.min_size > log_size) ||
	    (get_resp.u.ind_table.max_size < log_size)) {
		ena_trc_err("indirect table size doesn't fit. requested size: %d while min is: %d and max is: %d\n",
			    1 << log_size,
			    1 << get_resp.u.ind_table.min_size,
			    1 << get_resp.u.ind_table.max_size);
		return ENA_COM_INVAL;
	}

	tbl_size = (1ULL << log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
			       tbl_size,
			       rss->rss_ind_tbl,
			       rss->rss_ind_tbl_dma_addr,
			       rss->rss_ind_tbl_mem_handle);
	if (unlikely(!rss->rss_ind_tbl))
		goto mem_err1;

	tbl_size = (1ULL << log_size) * sizeof(u16);
	rss->host_rss_ind_tbl =
		ENA_MEM_ALLOC(ena_dev->dmadev, tbl_size);
	if (unlikely(!rss->host_rss_ind_tbl))
		goto mem_err2;

	rss->tbl_log_size = log_size;

	return 0;

mem_err2:
	tbl_size = (1ULL << log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
			      tbl_size,
			      rss->rss_ind_tbl,
			      rss->rss_ind_tbl_dma_addr,
			      rss->rss_ind_tbl_mem_handle);
	rss->rss_ind_tbl = NULL;
mem_err1:
	rss->tbl_log_size = 0;
	return ENA_COM_NO_MEM;
}
static void ena_com_indirect_table_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;
	size_t tbl_size = (1ULL << rss->tbl_log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	if (rss->rss_ind_tbl)
		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
				      tbl_size,
				      rss->rss_ind_tbl,
				      rss->rss_ind_tbl_dma_addr,
				      rss->rss_ind_tbl_mem_handle);
	rss->rss_ind_tbl = NULL;

	if (rss->host_rss_ind_tbl)
		ENA_MEM_FREE(ena_dev->dmadev,
			     rss->host_rss_ind_tbl,
			     ((1ULL << rss->tbl_log_size) * sizeof(u16)));
	rss->host_rss_ind_tbl = NULL;
}
static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
				struct ena_com_io_sq *io_sq, u16 cq_idx)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_create_sq_cmd create_cmd;
	struct ena_admin_acq_create_sq_resp_desc cmd_completion;
	u8 direction;
	int ret;

	memset(&create_cmd, 0x0, sizeof(create_cmd));

	create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_SQ;

	if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
		direction = ENA_ADMIN_SQ_DIRECTION_TX;
	else
		direction = ENA_ADMIN_SQ_DIRECTION_RX;

	create_cmd.sq_identity |= (direction <<
		ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT) &
		ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK;

	create_cmd.sq_caps_2 |= io_sq->mem_queue_type &
		ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK;

	create_cmd.sq_caps_2 |= (ENA_ADMIN_COMPLETION_POLICY_DESC <<
		ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT) &
		ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK;

	create_cmd.sq_caps_3 |=
		ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK;

	create_cmd.cq_idx = cq_idx;
	create_cmd.sq_depth = io_sq->q_depth;

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
		ret = ena_com_mem_addr_set(ena_dev,
					   &create_cmd.sq_ba,
					   io_sq->desc_addr.phys_addr);
		if (unlikely(ret)) {
			ena_trc_err("memory address set failed\n");
			return ret;
		}
	}

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&create_cmd,
					    sizeof(create_cmd),
					    (struct ena_admin_acq_entry *)&cmd_completion,
					    sizeof(cmd_completion));
	if (unlikely(ret)) {
		ena_trc_err("Failed to create IO SQ. error: %d\n", ret);
		return ret;
	}

	io_sq->idx = cmd_completion.sq_idx;

	io_sq->db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
		(uintptr_t)cmd_completion.sq_doorbell_offset);

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
		io_sq->header_addr = (u8 __iomem *)((uintptr_t)ena_dev->mem_bar
				+ cmd_completion.llq_headers_offset);

		io_sq->desc_addr.pbuf_dev_addr =
			(u8 __iomem *)((uintptr_t)ena_dev->mem_bar +
			cmd_completion.llq_descriptors_offset);
	}

	ena_trc_dbg("created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth);

	return ret;
}
static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_com_io_sq *io_sq;
	size_t i;
	u16 qid;

	for (i = 0; i < 1 << rss->tbl_log_size; i++) {
		qid = rss->host_rss_ind_tbl[i];
		if (qid >= ENA_TOTAL_NUM_QUEUES)
			return ENA_COM_INVAL;

		io_sq = &ena_dev->io_sq_queues[qid];

		if (io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX)
			return ENA_COM_INVAL;

		rss->rss_ind_tbl[i].cq_idx = io_sq->idx;
	}

	return 0;
}
static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev,
						 u16 intr_delay_resolution)
{
	u16 prev_intr_delay_resolution = ena_dev->intr_delay_resolution;

	if (unlikely(!intr_delay_resolution)) {
		ena_trc_err("Illegal intr_delay_resolution provided. Going to use default 1 usec resolution\n");
		intr_delay_resolution = ENA_DEFAULT_INTR_DELAY_RESOLUTION;
	}

	/* update Rx */
	ena_dev->intr_moder_rx_interval =
		ena_dev->intr_moder_rx_interval *
		prev_intr_delay_resolution /
		intr_delay_resolution;

	/* update Tx */
	ena_dev->intr_moder_tx_interval =
		ena_dev->intr_moder_tx_interval *
		prev_intr_delay_resolution /
		intr_delay_resolution;

	ena_dev->intr_delay_resolution = intr_delay_resolution;
}
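/* Worked example (assumed values): if the previous resolution was 1 us, the
 * new one is 4 us, and intr_moder_rx_interval was 64 (i.e. 64 us), the
 * rescaling above stores 64 * 1 / 4 = 16 units, which still encodes 64 us
 * at the new 4 us granularity.
 */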
/*****************************************************************************/
/*******************************      API       ******************************/
/*****************************************************************************/
int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue,
				  struct ena_admin_aq_entry *cmd,
				  size_t cmd_size,
				  struct ena_admin_acq_entry *comp,
				  size_t comp_size)
{
	struct ena_comp_ctx *comp_ctx;
	int ret;

	comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size,
					    comp, comp_size);
	if (IS_ERR(comp_ctx)) {
		if (comp_ctx == ERR_PTR(ENA_COM_NO_DEVICE))
			ena_trc_dbg("Failed to submit command [%ld]\n",
				    PTR_ERR(comp_ctx));
		else
			ena_trc_err("Failed to submit command [%ld]\n",
				    PTR_ERR(comp_ctx));

		return PTR_ERR(comp_ctx);
	}

	ret = ena_com_wait_and_process_admin_cq(comp_ctx, admin_queue);
	if (unlikely(ret)) {
		if (admin_queue->running_state)
			ena_trc_err("Failed to process command. ret = %d\n",
				    ret);
		else
			ena_trc_dbg("Failed to process command. ret = %d\n",
				    ret);
	}
	return ret;
}
int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
			 struct ena_com_io_cq *io_cq)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_create_cq_cmd create_cmd;
	struct ena_admin_acq_create_cq_resp_desc cmd_completion;
	int ret;

	memset(&create_cmd, 0x0, sizeof(create_cmd));

	create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_CQ;

	create_cmd.cq_caps_2 |= (io_cq->cdesc_entry_size_in_bytes / 4) &
		ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;
	create_cmd.cq_caps_1 |=
		ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK;

	create_cmd.msix_vector = io_cq->msix_vector;
	create_cmd.cq_depth = io_cq->q_depth;

	ret = ena_com_mem_addr_set(ena_dev,
				   &create_cmd.cq_ba,
				   io_cq->cdesc_addr.phys_addr);
	if (unlikely(ret)) {
		ena_trc_err("memory address set failed\n");
		return ret;
	}

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&create_cmd,
					    sizeof(create_cmd),
					    (struct ena_admin_acq_entry *)&cmd_completion,
					    sizeof(cmd_completion));
	if (unlikely(ret)) {
		ena_trc_err("Failed to create IO CQ. error: %d\n", ret);
		return ret;
	}

	io_cq->idx = cmd_completion.cq_idx;

	io_cq->unmask_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
		cmd_completion.cq_interrupt_unmask_register_offset);

	if (cmd_completion.cq_head_db_register_offset)
		io_cq->cq_head_db_reg =
			(u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
			cmd_completion.cq_head_db_register_offset);

	if (cmd_completion.numa_node_register_offset)
		io_cq->numa_node_cfg_reg =
			(u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
			cmd_completion.numa_node_register_offset);

	ena_trc_dbg("created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth);

	return ret;
}
int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
			    struct ena_com_io_sq **io_sq,
			    struct ena_com_io_cq **io_cq)
{
	if (qid >= ENA_TOTAL_NUM_QUEUES) {
		ena_trc_err("Invalid queue number %d but the max is %d\n",
			    qid, ENA_TOTAL_NUM_QUEUES);
		return ENA_COM_INVAL;
	}

	*io_sq = &ena_dev->io_sq_queues[qid];
	*io_cq = &ena_dev->io_cq_queues[qid];

	return 0;
}
void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_comp_ctx *comp_ctx;
	u16 i;

	if (!admin_queue->comp_ctx)
		return;

	for (i = 0; i < admin_queue->q_depth; i++) {
		comp_ctx = get_comp_ctxt(admin_queue, i, false);
		if (unlikely(!comp_ctx))
			break;

		comp_ctx->status = ENA_CMD_ABORTED;

		ENA_WAIT_EVENT_SIGNAL(comp_ctx->wait_event);
	}
}
void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	unsigned long flags = 0;
	u32 exp = 0;

	ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
	while (ATOMIC32_READ(&admin_queue->outstanding_cmds) != 0) {
		ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
		ena_delay_exponential_backoff_us(exp++, ena_dev->ena_min_poll_delay_us);
		ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
	}
	ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
}
int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
			  struct ena_com_io_cq *io_cq)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_destroy_cq_cmd destroy_cmd;
	struct ena_admin_acq_destroy_cq_resp_desc destroy_resp;
	int ret;

	memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));

	destroy_cmd.cq_idx = io_cq->idx;
	destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_CQ;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&destroy_cmd,
					    sizeof(destroy_cmd),
					    (struct ena_admin_acq_entry *)&destroy_resp,
					    sizeof(destroy_resp));

	if (unlikely(ret && (ret != ENA_COM_NO_DEVICE)))
		ena_trc_err("Failed to destroy IO CQ. error: %d\n", ret);

	return ret;
}
bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev)
{
	return ena_dev->admin_queue.running_state;
}

void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	unsigned long flags = 0;

	ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
	ena_dev->admin_queue.running_state = state;
	ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
}
void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev)
{
	u16 depth = ena_dev->aenq.q_depth;

	ENA_WARN(ena_dev->aenq.head != depth, "Invalid AENQ state\n");

	/* Init head_db to mark that all entries in the queue
	 * are initially available
	 */
	ENA_REG_WRITE32(ena_dev->bus, depth, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
}
int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
{
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	struct ena_admin_get_feat_resp get_resp;
	int ret;

	ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG, 0);
	if (ret) {
		ena_trc_info("Can't get aenq configuration\n");
		return ret;
	}

	if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) {
		ena_trc_warn("Trying to set unsupported aenq events. supported flag: 0x%x asked flag: 0x%x\n",
			     get_resp.u.aenq.supported_groups,
			     groups_flag);
		return ENA_COM_UNSUPPORTED;
	}

	memset(&cmd, 0x0, sizeof(cmd));
	admin_queue = &ena_dev->admin_queue;

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags = 0;
	cmd.feat_common.feature_id = ENA_ADMIN_AENQ_CONFIG;
	cmd.u.aenq.enabled_groups = groups_flag;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));

	if (unlikely(ret))
		ena_trc_err("Failed to config AENQ ret: %d\n", ret);

	return ret;
}
int ena_com_get_dma_width(struct ena_com_dev *ena_dev)
{
	u32 caps = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
	int width;

	if (unlikely(caps == ENA_MMIO_READ_TIMEOUT)) {
		ena_trc_err("Reg read timeout occurred\n");
		return ENA_COM_TIMER_EXPIRED;
	}

	width = (caps & ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK) >>
		ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT;

	ena_trc_dbg("ENA dma width: %d\n", width);

	if ((width < 32) || width > ENA_MAX_PHYS_ADDR_SIZE_BITS) {
		ena_trc_err("DMA width illegal value: %d\n", width);
		return ENA_COM_INVAL;
	}

	ena_dev->dma_addr_bits = width;

	return width;
}
int ena_com_validate_version(struct ena_com_dev *ena_dev)
{
	u32 ver;
	u32 ctrl_ver;
	u32 ctrl_ver_masked;

	/* Make sure the ENA version and the controller version are at least
	 * as the driver expects
	 */
	ver = ena_com_reg_bar_read32(ena_dev, ENA_REGS_VERSION_OFF);
	ctrl_ver = ena_com_reg_bar_read32(ena_dev,
					  ENA_REGS_CONTROLLER_VERSION_OFF);

	if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) ||
		     (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) {
		ena_trc_err("Reg read timeout occurred\n");
		return ENA_COM_TIMER_EXPIRED;
	}

	ena_trc_info("ena device version: %d.%d\n",
		     (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >>
		     ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
		     ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);

	ena_trc_info("ena controller version: %d.%d.%d implementation version %d\n",
		     (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK)
		     >> ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
		     (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK)
		     >> ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT,
		     (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK),
		     (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK) >>
		     ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT);

	ctrl_ver_masked =
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) |
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) |
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK);

	/* Validate the ctrl version without the implementation ID */
	if (ctrl_ver_masked < MIN_ENA_CTRL_VER) {
		ena_trc_err("ENA ctrl version is lower than the minimal ctrl version the driver supports\n");
		return -1;
	}

	return 0;
}
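/* Worked example: with the defaults above (0.0.1), MIN_ENA_CTRL_VER packs
 * major 0 and minor 0 into their shifted fields and leaves sub-minor 1 in
 * the low bits, so the masked controller version must be at least 1 for the
 * check above to pass.
 */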
void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_com_admin_cq *cq = &admin_queue->cq;
	struct ena_com_admin_sq *sq = &admin_queue->sq;
	struct ena_com_aenq *aenq = &ena_dev->aenq;
	u16 size;

	if (admin_queue->comp_ctx) {
		ENA_WAIT_EVENT_DESTROY(admin_queue->comp_ctx->wait_event);
		ENA_MEM_FREE(ena_dev->dmadev,
			     admin_queue->comp_ctx,
			     (admin_queue->q_depth * sizeof(struct ena_comp_ctx)));
	}

	admin_queue->comp_ctx = NULL;
	size = ADMIN_SQ_SIZE(admin_queue->q_depth);
	if (sq->entries)
		ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, sq->entries,
				      sq->dma_addr, sq->mem_handle);
	sq->entries = NULL;

	size = ADMIN_CQ_SIZE(admin_queue->q_depth);
	if (cq->entries)
		ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, cq->entries,
				      cq->dma_addr, cq->mem_handle);
	cq->entries = NULL;

	size = ADMIN_AENQ_SIZE(aenq->q_depth);
	if (ena_dev->aenq.entries)
		ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, aenq->entries,
				      aenq->dma_addr, aenq->mem_handle);
	aenq->entries = NULL;
	ENA_SPINLOCK_DESTROY(admin_queue->q_lock);
}
void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
{
	u32 mask_value = 0;

	if (polling)
		mask_value = ENA_REGS_ADMIN_INTR_MASK;

	ENA_REG_WRITE32(ena_dev->bus, mask_value,
			ena_dev->reg_bar + ENA_REGS_INTR_MASK_OFF);
	ena_dev->admin_queue.polling = polling;
}

bool ena_com_get_admin_polling_mode(struct ena_com_dev *ena_dev)
{
	return ena_dev->admin_queue.polling;
}

void ena_com_set_admin_auto_polling_mode(struct ena_com_dev *ena_dev,
					 bool polling)
{
	ena_dev->admin_queue.auto_polling = polling;
}
int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

	ENA_SPINLOCK_INIT(mmio_read->lock);
	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
			       sizeof(*mmio_read->read_resp),
			       mmio_read->read_resp,
			       mmio_read->read_resp_dma_addr,
			       mmio_read->read_resp_mem_handle);
	if (unlikely(!mmio_read->read_resp))
		goto err;

	ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);

	mmio_read->read_resp->req_id = 0x0;
	mmio_read->seq_num = 0x0;
	mmio_read->readless_supported = true;

	return 0;

err:
	ENA_SPINLOCK_DESTROY(mmio_read->lock);
	return ENA_COM_NO_MEM;
}
void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

	mmio_read->readless_supported = readless_supported;
}

void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

	ENA_REG_WRITE32(ena_dev->bus, 0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
	ENA_REG_WRITE32(ena_dev->bus, 0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);

	ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
			      sizeof(*mmio_read->read_resp),
			      mmio_read->read_resp,
			      mmio_read->read_resp_dma_addr,
			      mmio_read->read_resp_mem_handle);

	mmio_read->read_resp = NULL;
	ENA_SPINLOCK_DESTROY(mmio_read->lock);
}
void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
	u32 addr_low, addr_high;

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(mmio_read->read_resp_dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(mmio_read->read_resp_dma_addr);

	ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
	ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
}
int ena_com_admin_init(struct ena_com_dev *ena_dev,
		       struct ena_aenq_handlers *aenq_handlers)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	u32 aq_caps, acq_caps, dev_sts, addr_low, addr_high;
	int ret;

	dev_sts = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);

	if (unlikely(dev_sts == ENA_MMIO_READ_TIMEOUT)) {
		ena_trc_err("Reg read timeout occurred\n");
		return ENA_COM_TIMER_EXPIRED;
	}

	if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) {
		ena_trc_err("Device isn't ready, abort com init\n");
		return ENA_COM_NO_DEVICE;
	}

	admin_queue->q_depth = ENA_ADMIN_QUEUE_DEPTH;

	admin_queue->bus = ena_dev->bus;
	admin_queue->q_dmadev = ena_dev->dmadev;
	admin_queue->polling = false;
	admin_queue->curr_cmd_id = 0;

	ATOMIC32_SET(&admin_queue->outstanding_cmds, 0);

	ENA_SPINLOCK_INIT(admin_queue->q_lock);

	ret = ena_com_init_comp_ctxt(admin_queue);
	if (ret)
		goto error;

	ret = ena_com_admin_init_sq(admin_queue);
	if (ret)
		goto error;

	ret = ena_com_admin_init_cq(admin_queue);
	if (ret)
		goto error;

	admin_queue->sq.db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
		ENA_REGS_AQ_DB_OFF);

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->sq.dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->sq.dma_addr);

	ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_AQ_BASE_LO_OFF);
	ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_AQ_BASE_HI_OFF);

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->cq.dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->cq.dma_addr);

	ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_LO_OFF);
	ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_HI_OFF);

	aq_caps = 0;
	aq_caps |= admin_queue->q_depth & ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK;
	aq_caps |= (sizeof(struct ena_admin_aq_entry) <<
		   ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT) &
		   ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK;

	acq_caps = 0;
	acq_caps |= admin_queue->q_depth & ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK;
	acq_caps |= (sizeof(struct ena_admin_acq_entry) <<
		    ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT) &
		    ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK;

	ENA_REG_WRITE32(ena_dev->bus, aq_caps, ena_dev->reg_bar + ENA_REGS_AQ_CAPS_OFF);
	ENA_REG_WRITE32(ena_dev->bus, acq_caps, ena_dev->reg_bar + ENA_REGS_ACQ_CAPS_OFF);
	ret = ena_com_admin_init_aenq(ena_dev, aenq_handlers);
	if (ret)
		goto error;

	admin_queue->ena_dev = ena_dev;
	admin_queue->running_state = true;

	return 0;
error:
	ena_com_admin_destroy(ena_dev);

	return ret;
}
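/* Bring-up sketch (illustrative, error handling elided; the exact order is
 * an assumption, not mandated by this file): a driver typically calls the
 * routines here roughly as follows:
 *
 *	ena_com_mmio_reg_read_request_init(ena_dev);
 *	ena_com_validate_version(ena_dev);
 *	ena_com_admin_init(ena_dev, &aenq_handlers);
 *	ena_com_set_admin_polling_mode(ena_dev, false);
 *	ena_com_get_dev_attr_feat(ena_dev, &get_feat_ctx);
 */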
int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
			    struct ena_com_create_io_ctx *ctx)
{
	struct ena_com_io_sq *io_sq;
	struct ena_com_io_cq *io_cq;
	int ret;

	if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) {
		ena_trc_err("Qid (%d) is bigger than max num of queues (%d)\n",
			    ctx->qid, ENA_TOTAL_NUM_QUEUES);
		return ENA_COM_INVAL;
	}

	io_sq = &ena_dev->io_sq_queues[ctx->qid];
	io_cq = &ena_dev->io_cq_queues[ctx->qid];

	memset(io_sq, 0x0, sizeof(*io_sq));
	memset(io_cq, 0x0, sizeof(*io_cq));

	/* Init CQ */
	io_cq->q_depth = ctx->queue_size;
	io_cq->direction = ctx->direction;
	io_cq->qid = ctx->qid;

	io_cq->msix_vector = ctx->msix_vector;

	io_sq->q_depth = ctx->queue_size;
	io_sq->direction = ctx->direction;
	io_sq->qid = ctx->qid;

	io_sq->mem_queue_type = ctx->mem_queue_type;

	if (ctx->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
		/* header length is limited to 8 bits */
		io_sq->tx_max_header_size =
			ENA_MIN32(ena_dev->tx_max_header_size, SZ_256);

	ret = ena_com_init_io_sq(ena_dev, ctx, io_sq);
	if (ret)
		goto error;
	ret = ena_com_init_io_cq(ena_dev, ctx, io_cq);
	if (ret)
		goto error;

	ret = ena_com_create_io_cq(ena_dev, io_cq);
	if (ret)
		goto error;

	ret = ena_com_create_io_sq(ena_dev, io_sq, io_cq->idx);
	if (ret)
		goto destroy_io_cq;

	return 0;

destroy_io_cq:
	ena_com_destroy_io_cq(ena_dev, io_cq);
error:
	ena_com_io_queue_free(ena_dev, io_sq, io_cq);
	return ret;
}
void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid)
{
	struct ena_com_io_sq *io_sq;
	struct ena_com_io_cq *io_cq;

	if (qid >= ENA_TOTAL_NUM_QUEUES) {
		ena_trc_err("Qid (%d) is bigger than max num of queues (%d)\n",
			    qid, ENA_TOTAL_NUM_QUEUES);
		return;
	}

	io_sq = &ena_dev->io_sq_queues[qid];
	io_cq = &ena_dev->io_cq_queues[qid];

	ena_com_destroy_io_sq(ena_dev, io_sq);
	ena_com_destroy_io_cq(ena_dev, io_cq);

	ena_com_io_queue_free(ena_dev, io_sq, io_cq);
}
int ena_com_get_link_params(struct ena_com_dev *ena_dev,
			    struct ena_admin_get_feat_resp *resp)
{
	return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG, 0);
}
int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
			      struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
	struct ena_admin_get_feat_resp get_resp;
	int rc;

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_DEVICE_ATTRIBUTES, 0);
	if (rc)
		return rc;

	memcpy(&get_feat_ctx->dev_attr, &get_resp.u.dev_attr,
	       sizeof(get_resp.u.dev_attr));
	ena_dev->supported_features = get_resp.u.dev_attr.supported_features;

	if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
		rc = ena_com_get_feature(ena_dev, &get_resp,
					 ENA_ADMIN_MAX_QUEUES_EXT,
					 ENA_FEATURE_MAX_QUEUE_EXT_VER);
		if (rc)
			return rc;

		if (get_resp.u.max_queue_ext.version != ENA_FEATURE_MAX_QUEUE_EXT_VER)
			return ENA_COM_INVAL;

		memcpy(&get_feat_ctx->max_queue_ext, &get_resp.u.max_queue_ext,
		       sizeof(get_resp.u.max_queue_ext));
		ena_dev->tx_max_header_size =
			get_resp.u.max_queue_ext.max_queue_ext.max_tx_header_size;
	} else {
		rc = ena_com_get_feature(ena_dev, &get_resp,
					 ENA_ADMIN_MAX_QUEUES_NUM, 0);
		memcpy(&get_feat_ctx->max_queues, &get_resp.u.max_queue,
		       sizeof(get_resp.u.max_queue));
		ena_dev->tx_max_header_size =
			get_resp.u.max_queue.max_header_size;

		if (rc)
			return rc;
	}

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_AENQ_CONFIG, 0);
	if (rc)
		return rc;

	memcpy(&get_feat_ctx->aenq, &get_resp.u.aenq,
	       sizeof(get_resp.u.aenq));

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
	if (rc)
		return rc;

	memcpy(&get_feat_ctx->offload, &get_resp.u.offload,
	       sizeof(get_resp.u.offload));

	/* Driver hints isn't a mandatory admin command, so in case the
	 * command isn't supported set driver hints to 0
	 */
	rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_HW_HINTS, 0);

	if (!rc)
		memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints,
		       sizeof(get_resp.u.hw_hints));
	else if (rc == ENA_COM_UNSUPPORTED)
		memset(&get_feat_ctx->hw_hints, 0x0, sizeof(get_feat_ctx->hw_hints));
	else
		return rc;

	rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_LLQ, 0);
	if (!rc)
		memcpy(&get_feat_ctx->llq, &get_resp.u.llq,
		       sizeof(get_resp.u.llq));
	else if (rc == ENA_COM_UNSUPPORTED)
		memset(&get_feat_ctx->llq, 0x0, sizeof(get_feat_ctx->llq));
	else
		return rc;

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG, 0);
	if (!rc)
		memcpy(&get_feat_ctx->ind_table, &get_resp.u.ind_table,
		       sizeof(get_resp.u.ind_table));
	else if (rc == ENA_COM_UNSUPPORTED)
		memset(&get_feat_ctx->ind_table, 0x0,
		       sizeof(get_feat_ctx->ind_table));
	else
		return rc;

	return 0;
}
void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev)
{
	ena_com_handle_admin_completion(&ena_dev->admin_queue);
}
/* ena_handle_specific_aenq_event:
 * return the handler that is relevant to the specific event group
 */
static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *dev,
						     u16 group)
{
	struct ena_aenq_handlers *aenq_handlers = dev->aenq.aenq_handlers;

	if ((group < ENA_MAX_HANDLERS) && aenq_handlers->handlers[group])
		return aenq_handlers->handlers[group];

	return aenq_handlers->unimplemented_handler;
}
/* ena_com_aenq_intr_handler:
 * Handle incoming AENQ events: pop the events off the queue and invoke
 * the handler that matches each event's group.
 */
void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
{
	struct ena_admin_aenq_entry *aenq_e;
	struct ena_admin_aenq_common_desc *aenq_common;
	struct ena_com_aenq *aenq = &dev->aenq;
	u64 timestamp;
	ena_aenq_handler handler_cb;
	u16 masked_head, processed = 0;
	u8 phase;

	masked_head = aenq->head & (aenq->q_depth - 1);
	phase = aenq->phase;
	aenq_e = &aenq->entries[masked_head]; /* Get first entry */
	aenq_common = &aenq_e->aenq_common_desc;

	/* Go over all the events */
	while ((READ_ONCE8(aenq_common->flags) &
		ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) {
		/* Make sure the phase bit (ownership) is as expected before
		 * reading the rest of the descriptor.
		 */
		dma_rmb();

		timestamp = (u64)aenq_common->timestamp_low |
			((u64)aenq_common->timestamp_high << 32);
		ENA_TOUCH(timestamp); /* In case debug is disabled */
		ena_trc_dbg("AENQ! Group[%x] Syndrome[%x] timestamp: [%" ENA_PRIu64 "s]\n",
			    aenq_common->group,
			    aenq_common->syndrom,
			    timestamp);

		/* Handle the specific event */
		handler_cb = ena_com_get_specific_aenq_cb(dev,
							  aenq_common->group);
		handler_cb(data, aenq_e); /* call the actual event handler */

		/* Get next event entry */
		masked_head++;
		processed++;

		if (unlikely(masked_head == aenq->q_depth)) {
			masked_head = 0;
			phase = !phase;
		}

		aenq_e = &aenq->entries[masked_head];
		aenq_common = &aenq_e->aenq_common_desc;
	}

	aenq->head += processed;
	aenq->phase = phase;

	/* Don't update aenq doorbell if there weren't any processed events */
	if (!processed)
		return;

	/* write the aenq doorbell after all AENQ descriptors were read */
	mb();
	ENA_REG_WRITE32_RELAXED(dev->bus, (u32)aenq->head,
				dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
}

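/*
 * Usage sketch (illustrative; not part of this file): a driver supplies an
 * ena_aenq_handlers table when it brings up the admin queue and then calls
 * ena_com_aenq_intr_handler() from its AENQ interrupt routine. The names
 * my_link_change_handler, my_unimplemented_handler and my_aenq_isr are
 * hypothetical:
 *
 *	static void my_link_change_handler(void *data,
 *					   struct ena_admin_aenq_entry *e)
 *	{
 *		// react to ENA_ADMIN_LINK_CHANGE events
 *	}
 *
 *	static void my_unimplemented_handler(void *data,
 *					     struct ena_admin_aenq_entry *e)
 *	{
 *		// called for any group without a registered handler
 *	}
 *
 *	static struct ena_aenq_handlers my_aenq_handlers = {
 *		.handlers = {
 *			[ENA_ADMIN_LINK_CHANGE] = my_link_change_handler,
 *		},
 *		.unimplemented_handler = my_unimplemented_handler,
 *	};
 *
 *	static void my_aenq_isr(struct ena_com_dev *dev, void *adapter)
 *	{
 *		ena_com_aenq_intr_handler(dev, adapter);
 *	}
 */
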
int ena_com_dev_reset(struct ena_com_dev *ena_dev,
		      enum ena_regs_reset_reason_types reset_reason)
{
	u32 stat, timeout, cap, reset_val;
	int rc;

	stat = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
	cap = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);

	if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) ||
		     (cap == ENA_MMIO_READ_TIMEOUT))) {
		ena_trc_err("Reg read32 timeout occurred\n");
		return ENA_COM_TIMER_EXPIRED;
	}

	if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) {
		ena_trc_err("Device isn't ready, can't reset device\n");
		return ENA_COM_INVAL;
	}

	timeout = (cap & ENA_REGS_CAPS_RESET_TIMEOUT_MASK) >>
		  ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT;
	if (timeout == 0) {
		ena_trc_err("Invalid timeout value\n");
		return ENA_COM_INVAL;
	}

	/* start reset */
	reset_val = ENA_REGS_DEV_CTL_DEV_RESET_MASK;
	reset_val |= (reset_reason << ENA_REGS_DEV_CTL_RESET_REASON_SHIFT) &
		     ENA_REGS_DEV_CTL_RESET_REASON_MASK;
	ENA_REG_WRITE32(ena_dev->bus, reset_val, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);

	/* Write again the MMIO read request address */
	ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);

	rc = wait_for_reset_state(ena_dev, timeout,
				  ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK);
	if (rc != 0) {
		ena_trc_err("Reset indication didn't turn on\n");
		return rc;
	}

	/* reset done */
	ENA_REG_WRITE32(ena_dev->bus, 0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
	rc = wait_for_reset_state(ena_dev, timeout, 0);
	if (rc != 0) {
		ena_trc_err("Reset indication didn't turn off\n");
		return rc;
	}

	timeout = (cap & ENA_REGS_CAPS_ADMIN_CMD_TO_MASK) >>
		  ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT;
	if (timeout)
		/* the resolution of the admin command timeout register is
		 * 100 ms, while completion_timeout is in microseconds
		 */
		ena_dev->admin_queue.completion_timeout = timeout * 100000;
	else
		ena_dev->admin_queue.completion_timeout = ADMIN_CMD_TIMEOUT_US;

	return 0;
}

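/*
 * Usage sketch (illustrative; not part of this file): a typical recovery
 * path resets the device with a reason code and then re-creates the admin
 * queue and AENQ. ENA_REGS_RESET_NORMAL is one of
 * enum ena_regs_reset_reason_types; error handling is elided:
 *
 *	rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL);
 *	if (unlikely(rc))
 *		return rc;
 */
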
static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
			     struct ena_com_stats_ctx *ctx,
			     enum ena_admin_get_stats_type type)
{
	struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx->get_cmd;
	struct ena_admin_acq_get_stats_resp *get_resp = &ctx->get_resp;
	struct ena_com_admin_queue *admin_queue;
	int ret;

	admin_queue = &ena_dev->admin_queue;

	get_cmd->aq_common_descriptor.opcode = ENA_ADMIN_GET_STATS;
	get_cmd->aq_common_descriptor.flags = 0;
	get_cmd->type = type;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)get_cmd,
					    sizeof(*get_cmd),
					    (struct ena_admin_acq_entry *)get_resp,
					    sizeof(*get_resp));
	if (unlikely(ret))
		ena_trc_err("Failed to get stats. error: %d\n", ret);

	return ret;
}

int ena_com_get_eni_stats(struct ena_com_dev *ena_dev,
			  struct ena_admin_eni_stats *stats)
{
	struct ena_com_stats_ctx ctx;
	int ret;

	memset(&ctx, 0x0, sizeof(ctx));
	ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_ENI);
	if (likely(ret == 0))
		memcpy(stats, &ctx.get_resp.u.eni_stats,
		       sizeof(ctx.get_resp.u.eni_stats));

	return ret;
}

int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
				struct ena_admin_basic_stats *stats)
{
	struct ena_com_stats_ctx ctx;
	int ret;

	memset(&ctx, 0x0, sizeof(ctx));
	ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_BASIC);
	if (likely(ret == 0))
		memcpy(stats, &ctx.get_resp.u.basic_stats,
		       sizeof(ctx.get_resp.u.basic_stats));

	return ret;
}

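/*
 * Usage sketch (illustrative; not part of this file): both stats getters
 * follow the same pattern; the caller owns the output struct and combines
 * the 32-bit halves itself. total_rx_pkts is a hypothetical local:
 *
 *	struct ena_admin_basic_stats stats;
 *	u64 total_rx_pkts;
 *
 *	if (!ena_com_get_dev_basic_stats(ena_dev, &stats))
 *		total_rx_pkts = ((u64)stats.rx_pkts_high << 32) |
 *				stats.rx_pkts_low;
 */
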
int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu)
{
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
		ena_trc_dbg("Feature %d isn't supported\n", ENA_ADMIN_MTU);
		return ENA_COM_UNSUPPORTED;
	}

	memset(&cmd, 0x0, sizeof(cmd));
	admin_queue = &ena_dev->admin_queue;

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags = 0;
	cmd.feat_common.feature_id = ENA_ADMIN_MTU;
	cmd.u.mtu.mtu = mtu;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));
	if (unlikely(ret))
		ena_trc_err("Failed to set mtu %d. error: %d\n", mtu, ret);

	return ret;
}

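/*
 * Usage sketch (illustrative; not part of this file): setting a jumbo MTU.
 * A caller would normally validate the value against the max_mtu reported
 * in the device attributes before issuing the command:
 *
 *	rc = ena_com_set_dev_mtu(ena_dev, 9000);
 *	if (unlikely(rc))
 *		ena_trc_err("Failed to set device MTU\n");
 */
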
int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
				 struct ena_admin_feature_offload_desc *offload)
{
	struct ena_admin_get_feat_resp resp;
	int ret;

	ret = ena_com_get_feature(ena_dev, &resp,
				  ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
	if (unlikely(ret)) {
		ena_trc_err("Failed to get offload capabilities %d\n", ret);
		return ret;
	}

	memcpy(offload, &resp.u.offload, sizeof(resp.u.offload));

	return 0;
}

int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	struct ena_admin_get_feat_resp get_resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev,
						ENA_ADMIN_RSS_HASH_FUNCTION)) {
		ena_trc_dbg("Feature %d isn't supported\n",
			    ENA_ADMIN_RSS_HASH_FUNCTION);
		return ENA_COM_UNSUPPORTED;
	}

	/* Validate hash function is supported */
	ret = ena_com_get_feature(ena_dev, &get_resp,
				  ENA_ADMIN_RSS_HASH_FUNCTION, 0);
	if (unlikely(ret))
		return ret;

	if (!(get_resp.u.flow_hash_func.supported_func & BIT(rss->hash_func))) {
		ena_trc_err("Func hash %d isn't supported by device, abort\n",
			    rss->hash_func);
		return ENA_COM_UNSUPPORTED;
	}

	memset(&cmd, 0x0, sizeof(cmd));

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags =
		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_FUNCTION;
	cmd.u.flow_hash_func.init_val = rss->hash_init_val;
	cmd.u.flow_hash_func.selected_func = 1 << rss->hash_func;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.control_buffer.address,
				   rss->hash_key_dma_addr);
	if (unlikely(ret)) {
		ena_trc_err("memory address set failed\n");
		return ret;
	}

	cmd.control_buffer.length = sizeof(*rss->hash_key);

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));
	if (unlikely(ret)) {
		ena_trc_err("Failed to set hash function %d. error: %d\n",
			    rss->hash_func, ret);
		return ENA_COM_INVAL;
	}

	return 0;
}

int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
			       enum ena_admin_hash_functions func,
			       const u8 *key, u16 key_len, u32 init_val)
{
	struct ena_admin_feature_rss_flow_hash_control *hash_key;
	struct ena_admin_get_feat_resp get_resp;
	enum ena_admin_hash_functions old_func;
	struct ena_rss *rss = &ena_dev->rss;
	int rc;

	hash_key = rss->hash_key;

	/* Make sure the key size is a multiple of DWORDs (32-bit words) */
	if (unlikely(key_len & 0x3))
		return ENA_COM_INVAL;

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_HASH_FUNCTION,
				    rss->hash_key_dma_addr,
				    sizeof(*rss->hash_key), 0);
	if (unlikely(rc))
		return rc;

	if (!(BIT(func) & get_resp.u.flow_hash_func.supported_func)) {
		ena_trc_err("Flow hash function %d isn't supported\n", func);
		return ENA_COM_UNSUPPORTED;
	}

	switch (func) {
	case ENA_ADMIN_TOEPLITZ:
		if (key) {
			if (key_len != sizeof(hash_key->key)) {
				ena_trc_err("key len (%hu) doesn't equal the supported size (%zu)\n",
					    key_len, sizeof(hash_key->key));
				return ENA_COM_INVAL;
			}
			memcpy(hash_key->key, key, key_len);
			rss->hash_init_val = init_val;
			hash_key->keys_num = key_len / sizeof(u32);
		}
		break;
	case ENA_ADMIN_CRC32:
		rss->hash_init_val = init_val;
		break;
	default:
		ena_trc_err("Invalid hash function (%d)\n", func);
		return ENA_COM_INVAL;
	}

	old_func = rss->hash_func;
	rss->hash_func = func;
	rc = ena_com_set_hash_function(ena_dev);

	/* Restore the old function */
	if (unlikely(rc))
		rss->hash_func = old_func;

	return rc;
}

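/*
 * Usage sketch (illustrative; not part of this file): programming a Toeplitz
 * key. key_len must be a multiple of 4 and match the device key size
 * (ENA_HASH_KEY_SIZE bytes); my_key and my_fill_random() are hypothetical:
 *
 *	u8 my_key[ENA_HASH_KEY_SIZE];
 *
 *	my_fill_random(my_key, sizeof(my_key));
 *	rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_TOEPLITZ,
 *					my_key, sizeof(my_key), 0x0);
 */
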
int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
			      enum ena_admin_hash_functions *func)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	int rc;

	if (unlikely(!func))
		return ENA_COM_INVAL;

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_HASH_FUNCTION,
				    rss->hash_key_dma_addr,
				    sizeof(*rss->hash_key), 0);
	if (unlikely(rc))
		return rc;

	/* ENA_FFS() returns 1 in case the lsb is set, so the result is
	 * converted back to a zero-based bit index
	 */
	rss->hash_func = ENA_FFS(get_resp.u.flow_hash_func.selected_func);
	if (rss->hash_func)
		rss->hash_func--;

	*func = rss->hash_func;

	return 0;
}

int ena_com_get_hash_key(struct ena_com_dev *ena_dev, u8 *key)
{
	struct ena_admin_feature_rss_flow_hash_control *hash_key =
		ena_dev->rss.hash_key;

	if (key)
		/* keys_num counts 32-bit words; shifting by 2 converts to bytes */
		memcpy(key, hash_key->key, (size_t)(hash_key->keys_num) << 2);

	return 0;
}

int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev,
			  enum ena_admin_flow_hash_proto proto,
			  u16 *fields)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	int rc;

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_HASH_INPUT,
				    rss->hash_ctrl_dma_addr,
				    sizeof(*rss->hash_ctrl), 0);
	if (unlikely(rc))
		return rc;

	if (fields)
		*fields = rss->hash_ctrl->selected_fields[proto].fields;

	return 0;
}

int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev,
						ENA_ADMIN_RSS_HASH_INPUT)) {
		ena_trc_dbg("Feature %d isn't supported\n",
			    ENA_ADMIN_RSS_HASH_INPUT);
		return ENA_COM_UNSUPPORTED;
	}

	memset(&cmd, 0x0, sizeof(cmd));

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags =
		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_INPUT;
	cmd.u.flow_hash_input.enabled_input_sort =
		ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK |
		ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.control_buffer.address,
				   rss->hash_ctrl_dma_addr);
	if (unlikely(ret)) {
		ena_trc_err("memory address set failed\n");
		return ret;
	}

	cmd.control_buffer.length = sizeof(*hash_ctrl);

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));
	if (unlikely(ret))
		ena_trc_err("Failed to set hash input. error: %d\n", ret);

	return ret;
}

int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_feature_rss_hash_control *hash_ctrl =
		rss->hash_ctrl;
	u16 available_fields = 0;
	int rc, i;

	/* Get the supported hash input */
	rc = ena_com_get_hash_ctrl(ena_dev, 0, NULL);
	if (unlikely(rc))
		return rc;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP4].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP4].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP6].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP6].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP6].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_NOT_IP].fields =
		ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA;

	for (i = 0; i < ENA_ADMIN_RSS_PROTO_NUM; i++) {
		available_fields = hash_ctrl->selected_fields[i].fields &
				   hash_ctrl->supported_fields[i].fields;
		if (available_fields != hash_ctrl->selected_fields[i].fields) {
			ena_trc_err("hash control doesn't support all the desired configuration. proto %x supported %x selected %x\n",
				    i, hash_ctrl->supported_fields[i].fields,
				    hash_ctrl->selected_fields[i].fields);
			return ENA_COM_UNSUPPORTED;
		}
	}

	rc = ena_com_set_hash_ctrl(ena_dev);

	/* In case of failure, restore the old hash ctrl */
	if (unlikely(rc))
		ena_com_get_hash_ctrl(ena_dev, 0, NULL);

	return rc;
}

int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
			   enum ena_admin_flow_hash_proto proto,
			   u16 hash_fields)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
	u16 supported_fields;
	int rc;

	if (proto >= ENA_ADMIN_RSS_PROTO_NUM) {
		ena_trc_err("Invalid proto num (%u)\n", proto);
		return ENA_COM_INVAL;
	}

	/* Get the ctrl table */
	rc = ena_com_get_hash_ctrl(ena_dev, proto, NULL);
	if (unlikely(rc))
		return rc;

	/* Make sure all the fields are supported */
	supported_fields = hash_ctrl->supported_fields[proto].fields;
	if ((hash_fields & supported_fields) != hash_fields) {
		ena_trc_err("proto %d doesn't support the required fields %x. supports only: %x\n",
			    proto, hash_fields, supported_fields);
	}

	hash_ctrl->selected_fields[proto].fields = hash_fields;

	rc = ena_com_set_hash_ctrl(ena_dev);

	/* In case of failure, restore the old hash ctrl */
	if (unlikely(rc))
		ena_com_get_hash_ctrl(ena_dev, 0, NULL);

	return rc;
}

int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev,
				      u16 entry_idx, u16 entry_value)
{
	struct ena_rss *rss = &ena_dev->rss;

	if (unlikely(entry_idx >= (1 << rss->tbl_log_size)))
		return ENA_COM_INVAL;

	if (unlikely((entry_value > ENA_TOTAL_NUM_QUEUES)))
		return ENA_COM_INVAL;

	rss->host_rss_ind_tbl[entry_idx] = entry_value;

	return 0;
}

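/*
 * Usage sketch (illustrative; not part of this file): spreading RX queues
 * round-robin across the host indirection table and then pushing it to the
 * device. table_size and num_queues are hypothetical locals; table_size
 * would equal 1 << indr_tbl_log_size as passed to ena_com_rss_init():
 *
 *	for (i = 0; i < table_size; i++) {
 *		rc = ena_com_indirect_table_fill_entry(ena_dev, i,
 *						       i % num_queues);
 *		if (unlikely(rc))
 *			return rc;
 *	}
 *
 *	rc = ena_com_indirect_table_set(ena_dev);
 */
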
int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev,
						ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG)) {
		ena_trc_dbg("Feature %d isn't supported\n",
			    ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
		return ENA_COM_UNSUPPORTED;
	}

	ret = ena_com_ind_tbl_convert_to_device(ena_dev);
	if (ret) {
		ena_trc_err("Failed to convert host indirection table to device table\n");
		return ret;
	}

	memset(&cmd, 0x0, sizeof(cmd));

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags =
		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	cmd.feat_common.feature_id = ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG;
	cmd.u.ind_table.size = rss->tbl_log_size;
	cmd.u.ind_table.inline_index = 0xFFFFFFFF;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.control_buffer.address,
				   rss->rss_ind_tbl_dma_addr);
	if (unlikely(ret)) {
		ena_trc_err("memory address set failed\n");
		return ret;
	}

	cmd.control_buffer.length = (1ULL << rss->tbl_log_size) *
				    sizeof(struct ena_admin_rss_ind_table_entry);

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));
	if (unlikely(ret))
		ena_trc_err("Failed to set indirect table. error: %d\n", ret);

	return ret;
}

int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	u32 tbl_size;
	int i, rc;

	tbl_size = (1ULL << rss->tbl_log_size) *
		   sizeof(struct ena_admin_rss_ind_table_entry);

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG,
				    rss->rss_ind_tbl_dma_addr,
				    tbl_size, 0);
	if (unlikely(rc))
		return rc;

	if (!ind_tbl)
		return 0;

	for (i = 0; i < (1 << rss->tbl_log_size); i++)
		ind_tbl[i] = rss->host_rss_ind_tbl[i];

	return 0;
}

int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size)
{
	int rc;

	memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));

	rc = ena_com_indirect_table_allocate(ena_dev, indr_tbl_log_size);
	if (unlikely(rc))
		goto err_indr_tbl;

	/* The following function might return unsupported in case the
	 * device doesn't support setting the key / hash function. We can
	 * safely ignore this error and have indirection table support only.
	 */
	rc = ena_com_hash_key_allocate(ena_dev);
	if (likely(!rc))
		ena_com_hash_key_fill_default_key(ena_dev);
	else if (rc != ENA_COM_UNSUPPORTED)
		goto err_hash_key;

	rc = ena_com_hash_ctrl_init(ena_dev);
	if (unlikely(rc))
		goto err_hash_ctrl;

	return 0;

err_hash_ctrl:
	ena_com_hash_key_destroy(ena_dev);
err_hash_key:
	ena_com_indirect_table_destroy(ena_dev);
err_indr_tbl:
	return rc;
}

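/*
 * Usage sketch (illustrative; not part of this file): RSS bring-up order.
 * A log size of 7 gives a 128-entry indirection table; teardown mirrors it
 * with ena_com_rss_destroy():
 *
 *	rc = ena_com_rss_init(ena_dev, 7);
 *	if (unlikely(rc))
 *		return rc;
 *	// ... fill the indirection table and hash settings as shown above ...
 */
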
void ena_com_rss_destroy(struct ena_com_dev *ena_dev)
{
	ena_com_indirect_table_destroy(ena_dev);
	ena_com_hash_key_destroy(ena_dev);
	ena_com_hash_ctrl_destroy(ena_dev);

	memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
}

int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
			       SZ_4K,
			       host_attr->host_info,
			       host_attr->host_info_dma_addr,
			       host_attr->host_info_dma_handle);
	if (unlikely(!host_attr->host_info))
		return ENA_COM_NO_MEM;

	host_attr->host_info->ena_spec_version = ((ENA_COMMON_SPEC_VERSION_MAJOR <<
		ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) |
		(ENA_COMMON_SPEC_VERSION_MINOR));

	return 0;
}

int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
				u32 debug_area_size)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
			       debug_area_size,
			       host_attr->debug_area_virt_addr,
			       host_attr->debug_area_dma_addr,
			       host_attr->debug_area_dma_handle);
	if (unlikely(!host_attr->debug_area_virt_addr)) {
		host_attr->debug_area_size = 0;
		return ENA_COM_NO_MEM;
	}

	host_attr->debug_area_size = debug_area_size;

	return 0;
}

void ena_com_delete_host_info(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	if (host_attr->host_info) {
		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
				      SZ_4K,
				      host_attr->host_info,
				      host_attr->host_info_dma_addr,
				      host_attr->host_info_dma_handle);
		host_attr->host_info = NULL;
	}
}

void ena_com_delete_debug_area(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	if (host_attr->debug_area_virt_addr) {
		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
				      host_attr->debug_area_size,
				      host_attr->debug_area_virt_addr,
				      host_attr->debug_area_dma_addr,
				      host_attr->debug_area_dma_handle);
		host_attr->debug_area_virt_addr = NULL;
	}
}

int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	/* Host attribute config is called before ena_com_get_dev_attr_feat
	 * so ena_com can't check if the feature is supported.
	 */

	memset(&cmd, 0x0, sizeof(cmd));
	admin_queue = &ena_dev->admin_queue;

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.feat_common.feature_id = ENA_ADMIN_HOST_ATTR_CONFIG;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.u.host_attr.debug_ba,
				   host_attr->debug_area_dma_addr);
	if (unlikely(ret)) {
		ena_trc_err("memory address set failed\n");
		return ret;
	}

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.u.host_attr.os_info_ba,
				   host_attr->host_info_dma_addr);
	if (unlikely(ret)) {
		ena_trc_err("memory address set failed\n");
		return ret;
	}

	cmd.u.host_attr.debug_area_size = host_attr->debug_area_size;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));
	if (unlikely(ret))
		ena_trc_err("Failed to set host attributes: %d\n", ret);

	return ret;
}

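/*
 * Usage sketch (illustrative; not part of this file): the host-info and
 * debug-area buffers must be allocated before their addresses are handed
 * to the device; error handling is elided:
 *
 *	rc = ena_com_allocate_host_info(ena_dev);
 *	rc = ena_com_allocate_debug_area(ena_dev, 4096);
 *	// fill host_attr->host_info (OS type, driver version, ...)
 *	rc = ena_com_set_host_attributes(ena_dev);
 */
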
/* Interrupt moderation */
bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev)
{
	return ena_com_check_supported_feature_id(ena_dev,
						  ENA_ADMIN_INTERRUPT_MODERATION);
}

static int ena_com_update_nonadaptive_moderation_interval(u32 coalesce_usecs,
							  u32 intr_delay_resolution,
							  u32 *intr_moder_interval)
{
	if (!intr_delay_resolution) {
		ena_trc_err("Illegal interrupt delay granularity value\n");
		return ENA_COM_FAULT;
	}

	*intr_moder_interval = coalesce_usecs / intr_delay_resolution;

	return 0;
}

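/*
 * Worked example (illustrative): the device counts moderation delays in
 * units of intr_delay_resolution. If the device reports a resolution of
 * 4 us, a requested 64 us coalescing delay is programmed as 64 / 4 = 16
 * device units by the wrappers below.
 */
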
int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
						      u32 tx_coalesce_usecs)
{
	return ena_com_update_nonadaptive_moderation_interval(tx_coalesce_usecs,
							      ena_dev->intr_delay_resolution,
							      &ena_dev->intr_moder_tx_interval);
}

int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
						      u32 rx_coalesce_usecs)
{
	return ena_com_update_nonadaptive_moderation_interval(rx_coalesce_usecs,
							      ena_dev->intr_delay_resolution,
							      &ena_dev->intr_moder_rx_interval);
}

int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
{
	struct ena_admin_get_feat_resp get_resp;
	u16 delay_resolution;
	int rc;

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_INTERRUPT_MODERATION, 0);

	if (rc) {
		if (rc == ENA_COM_UNSUPPORTED) {
			ena_trc_dbg("Feature %d isn't supported\n",
				    ENA_ADMIN_INTERRUPT_MODERATION);
			rc = 0;
		} else {
			ena_trc_err("Failed to get interrupt moderation admin cmd. rc: %d\n",
				    rc);
		}

		/* no moderation supported, disable adaptive support */
		ena_com_disable_adaptive_moderation(ena_dev);
		return rc;
	}

	/* Moderation is supported by the device; save the reported interrupt
	 * delay resolution.
	 */
	delay_resolution = get_resp.u.intr_moderation.intr_delay_resolution;
	ena_com_update_intr_delay_resolution(ena_dev, delay_resolution);

	/* Disable adaptive moderation by default - can be enabled later */
	ena_com_disable_adaptive_moderation(ena_dev);

	return 0;
}

unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev)
{
	return ena_dev->intr_moder_tx_interval;
}

unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev)
{
	return ena_dev->intr_moder_rx_interval;
}

int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
			    struct ena_admin_feature_llq_desc *llq_features,
			    struct ena_llq_configurations *llq_default_cfg)
{
	struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
	int rc;

	if (!llq_features->max_llq_num) {
		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
		return 0;
	}

	rc = ena_com_config_llq_info(ena_dev, llq_features, llq_default_cfg);
	if (unlikely(rc))
		return rc;

	ena_dev->tx_max_header_size = llq_info->desc_list_entry_size -
		(llq_info->descs_num_before_header * sizeof(struct ena_eth_io_tx_desc));

	if (unlikely(ena_dev->tx_max_header_size == 0)) {
		ena_trc_err("the size of the LLQ entry is smaller than needed\n");
		return ENA_COM_INVAL;
	}

	ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV;

	return 0;
}

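/*
 * Worked example (illustrative): with a 128-byte LLQ descriptor list entry
 * and two descriptors placed before the packet header, where
 * sizeof(struct ena_eth_io_tx_desc) is 16 bytes, the remaining header space
 * is 128 - 2 * 16 = 96 bytes, which becomes tx_max_header_size above.
 */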