/*
 * Copyright (c) 2015-2016 Amazon.com, Inc. or its affiliates.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in
 * the documentation and/or other materials provided with the
 * distribution.
 * * Neither the name of copyright holder nor the names of its
 * contributors may be used to endorse or promote products derived
 * from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "ena_com.h"

/*****************************************************************************/
/*****************************************************************************/

/* Timeout in microseconds */
#define ADMIN_CMD_TIMEOUT_US (1000000)

#define ENA_ASYNC_QUEUE_DEPTH 4
#define ENA_ADMIN_QUEUE_DEPTH 32
#define ENA_EXTENDED_STAT_GET_FUNCT(_funct_queue) (_funct_queue & 0xFFFF)
#define ENA_EXTENDED_STAT_GET_QUEUE(_funct_queue) (_funct_queue >> 16)
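/* Worked example of the packing (illustrative): with funct = 0x0002 in the
 * bottom 16 bits and queue = 0x0005 in the top 16 bits, func_queue is
 * 0x00050002, so ENA_EXTENDED_STAT_GET_FUNCT() yields 0x0002 and
 * ENA_EXTENDED_STAT_GET_QUEUE() yields 0x0005.
 */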
#define MIN_ENA_VER (((ENA_COMMON_SPEC_VERSION_MAJOR) << \
		ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) \
		| (ENA_COMMON_SPEC_VERSION_MINOR))
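/* Illustrative numbers, assuming (for the sake of example only) a common
 * spec version of major = 0, minor = 10: MIN_ENA_VER packs the major into
 * the register's major field and ORs in the minor, i.e.
 * (0 << ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) | 10 == 10. The actual spec
 * constants come from the generated common-defs header.
 */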
#define ENA_CTRL_MAJOR		0
#define ENA_CTRL_MINOR		0
#define ENA_CTRL_SUB_MINOR	1

#define MIN_ENA_CTRL_VER \
	(((ENA_CTRL_MAJOR) << \
	(ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT)) | \
	((ENA_CTRL_MINOR) << \
	(ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT)) | \
	(ENA_CTRL_SUB_MINOR))
#define ENA_DMA_ADDR_TO_UINT32_LOW(x)	((u32)((u64)(x)))
#define ENA_DMA_ADDR_TO_UINT32_HIGH(x)	((u32)(((u64)(x)) >> 32))
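/* Illustrative split: for a 64-bit bus address 0x00000012_3456789A,
 * ENA_DMA_ADDR_TO_UINT32_LOW() yields 0x3456789A and
 * ENA_DMA_ADDR_TO_UINT32_HIGH() yields 0x00000012. These halves feed the
 * *_LO/*_HI register pairs and struct ena_common_mem_addr below.
 */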
#define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF

static int ena_alloc_cnt;
/*****************************************************************************/
/*****************************************************************************/
/*****************************************************************************/
enum ena_cmd_status {
	ENA_CMD_SUBMITTED,
	ENA_CMD_COMPLETED,
	/* Abort - canceled by the driver */
	ENA_CMD_ABORTED,
};

struct ena_comp_ctx {
	ena_wait_event_t wait_event;
	struct ena_admin_acq_entry *user_cqe;
	u32 comp_size;
	enum ena_cmd_status status;
	/* status from the device */
	u8 comp_status;
	u8 cmd_opcode;
	bool occupied;
};
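/* Lifecycle note: a comp_ctx slot is claimed by get_comp_ctxt(..., true)
 * when a command is submitted, filled in by
 * ena_com_handle_single_admin_completion() when the matching completion
 * arrives, and handed back via comp_ctxt_release() once the caller has
 * consumed the status.
 */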
static inline int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
				       struct ena_common_mem_addr *ena_addr,
				       dma_addr_t addr)
{
	if ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr) {
		ena_trc_err("dma address has more bits than the device supports\n");
		return ENA_COM_INVAL;
	}

	ena_addr->mem_addr_low = (u32)addr;
	ena_addr->mem_addr_high =
		((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 32)) >> 32);

	return 0;
}
static int ena_com_admin_init_sq(struct ena_com_admin_queue *queue)
{
	ENA_MEM_ALLOC_COHERENT(queue->q_dmadev,
			       ADMIN_SQ_SIZE(queue->q_depth),
			       queue->sq.entries,
			       queue->sq.dma_addr,
			       queue->sq.mem_handle);

	if (!queue->sq.entries) {
		ena_trc_err("memory allocation failed");
		return ENA_COM_NO_MEM;
	}

	queue->sq.head = 0;
	queue->sq.tail = 0;
	queue->sq.phase = 1;

	queue->sq.db_addr = NULL;

	return 0;
}
static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue)
{
	ENA_MEM_ALLOC_COHERENT(queue->q_dmadev,
			       ADMIN_CQ_SIZE(queue->q_depth),
			       queue->cq.entries,
			       queue->cq.dma_addr,
			       queue->cq.mem_handle);

	if (!queue->cq.entries) {
		ena_trc_err("memory allocation failed");
		return ENA_COM_NO_MEM;
	}

	queue->cq.head = 0;
	queue->cq.phase = 1;

	return 0;
}
static int ena_com_admin_init_aenq(struct ena_com_dev *dev,
				   struct ena_aenq_handlers *aenq_handlers)
{
	u32 addr_low, addr_high, aenq_caps;

	dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
	ENA_MEM_ALLOC_COHERENT(dev->dmadev,
			       ADMIN_AENQ_SIZE(dev->aenq.q_depth),
			       dev->aenq.entries,
			       dev->aenq.dma_addr,
			       dev->aenq.mem_handle);

	if (!dev->aenq.entries) {
		ena_trc_err("memory allocation failed");
		return ENA_COM_NO_MEM;
	}

	dev->aenq.head = dev->aenq.q_depth;
	dev->aenq.phase = 1;

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(dev->aenq.dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(dev->aenq.dma_addr);

	ENA_REG_WRITE32(addr_low, (unsigned char *)dev->reg_bar
			+ ENA_REGS_AENQ_BASE_LO_OFF);
	ENA_REG_WRITE32(addr_high, (unsigned char *)dev->reg_bar
			+ ENA_REGS_AENQ_BASE_HI_OFF);

	aenq_caps = 0;
	aenq_caps |= dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
	aenq_caps |= (sizeof(struct ena_admin_aenq_entry) <<
		ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
		ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;

	ENA_REG_WRITE32(aenq_caps, (unsigned char *)dev->reg_bar
			+ ENA_REGS_AENQ_CAPS_OFF);

	if (unlikely(!aenq_handlers))
		ena_trc_err("aenq handlers pointer is NULL\n");

	dev->aenq.aenq_handlers = aenq_handlers;

	return 0;
}
static inline void comp_ctxt_release(struct ena_com_admin_queue *queue,
				     struct ena_comp_ctx *comp_ctx)
{
	comp_ctx->occupied = false;
	ATOMIC32_DEC(&queue->outstanding_cmds);
}
static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *queue,
					  u16 command_id, bool capture)
{
	ENA_ASSERT(command_id < queue->q_depth,
		   "command id is larger than the queue size. cmd_id: %u queue size %d\n",
		   command_id, queue->q_depth);

	ENA_ASSERT(!(queue->comp_ctx[command_id].occupied && capture),
		   "Completion context is occupied");

	if (capture) {
		ATOMIC32_INC(&queue->outstanding_cmds);
		queue->comp_ctx[command_id].occupied = true;
	}

	return &queue->comp_ctx[command_id];
}
static struct ena_comp_ctx *
__ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
			   struct ena_admin_aq_entry *cmd,
			   size_t cmd_size_in_bytes,
			   struct ena_admin_acq_entry *comp,
			   size_t comp_size_in_bytes)
{
	struct ena_comp_ctx *comp_ctx;
	u16 tail_masked, cmd_id;
	u16 queue_size_mask;
	u16 cnt;

	queue_size_mask = admin_queue->q_depth - 1;

	tail_masked = admin_queue->sq.tail & queue_size_mask;

	/* In case of queue FULL */
	cnt = admin_queue->sq.tail - admin_queue->sq.head;
	if (cnt >= admin_queue->q_depth) {
		ena_trc_dbg("admin queue is FULL (tail %d head %d depth: %d)\n",
			    admin_queue->sq.tail,
			    admin_queue->sq.head,
			    admin_queue->q_depth);
		admin_queue->stats.out_of_space++;
		return ERR_PTR(ENA_COM_NO_SPACE);
	}

	cmd_id = admin_queue->curr_cmd_id;

	cmd->aq_common_descriptor.flags |= admin_queue->sq.phase &
		ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK;

	cmd->aq_common_descriptor.command_id |= cmd_id &
		ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;

	comp_ctx = get_comp_ctxt(admin_queue, cmd_id, true);

	comp_ctx->status = ENA_CMD_SUBMITTED;
	comp_ctx->comp_size = (u32)comp_size_in_bytes;
	comp_ctx->user_cqe = comp;
	comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode;

	ENA_WAIT_EVENT_CLEAR(comp_ctx->wait_event);

	memcpy(&admin_queue->sq.entries[tail_masked], cmd, cmd_size_in_bytes);

	admin_queue->curr_cmd_id = (admin_queue->curr_cmd_id + 1) &
		queue_size_mask;

	admin_queue->sq.tail++;
	admin_queue->stats.submitted_cmd++;

	if (unlikely((admin_queue->sq.tail & queue_size_mask) == 0))
		admin_queue->sq.phase = !admin_queue->sq.phase;

	ENA_REG_WRITE32(admin_queue->sq.tail, admin_queue->sq.db_addr);

	return comp_ctx;
}
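/* Note on the phase bit: the submission queue is treated as a ring, and
 * instead of reading back a consumer index the device tracks a phase flag
 * that the driver flips every time the tail wraps (the sq.phase toggle
 * above). The same convention is used on the admin completion ring and the
 * AENQ ring below.
 */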
static inline int ena_com_init_comp_ctxt(struct ena_com_admin_queue *queue)
{
	size_t size = queue->q_depth * sizeof(struct ena_comp_ctx);
	struct ena_comp_ctx *comp_ctx;
	u16 i;

	queue->comp_ctx = ENA_MEM_ALLOC(queue->q_dmadev, size);
	if (unlikely(!queue->comp_ctx)) {
		ena_trc_err("memory allocation failed");
		return ENA_COM_NO_MEM;
	}

	for (i = 0; i < queue->q_depth; i++) {
		comp_ctx = get_comp_ctxt(queue, i, false);
		ENA_WAIT_EVENT_INIT(comp_ctx->wait_event);
	}

	return 0;
}
static struct ena_comp_ctx *
ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
			 struct ena_admin_aq_entry *cmd,
			 size_t cmd_size_in_bytes,
			 struct ena_admin_acq_entry *comp,
			 size_t comp_size_in_bytes)
{
	unsigned long flags = 0;
	struct ena_comp_ctx *comp_ctx;

	ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
	if (unlikely(!admin_queue->running_state)) {
		ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
		return ERR_PTR(ENA_COM_NO_DEVICE);
	}
	comp_ctx = __ena_com_submit_admin_cmd(admin_queue, cmd,
					      cmd_size_in_bytes,
					      comp,
					      comp_size_in_bytes);
	ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);

	return comp_ctx;
}
static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
			      struct ena_com_io_sq *io_sq)
{
	size_t size;

	memset(&io_sq->desc_addr, 0x0, sizeof(struct ena_com_io_desc_addr));

	io_sq->desc_entry_size =
		(io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
		sizeof(struct ena_eth_io_tx_desc) :
		sizeof(struct ena_eth_io_rx_desc);

	size = io_sq->desc_entry_size * io_sq->q_depth;

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
		ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
				       size,
				       io_sq->desc_addr.virt_addr,
				       io_sq->desc_addr.phys_addr,
				       io_sq->desc_addr.mem_handle);
	else
		io_sq->desc_addr.virt_addr =
			ENA_MEM_ALLOC(ena_dev->dmadev, size);

	if (!io_sq->desc_addr.virt_addr) {
		ena_trc_err("memory allocation failed");
		return ENA_COM_NO_MEM;
	}

	io_sq->tail = 0;
	io_sq->next_to_comp = 0;
	io_sq->phase = 1;

	return 0;
}
static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
			      struct ena_com_io_cq *io_cq)
{
	size_t size;

	memset(&io_cq->cdesc_addr, 0x0, sizeof(struct ena_com_io_desc_addr));

	/* Use the basic completion descriptor for Rx */
	io_cq->cdesc_entry_size_in_bytes =
		(io_cq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
		sizeof(struct ena_eth_io_tx_cdesc) :
		sizeof(struct ena_eth_io_rx_cdesc_base);

	size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;

	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
			       size,
			       io_cq->cdesc_addr.virt_addr,
			       io_cq->cdesc_addr.phys_addr,
			       io_cq->cdesc_addr.mem_handle);

	if (!io_cq->cdesc_addr.virt_addr) {
		ena_trc_err("memory allocation failed");
		return ENA_COM_NO_MEM;
	}

	io_cq->phase = 1;
	io_cq->head = 0;

	return 0;
}
static void
ena_com_handle_single_admin_completion(struct ena_com_admin_queue *admin_queue,
				       struct ena_admin_acq_entry *cqe)
{
	struct ena_comp_ctx *comp_ctx;
	u16 cmd_id;

	cmd_id = cqe->acq_common_descriptor.command &
		ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;

	comp_ctx = get_comp_ctxt(admin_queue, cmd_id, false);

	comp_ctx->status = ENA_CMD_COMPLETED;
	comp_ctx->comp_status = cqe->acq_common_descriptor.status;

	if (comp_ctx->user_cqe)
		memcpy(comp_ctx->user_cqe, (void *)cqe, comp_ctx->comp_size);

	if (!admin_queue->polling)
		ENA_WAIT_EVENT_SIGNAL(comp_ctx->wait_event);
}
static void
ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_queue)
{
	struct ena_admin_acq_entry *cqe = NULL;
	u16 comp_num = 0;
	u16 head_masked;
	u8 phase;

	head_masked = admin_queue->cq.head & (admin_queue->q_depth - 1);
	phase = admin_queue->cq.phase;

	cqe = &admin_queue->cq.entries[head_masked];

	/* Go over all the completions */
	while ((cqe->acq_common_descriptor.flags &
			ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
		/* Do not read the rest of the completion entry before the
		 * phase bit was validated
		 */
		rmb();
		ena_com_handle_single_admin_completion(admin_queue, cqe);

		head_masked++;
		comp_num++;
		if (unlikely(head_masked == admin_queue->q_depth)) {
			head_masked = 0;
			phase = !phase;
		}

		cqe = &admin_queue->cq.entries[head_masked];
	}

	admin_queue->cq.head += comp_num;
	admin_queue->cq.phase = phase;
	admin_queue->sq.head += comp_num;
	admin_queue->stats.completed_cmd += comp_num;
}
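/* The loop above detects new completions purely by comparing each entry's
 * phase flag with the expected cq.phase; no device register read is needed.
 * sq.head is advanced by the same count because the device produces exactly
 * one completion per consumed SQ entry.
 */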
static int ena_com_comp_status_to_errno(u8 comp_status)
{
	if (unlikely(comp_status != 0))
		ena_trc_err("admin command failed[%u]\n", comp_status);

	if (unlikely(comp_status > ENA_ADMIN_UNKNOWN_ERROR))
		return ENA_COM_INVAL;

	switch (comp_status) {
	case ENA_ADMIN_SUCCESS:
		return 0;
	case ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE:
		return ENA_COM_NO_MEM;
	case ENA_ADMIN_UNSUPPORTED_OPCODE:
		return ENA_COM_PERMISSION;
	case ENA_ADMIN_BAD_OPCODE:
	case ENA_ADMIN_MALFORMED_REQUEST:
	case ENA_ADMIN_ILLEGAL_PARAMETER:
	case ENA_ADMIN_UNKNOWN_ERROR:
		return ENA_COM_INVAL;
	}

	return 0;
}
static int
ena_com_wait_and_process_admin_cq_polling(
	struct ena_comp_ctx *comp_ctx,
	struct ena_com_admin_queue *admin_queue)
{
	unsigned long flags = 0;
	u64 start_time;
	int ret;

	start_time = ENA_GET_SYSTEM_USECS();

	while (comp_ctx->status == ENA_CMD_SUBMITTED) {
		if ((ENA_GET_SYSTEM_USECS() - start_time) >
		    ADMIN_CMD_TIMEOUT_US) {
			ena_trc_err("Wait for completion (polling) timeout\n");
			/* ENA didn't have any completion */
			ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
			admin_queue->stats.no_completion++;
			admin_queue->running_state = false;
			ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);

			ret = ENA_COM_TIMER_EXPIRED;
			goto err;
		}

		ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
		ena_com_handle_admin_completion(admin_queue);
		ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
	}

	if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
		ena_trc_err("Command was aborted\n");
		ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
		admin_queue->stats.aborted_cmd++;
		ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
		ret = ENA_COM_NO_DEVICE;
		goto err;
	}

	ENA_ASSERT(comp_ctx->status == ENA_CMD_COMPLETED,
		   "Invalid comp status %d\n", comp_ctx->status);

	ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
err:
	comp_ctxt_release(admin_queue, comp_ctx);
	return ret;
}
static int
ena_com_wait_and_process_admin_cq_interrupts(
	struct ena_comp_ctx *comp_ctx,
	struct ena_com_admin_queue *admin_queue)
{
	unsigned long flags = 0;
	int ret = 0;

	ENA_WAIT_EVENT_WAIT(comp_ctx->wait_event,
			    ADMIN_CMD_TIMEOUT_US);

	/* In case the command wasn't completed find out the root cause.
	 * There might be 2 kinds of errors:
	 * 1) No completion (timeout reached)
	 * 2) There is a completion but the driver didn't get any MSI-X
	 *    interrupt.
	 */
	if (unlikely(comp_ctx->status == ENA_CMD_SUBMITTED)) {
		ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
		ena_com_handle_admin_completion(admin_queue);
		admin_queue->stats.no_completion++;
		ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);

		if (comp_ctx->status == ENA_CMD_COMPLETED)
			ena_trc_err("The ena device has a completion but the driver didn't receive any MSI-X interrupt (cmd %d)\n",
				    comp_ctx->cmd_opcode);
		else
			ena_trc_err("The ena device didn't send any completion for admin cmd %d status %d\n",
				    comp_ctx->cmd_opcode, comp_ctx->status);

		admin_queue->running_state = false;
		ret = ENA_COM_TIMER_EXPIRED;
		goto err;
	}

	ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
err:
	comp_ctxt_release(admin_queue, comp_ctx);
	return ret;
}
/* This method reads the hardware device register through posting writes
 * and waiting for a response.
 * On timeout the function returns ENA_MMIO_READ_TIMEOUT
 */
static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
	volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp =
		mmio_read->read_resp;
	u32 mmio_read_reg, ret;
	unsigned long flags = 0;
	int i;

	/* If readless is disabled, perform regular read */
	if (!mmio_read->readless_supported)
		return ENA_REG_READ32((unsigned char *)ena_dev->reg_bar +
				      offset);

	ENA_SPINLOCK_LOCK(mmio_read->lock, flags);
	mmio_read->seq_num++;

	read_resp->req_id = mmio_read->seq_num + 0xDEAD;
	mmio_read_reg = (offset << ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT) &
			ENA_REGS_MMIO_REG_READ_REG_OFF_MASK;
	mmio_read_reg |= mmio_read->seq_num &
			ENA_REGS_MMIO_REG_READ_REQ_ID_MASK;

	/* make sure read_resp->req_id gets updated before the hw can write
	 * to it
	 */
	wmb();

	ENA_REG_WRITE32(mmio_read_reg, (unsigned char *)ena_dev->reg_bar
			+ ENA_REGS_MMIO_REG_READ_OFF);

	for (i = 0; i < ENA_REG_READ_TIMEOUT; i++) {
		if (read_resp->req_id == mmio_read->seq_num)
			break;

		ENA_UDELAY(1);
	}

	if (unlikely(i == ENA_REG_READ_TIMEOUT)) {
		ena_trc_err("reading reg failed for timeout. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n",
			    mmio_read->seq_num,
			    offset,
			    read_resp->req_id,
			    read_resp->reg_off);
		ret = ENA_MMIO_READ_TIMEOUT;
		goto err;
	}

	ENA_ASSERT(read_resp->reg_off == offset,
		   "Invalid MMIO read return value");

	ret = read_resp->reg_val;
err:
	ENA_SPINLOCK_UNLOCK(mmio_read->lock, flags);

	return ret;
}
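/* Readless-read flow in short: the driver posts the target register offset
 * plus a fresh sequence number to the MMIO_REG_READ register, and the device
 * DMA-writes the register value into the pre-registered read_resp buffer.
 * read_resp->req_id is primed with seq_num + 0xDEAD so a stale buffer can
 * never accidentally match the sequence number the polling loop waits for.
 */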
/* There are two ways to wait for completion.
 * Polling mode - wait until the completion is available.
 * Async mode - wait on wait queue until the completion is ready
 * (or the timeout expired).
 * It is expected that the IRQ called ena_com_handle_admin_completion
 * to mark the completions.
 */
static int
ena_com_wait_and_process_admin_cq(struct ena_comp_ctx *comp_ctx,
				  struct ena_com_admin_queue *admin_queue)
{
	if (admin_queue->polling)
		return ena_com_wait_and_process_admin_cq_polling(comp_ctx,
								 admin_queue);

	return ena_com_wait_and_process_admin_cq_interrupts(comp_ctx,
							    admin_queue);
}
static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
				 struct ena_com_io_sq *io_sq)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_destroy_sq_cmd destroy_cmd;
	struct ena_admin_acq_destroy_sq_resp_desc destroy_resp;
	u8 direction;
	int ret;

	memset(&destroy_cmd, 0x0, sizeof(struct ena_admin_aq_destroy_sq_cmd));

	if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
		direction = ENA_ADMIN_SQ_DIRECTION_TX;
	else
		direction = ENA_ADMIN_SQ_DIRECTION_RX;

	destroy_cmd.sq.sq_identity |= (direction <<
		ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT) &
		ENA_ADMIN_SQ_SQ_DIRECTION_MASK;

	destroy_cmd.sq.sq_idx = io_sq->idx;
	destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_SQ;

	ret = ena_com_execute_admin_command(
		admin_queue,
		(struct ena_admin_aq_entry *)&destroy_cmd,
		sizeof(destroy_cmd),
		(struct ena_admin_acq_entry *)&destroy_resp,
		sizeof(destroy_resp));

	if (unlikely(ret && (ret != ENA_COM_NO_DEVICE)))
		ena_trc_err("failed to destroy io sq error: %d\n", ret);

	return ret;
}
static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
				  struct ena_com_io_sq *io_sq,
				  struct ena_com_io_cq *io_cq)
{
	size_t size;

	if (io_cq->cdesc_addr.virt_addr) {
		size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;

		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
				      size,
				      io_cq->cdesc_addr.virt_addr,
				      io_cq->cdesc_addr.phys_addr,
				      io_cq->cdesc_addr.mem_handle);

		io_cq->cdesc_addr.virt_addr = NULL;
	}

	if (io_sq->desc_addr.virt_addr) {
		size = io_sq->desc_entry_size * io_sq->q_depth;

		if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
			ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
					      size,
					      io_sq->desc_addr.virt_addr,
					      io_sq->desc_addr.phys_addr,
					      io_sq->desc_addr.mem_handle);
		else
			ENA_MEM_FREE(ena_dev->dmadev,
				     io_sq->desc_addr.virt_addr);

		io_sq->desc_addr.virt_addr = NULL;
	}
}
static int wait_for_reset_state(struct ena_com_dev *ena_dev,
				u32 timeout, u16 exp_state)
{
	u32 val, i;

	for (i = 0; i < timeout; i++) {
		val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);

		if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) {
			ena_trc_err("Reg read timeout occurred\n");
			return ENA_COM_TIMER_EXPIRED;
		}

		if ((val & ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK) ==
			exp_state)
			return 0;

		/* The resolution of the timeout is 100ms */
		ENA_MSLEEP(100);
	}

	return ENA_COM_TIMER_EXPIRED;
}
static bool
ena_com_check_supported_feature_id(struct ena_com_dev *ena_dev,
				   enum ena_admin_aq_feature_id feature_id)
{
	u32 feature_mask = 1 << feature_id;

	/* Device attributes are always supported */
	if ((feature_id != ENA_ADMIN_DEVICE_ATTRIBUTES) &&
	    !(ena_dev->supported_features & feature_mask))
		return false;

	return true;
}
static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
				  struct ena_admin_get_feat_resp *get_resp,
				  enum ena_admin_aq_feature_id feature_id,
				  dma_addr_t control_buf_dma_addr,
				  u32 control_buff_size)
{
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_get_feat_cmd get_cmd;
	int ret;

	if (!ena_dev) {
		ena_trc_err("%s : ena_dev is NULL\n", __func__);
		return ENA_COM_NO_DEVICE;
	}

	if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
		ena_trc_info("Feature %d isn't supported\n", feature_id);
		return ENA_COM_PERMISSION;
	}

	memset(&get_cmd, 0x0, sizeof(get_cmd));
	admin_queue = &ena_dev->admin_queue;

	get_cmd.aq_common_descriptor.opcode = ENA_ADMIN_GET_FEATURE;

	if (control_buff_size)
		get_cmd.aq_common_descriptor.flags =
			ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	else
		get_cmd.aq_common_descriptor.flags = 0;

	ret = ena_com_mem_addr_set(ena_dev,
				   &get_cmd.control_buffer.address,
				   control_buf_dma_addr);
	if (unlikely(ret)) {
		ena_trc_err("memory address set failed\n");
		return ret;
	}

	get_cmd.control_buffer.length = control_buff_size;

	get_cmd.feat_common.feature_id = feature_id;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)
					    &get_cmd,
					    sizeof(get_cmd),
					    (struct ena_admin_acq_entry *)
					    get_resp,
					    sizeof(*get_resp));

	if (unlikely(ret))
		ena_trc_err("Failed to submit get_feature command %d error: %d\n",
			    feature_id, ret);

	return ret;
}
static int ena_com_get_feature(struct ena_com_dev *ena_dev,
			       struct ena_admin_get_feat_resp *get_resp,
			       enum ena_admin_aq_feature_id feature_id)
{
	return ena_com_get_feature_ex(ena_dev,
				      get_resp,
				      feature_id,
				      0,
				      0);
}
static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
			       sizeof(*rss->hash_key),
			       rss->hash_key,
			       rss->hash_key_dma_addr,
			       rss->hash_key_mem_handle);

	if (unlikely(!rss->hash_key))
		return ENA_COM_NO_MEM;

	return 0;
}
static int ena_com_hash_key_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	if (rss->hash_key)
		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
				      sizeof(*rss->hash_key),
				      rss->hash_key,
				      rss->hash_key_dma_addr,
				      rss->hash_key_mem_handle);
	rss->hash_key = NULL;

	return 0;
}
static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
			       sizeof(*rss->hash_ctrl),
			       rss->hash_ctrl,
			       rss->hash_ctrl_dma_addr,
			       rss->hash_ctrl_mem_handle);
	if (unlikely(!rss->hash_ctrl))
		return ENA_COM_NO_MEM;

	return 0;
}
static int ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	if (rss->hash_ctrl)
		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
				      sizeof(*rss->hash_ctrl),
				      rss->hash_ctrl,
				      rss->hash_ctrl_dma_addr,
				      rss->hash_ctrl_mem_handle);
	rss->hash_ctrl = NULL;

	return 0;
}
static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
					   u16 log_size)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	size_t tbl_size;
	int ret;

	ret = ena_com_get_feature(ena_dev, &get_resp,
				  ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
	if (unlikely(ret))
		return ret;

	if ((get_resp.u.ind_table.min_size > log_size) ||
	    (get_resp.u.ind_table.max_size < log_size)) {
		ena_trc_err("indirect table size doesn't fit. requested size: %d while min is:%d and max %d\n",
			    1 << log_size,
			    1 << get_resp.u.ind_table.min_size,
			    1 << get_resp.u.ind_table.max_size);
		return ENA_COM_INVAL;
	}

	tbl_size = (1 << log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
			       tbl_size,
			       rss->rss_ind_tbl,
			       rss->rss_ind_tbl_dma_addr,
			       rss->rss_ind_tbl_mem_handle);
	if (unlikely(!rss->rss_ind_tbl))
		goto mem_err1;

	tbl_size = (1 << log_size) * sizeof(u16);
	rss->host_rss_ind_tbl =
		ENA_MEM_ALLOC(ena_dev->dmadev, tbl_size);
	if (unlikely(!rss->host_rss_ind_tbl))
		goto mem_err2;

	rss->tbl_log_size = log_size;

	return 0;

mem_err2:
	tbl_size = (1 << log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
			      tbl_size,
			      rss->rss_ind_tbl,
			      rss->rss_ind_tbl_dma_addr,
			      rss->rss_ind_tbl_mem_handle);
	rss->rss_ind_tbl = NULL;
mem_err1:
	rss->tbl_log_size = 0;
	return ENA_COM_NO_MEM;
}
static int ena_com_indirect_table_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;
	size_t tbl_size = (1 << rss->tbl_log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	if (rss->rss_ind_tbl)
		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
				      tbl_size,
				      rss->rss_ind_tbl,
				      rss->rss_ind_tbl_dma_addr,
				      rss->rss_ind_tbl_mem_handle);
	rss->rss_ind_tbl = NULL;

	if (rss->host_rss_ind_tbl)
		ENA_MEM_FREE(ena_dev->dmadev, rss->host_rss_ind_tbl);
	rss->host_rss_ind_tbl = NULL;

	return 0;
}
static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
				struct ena_com_io_sq *io_sq, u16 cq_idx)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_create_sq_cmd create_cmd;
	struct ena_admin_acq_create_sq_resp_desc cmd_completion;
	u8 direction;
	int ret;

	memset(&create_cmd, 0x0, sizeof(struct ena_admin_aq_create_sq_cmd));

	create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_SQ;

	if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
		direction = ENA_ADMIN_SQ_DIRECTION_TX;
	else
		direction = ENA_ADMIN_SQ_DIRECTION_RX;

	create_cmd.sq_identity |= (direction <<
		ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT) &
		ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK;

	create_cmd.sq_caps_2 |= io_sq->mem_queue_type &
		ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK;

	create_cmd.sq_caps_2 |= (ENA_ADMIN_COMPLETION_POLICY_DESC <<
		ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT) &
		ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK;

	create_cmd.sq_caps_3 |=
		ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK;

	create_cmd.cq_idx = cq_idx;
	create_cmd.sq_depth = io_sq->q_depth;

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
		ret = ena_com_mem_addr_set(ena_dev,
					   &create_cmd.sq_ba,
					   io_sq->desc_addr.phys_addr);
		if (unlikely(ret)) {
			ena_trc_err("memory address set failed\n");
			return ret;
		}
	}

	ret = ena_com_execute_admin_command(
		admin_queue,
		(struct ena_admin_aq_entry *)&create_cmd,
		sizeof(create_cmd),
		(struct ena_admin_acq_entry *)&cmd_completion,
		sizeof(cmd_completion));
	if (unlikely(ret)) {
		ena_trc_err("Failed to create IO SQ. error: %d\n", ret);
		return ret;
	}

	io_sq->idx = cmd_completion.sq_idx;

	io_sq->db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
		(uintptr_t)cmd_completion.sq_doorbell_offset);

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
		io_sq->header_addr = (u8 __iomem *)((uintptr_t)ena_dev->mem_bar
				+ cmd_completion.llq_headers_offset);

		io_sq->desc_addr.pbuf_dev_addr =
			(u8 __iomem *)((uintptr_t)ena_dev->mem_bar +
			cmd_completion.llq_descriptors_offset);
	}

	ena_trc_dbg("created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth);

	return ret;
}
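/* Placement policy recap: with ENA_ADMIN_PLACEMENT_POLICY_HOST the
 * descriptors live in host memory and their physical address is handed to
 * the device; with ENA_ADMIN_PLACEMENT_POLICY_DEV (LLQ) the device exposes
 * header and descriptor windows in the memory BAR instead, which is why the
 * llq_* offsets are only captured in the second branch above.
 */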
static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_com_io_sq *io_sq;
	u16 qid;
	int i;

	for (i = 0; i < 1 << rss->tbl_log_size; i++) {
		qid = rss->host_rss_ind_tbl[i];
		if (qid >= ENA_TOTAL_NUM_QUEUES)
			return ENA_COM_INVAL;

		io_sq = &ena_dev->io_sq_queues[qid];

		if (io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX)
			return ENA_COM_INVAL;

		rss->rss_ind_tbl[i].cq_idx = io_sq->idx;
	}

	return 0;
}
static int ena_com_ind_tbl_convert_from_device(struct ena_com_dev *ena_dev)
{
	u16 dev_idx_to_host_tbl[ENA_TOTAL_NUM_QUEUES] = { -1 };
	struct ena_rss *rss = &ena_dev->rss;
	u16 idx;
	int i;

	for (i = 0; i < ENA_TOTAL_NUM_QUEUES; i++)
		dev_idx_to_host_tbl[ena_dev->io_sq_queues[i].idx] = i;

	for (i = 0; i < 1 << rss->tbl_log_size; i++) {
		idx = rss->rss_ind_tbl[i].cq_idx;
		if (idx > ENA_TOTAL_NUM_QUEUES)
			return ENA_COM_INVAL;

		if (dev_idx_to_host_tbl[idx] > ENA_TOTAL_NUM_QUEUES)
			return ENA_COM_INVAL;

		rss->host_rss_ind_tbl[i] = dev_idx_to_host_tbl[idx];
	}

	return 0;
}
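/* Two views of the same RSS table are kept in sync here: host_rss_ind_tbl
 * holds driver-side queue ids (qid), while rss_ind_tbl holds the device-side
 * cq_idx values the hardware actually uses; the two convert helpers
 * translate between them before a set or after a get.
 */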
static int ena_com_init_interrupt_moderation_table(struct ena_com_dev *ena_dev)
{
	size_t size;

	size = sizeof(struct ena_intr_moder_entry) * ENA_INTR_MAX_NUM_OF_LEVELS;

	ena_dev->intr_moder_tbl = ENA_MEM_ALLOC(ena_dev->dmadev, size);
	if (!ena_dev->intr_moder_tbl)
		return ENA_COM_NO_MEM;

	ena_com_config_default_interrupt_moderation_table(ena_dev);

	return 0;
}
static void
ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev,
				     unsigned int intr_delay_resolution)
{
	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
	unsigned int i;

	if (!intr_delay_resolution) {
		ena_trc_err("Illegal intr_delay_resolution provided. Going to use default 1 usec resolution\n");
		intr_delay_resolution = 1;
	}
	ena_dev->intr_delay_resolution = intr_delay_resolution;

	/* update Rx */
	for (i = 0; i < ENA_INTR_MAX_NUM_OF_LEVELS; i++)
		intr_moder_tbl[i].intr_moder_interval /= intr_delay_resolution;

	/* update Tx */
	ena_dev->intr_moder_tx_interval /= intr_delay_resolution;
}
/*****************************************************************************/
/******************************* API ******************************/
/*****************************************************************************/
int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue,
				  struct ena_admin_aq_entry *cmd,
				  size_t cmd_size,
				  struct ena_admin_acq_entry *comp,
				  size_t comp_size)
{
	struct ena_comp_ctx *comp_ctx;
	int ret = 0;

	comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size,
					    comp, comp_size);
	if (unlikely(IS_ERR(comp_ctx))) {
		ena_trc_err("Failed to submit command [%ld]\n",
			    PTR_ERR(comp_ctx));
		return PTR_ERR(comp_ctx);
	}

	ret = ena_com_wait_and_process_admin_cq(comp_ctx, admin_queue);
	if (unlikely(ret)) {
		if (admin_queue->running_state)
			ena_trc_err("Failed to process command. ret = %d\n",
				    ret);
		else
			ena_trc_dbg("Failed to process command. ret = %d\n",
				    ret);
	}
	return ret;
}
int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
			 struct ena_com_io_cq *io_cq)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_create_cq_cmd create_cmd;
	struct ena_admin_acq_create_cq_resp_desc cmd_completion;
	int ret;

	memset(&create_cmd, 0x0, sizeof(struct ena_admin_aq_create_cq_cmd));

	create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_CQ;

	create_cmd.cq_caps_2 |= (io_cq->cdesc_entry_size_in_bytes / 4) &
		ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;
	create_cmd.cq_caps_1 |=
		ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK;

	create_cmd.msix_vector = io_cq->msix_vector;
	create_cmd.cq_depth = io_cq->q_depth;

	ret = ena_com_mem_addr_set(ena_dev,
				   &create_cmd.cq_ba,
				   io_cq->cdesc_addr.phys_addr);
	if (unlikely(ret)) {
		ena_trc_err("memory address set failed\n");
		return ret;
	}

	ret = ena_com_execute_admin_command(
		admin_queue,
		(struct ena_admin_aq_entry *)&create_cmd,
		sizeof(create_cmd),
		(struct ena_admin_acq_entry *)&cmd_completion,
		sizeof(cmd_completion));
	if (unlikely(ret)) {
		ena_trc_err("Failed to create IO CQ. error: %d\n", ret);
		return ret;
	}

	io_cq->idx = cmd_completion.cq_idx;
	io_cq->db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
		cmd_completion.cq_doorbell_offset);

	if (io_cq->q_depth != cmd_completion.cq_actual_depth) {
		ena_trc_err("actual completion queue size (%d) differs from requested size (%d)\n",
			    cmd_completion.cq_actual_depth, io_cq->q_depth);
		ena_com_destroy_io_cq(ena_dev, io_cq);
		return ENA_COM_NO_SPACE;
	}

	io_cq->unmask_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
		cmd_completion.cq_interrupt_unmask_register);

	if (cmd_completion.cq_head_db_offset)
		io_cq->cq_head_db_reg =
			(u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
			cmd_completion.cq_head_db_offset);

	ena_trc_dbg("created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth);

	return ret;
}
int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
			    struct ena_com_io_sq **io_sq,
			    struct ena_com_io_cq **io_cq)
{
	if (qid >= ENA_TOTAL_NUM_QUEUES) {
		ena_trc_err("Invalid queue number %d but the max is %d\n",
			    qid, ENA_TOTAL_NUM_QUEUES);
		return ENA_COM_INVAL;
	}

	*io_sq = &ena_dev->io_sq_queues[qid];
	*io_cq = &ena_dev->io_cq_queues[qid];

	return 0;
}
void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_comp_ctx *comp_ctx;
	u16 i;

	if (!admin_queue->comp_ctx)
		return;

	for (i = 0; i < admin_queue->q_depth; i++) {
		comp_ctx = get_comp_ctxt(admin_queue, i, false);
		comp_ctx->status = ENA_CMD_ABORTED;

		ENA_WAIT_EVENT_SIGNAL(comp_ctx->wait_event);
	}
}
void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	unsigned long flags = 0;

	ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
	while (ATOMIC32_READ(&admin_queue->outstanding_cmds) != 0) {
		ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
		ENA_MSLEEP(20);
		ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
	}
	ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
}
int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
			  struct ena_com_io_cq *io_cq)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_destroy_cq_cmd destroy_cmd;
	struct ena_admin_acq_destroy_cq_resp_desc destroy_resp;
	int ret;

	memset(&destroy_cmd, 0x0, sizeof(struct ena_admin_aq_destroy_cq_cmd));

	destroy_cmd.cq_idx = io_cq->idx;
	destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_CQ;

	ret = ena_com_execute_admin_command(
		admin_queue,
		(struct ena_admin_aq_entry *)&destroy_cmd,
		sizeof(destroy_cmd),
		(struct ena_admin_acq_entry *)&destroy_resp,
		sizeof(destroy_resp));

	if (unlikely(ret && (ret != ENA_COM_NO_DEVICE)))
		ena_trc_err("Failed to destroy IO CQ. error: %d\n", ret);

	return ret;
}
bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev)
{
	return ena_dev->admin_queue.running_state;
}
void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	unsigned long flags = 0;

	ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
	ena_dev->admin_queue.running_state = state;
	ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
}
void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev)
{
	u16 depth = ena_dev->aenq.q_depth;

	ENA_ASSERT(ena_dev->aenq.head == depth, "Invalid AENQ state\n");

	/* Init head_db to mark that all entries in the queue
	 * are initially available
	 */
	ENA_REG_WRITE32(depth, (unsigned char *)ena_dev->reg_bar
			+ ENA_REGS_AENQ_HEAD_DB_OFF);
}
int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
{
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	struct ena_admin_get_feat_resp get_resp;
	int ret = 0;

	if (unlikely(!ena_dev)) {
		ena_trc_err("%s : ena_dev is NULL\n", __func__);
		return ENA_COM_NO_DEVICE;
	}

	ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG);
	if (ret) {
		ena_trc_info("Can't get aenq configuration\n");
		return ret;
	}

	if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) {
		ena_trc_warn("Trying to set unsupported aenq events. supported flag: %x asked flag: %x\n",
			     get_resp.u.aenq.supported_groups,
			     groups_flag);
		return ENA_COM_PERMISSION;
	}

	memset(&cmd, 0x0, sizeof(cmd));
	admin_queue = &ena_dev->admin_queue;

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags = 0;
	cmd.feat_common.feature_id = ENA_ADMIN_AENQ_CONFIG;
	cmd.u.aenq.enabled_groups = groups_flag;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));

	if (unlikely(ret))
		ena_trc_err("Failed to config AENQ ret: %d\n", ret);

	return ret;
}
int ena_com_get_dma_width(struct ena_com_dev *ena_dev)
{
	u32 caps = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
	int width;

	if (unlikely(caps == ENA_MMIO_READ_TIMEOUT)) {
		ena_trc_err("Reg read timeout occurred\n");
		return ENA_COM_TIMER_EXPIRED;
	}

	width = (caps & ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK) >>
		ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT;

	ena_trc_dbg("ENA dma width: %d\n", width);

	if ((width < 32) || (width > ENA_MAX_PHYS_ADDR_SIZE_BITS)) {
		ena_trc_err("DMA width illegal value: %d\n", width);
		return ENA_COM_INVAL;
	}

	ena_dev->dma_addr_bits = width;

	return width;
}
int ena_com_validate_version(struct ena_com_dev *ena_dev)
{
	u32 ver;
	u32 ctrl_ver;
	u32 ctrl_ver_masked;

	/* Make sure the ENA version and the controller version are at least
	 * as recent as the driver expects
	 */
	ver = ena_com_reg_bar_read32(ena_dev, ENA_REGS_VERSION_OFF);
	ctrl_ver = ena_com_reg_bar_read32(ena_dev,
					  ENA_REGS_CONTROLLER_VERSION_OFF);

	if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) ||
		     (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) {
		ena_trc_err("Reg read timeout occurred\n");
		return ENA_COM_TIMER_EXPIRED;
	}

	ena_trc_info("ena device version: %d.%d\n",
		     (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >>
		     ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
		     ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);

	if (ver < MIN_ENA_VER) {
		ena_trc_err("ENA version is lower than the minimal version the driver supports\n");
		return -1;
	}

	ena_trc_info("ena controller version: %d.%d.%d implementation version %d\n",
		     (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK)
		     >> ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
		     (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK)
		     >> ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT,
		     (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK),
		     (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK) >>
		     ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT);

	ctrl_ver_masked =
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) |
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) |
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK);

	/* Validate the ctrl version without the implementation ID */
	if (ctrl_ver_masked < MIN_ENA_CTRL_VER) {
		ena_trc_err("ENA ctrl version is lower than the minimal ctrl version the driver supports\n");
		return -1;
	}

	return 0;
}
void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;

	if (!admin_queue)
		return;

	if (admin_queue->comp_ctx)
		ENA_MEM_FREE(ena_dev->dmadev, admin_queue->comp_ctx);
	admin_queue->comp_ctx = NULL;

	if (admin_queue->sq.entries)
		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
				      ADMIN_SQ_SIZE(admin_queue->q_depth),
				      admin_queue->sq.entries,
				      admin_queue->sq.dma_addr,
				      admin_queue->sq.mem_handle);
	admin_queue->sq.entries = NULL;

	if (admin_queue->cq.entries)
		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
				      ADMIN_CQ_SIZE(admin_queue->q_depth),
				      admin_queue->cq.entries,
				      admin_queue->cq.dma_addr,
				      admin_queue->cq.mem_handle);
	admin_queue->cq.entries = NULL;

	if (ena_dev->aenq.entries)
		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
				      ADMIN_AENQ_SIZE(ena_dev->aenq.q_depth),
				      ena_dev->aenq.entries,
				      ena_dev->aenq.dma_addr,
				      ena_dev->aenq.mem_handle);
	ena_dev->aenq.entries = NULL;
}
void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
{
	ena_dev->admin_queue.polling = polling;
}
int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

	ENA_SPINLOCK_INIT(mmio_read->lock);
	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
			       sizeof(*mmio_read->read_resp),
			       mmio_read->read_resp,
			       mmio_read->read_resp_dma_addr,
			       mmio_read->read_resp_mem_handle);
	if (unlikely(!mmio_read->read_resp))
		return ENA_COM_NO_MEM;

	ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);

	mmio_read->read_resp->req_id = 0x0;
	mmio_read->seq_num = 0x0;
	mmio_read->readless_supported = true;

	return 0;
}
void
ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

	mmio_read->readless_supported = readless_supported;
}
void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

	ENA_REG_WRITE32(0x0, (unsigned char *)ena_dev->reg_bar
			+ ENA_REGS_MMIO_RESP_LO_OFF);
	ENA_REG_WRITE32(0x0, (unsigned char *)ena_dev->reg_bar
			+ ENA_REGS_MMIO_RESP_HI_OFF);

	ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
			      sizeof(*mmio_read->read_resp),
			      mmio_read->read_resp,
			      mmio_read->read_resp_dma_addr,
			      mmio_read->read_resp_mem_handle);

	mmio_read->read_resp = NULL;
}
void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
	u32 addr_low, addr_high;

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(mmio_read->read_resp_dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(mmio_read->read_resp_dma_addr);

	ENA_REG_WRITE32(addr_low, (unsigned char *)ena_dev->reg_bar
			+ ENA_REGS_MMIO_RESP_LO_OFF);
	ENA_REG_WRITE32(addr_high, (unsigned char *)ena_dev->reg_bar
			+ ENA_REGS_MMIO_RESP_HI_OFF);
}
int ena_com_admin_init(struct ena_com_dev *ena_dev,
		       struct ena_aenq_handlers *aenq_handlers,
		       bool init_spinlock)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	u32 aq_caps, acq_caps, dev_sts, addr_low, addr_high;
	int ret;

	dev_sts = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);

	if (unlikely(dev_sts == ENA_MMIO_READ_TIMEOUT)) {
		ena_trc_err("Reg read timeout occurred\n");
		return ENA_COM_TIMER_EXPIRED;
	}

	if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) {
		ena_trc_err("Device isn't ready, abort com init\n");
		return -1;
	}

	admin_queue->q_depth = ENA_ADMIN_QUEUE_DEPTH;

	admin_queue->q_dmadev = ena_dev->dmadev;
	admin_queue->polling = false;
	admin_queue->curr_cmd_id = 0;

	ATOMIC32_SET(&admin_queue->outstanding_cmds, 0);

	if (init_spinlock)
		ENA_SPINLOCK_INIT(admin_queue->q_lock);

	ret = ena_com_init_comp_ctxt(admin_queue);
	if (ret)
		goto error;

	ret = ena_com_admin_init_sq(admin_queue);
	if (ret)
		goto error;

	ret = ena_com_admin_init_cq(admin_queue);
	if (ret)
		goto error;

	admin_queue->sq.db_addr = (u32 __iomem *)
		((unsigned char *)ena_dev->reg_bar + ENA_REGS_AQ_DB_OFF);

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->sq.dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->sq.dma_addr);

	ENA_REG_WRITE32(addr_low, (unsigned char *)ena_dev->reg_bar
			+ ENA_REGS_AQ_BASE_LO_OFF);
	ENA_REG_WRITE32(addr_high, (unsigned char *)ena_dev->reg_bar
			+ ENA_REGS_AQ_BASE_HI_OFF);

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->cq.dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->cq.dma_addr);

	ENA_REG_WRITE32(addr_low, (unsigned char *)ena_dev->reg_bar
			+ ENA_REGS_ACQ_BASE_LO_OFF);
	ENA_REG_WRITE32(addr_high, (unsigned char *)ena_dev->reg_bar
			+ ENA_REGS_ACQ_BASE_HI_OFF);

	aq_caps = 0;
	aq_caps |= admin_queue->q_depth & ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK;
	aq_caps |= (sizeof(struct ena_admin_aq_entry) <<
		ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT) &
		ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK;

	acq_caps = 0;
	acq_caps |= admin_queue->q_depth & ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK;
	acq_caps |= (sizeof(struct ena_admin_acq_entry) <<
		ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT) &
		ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK;

	ENA_REG_WRITE32(aq_caps, (unsigned char *)ena_dev->reg_bar
			+ ENA_REGS_AQ_CAPS_OFF);
	ENA_REG_WRITE32(acq_caps, (unsigned char *)ena_dev->reg_bar
			+ ENA_REGS_ACQ_CAPS_OFF);
	ret = ena_com_admin_init_aenq(ena_dev, aenq_handlers);
	if (ret)
		goto error;

	admin_queue->running_state = true;

	return 0;
error:
	ena_com_admin_destroy(ena_dev);

	return ret;
}
int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
			    u16 qid,
			    enum queue_direction direction,
			    enum ena_admin_placement_policy_type mem_queue_type,
			    u32 msix_vector,
			    u16 queue_size)
{
	struct ena_com_io_sq *io_sq;
	struct ena_com_io_cq *io_cq;
	int ret = 0;

	if (qid >= ENA_TOTAL_NUM_QUEUES) {
		ena_trc_err("Qid (%d) is bigger than max num of queues (%d)\n",
			    qid, ENA_TOTAL_NUM_QUEUES);
		return ENA_COM_INVAL;
	}

	io_sq = &ena_dev->io_sq_queues[qid];
	io_cq = &ena_dev->io_cq_queues[qid];

	memset(io_sq, 0x0, sizeof(struct ena_com_io_sq));
	memset(io_cq, 0x0, sizeof(struct ena_com_io_cq));

	/* Init CQ */
	io_cq->q_depth = queue_size;
	io_cq->direction = direction;
	io_cq->qid = qid;

	io_cq->msix_vector = msix_vector;

	io_sq->q_depth = queue_size;
	io_sq->direction = direction;
	io_sq->qid = qid;

	io_sq->mem_queue_type = mem_queue_type;

	if (direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
		/* header length is limited to 8 bits */
		io_sq->tx_max_header_size =
			ENA_MIN16(ena_dev->tx_max_header_size, SZ_256);

	ret = ena_com_init_io_sq(ena_dev, io_sq);
	if (ret)
		goto error;
	ret = ena_com_init_io_cq(ena_dev, io_cq);
	if (ret)
		goto error;

	ret = ena_com_create_io_cq(ena_dev, io_cq);
	if (ret)
		goto error;

	ret = ena_com_create_io_sq(ena_dev, io_sq, io_cq->idx);
	if (ret)
		goto destroy_io_cq;

	return 0;

destroy_io_cq:
	ena_com_destroy_io_cq(ena_dev, io_cq);
error:
	ena_com_io_queue_free(ena_dev, io_sq, io_cq);
	return ret;
}
void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid)
{
	struct ena_com_io_sq *io_sq;
	struct ena_com_io_cq *io_cq;

	if (qid >= ENA_TOTAL_NUM_QUEUES) {
		ena_trc_err("Qid (%d) is bigger than max num of queues (%d)\n",
			    qid, ENA_TOTAL_NUM_QUEUES);
		return;
	}

	io_sq = &ena_dev->io_sq_queues[qid];
	io_cq = &ena_dev->io_cq_queues[qid];

	ena_com_destroy_io_sq(ena_dev, io_sq);
	ena_com_destroy_io_cq(ena_dev, io_cq);

	ena_com_io_queue_free(ena_dev, io_sq, io_cq);
}
int ena_com_get_link_params(struct ena_com_dev *ena_dev,
			    struct ena_admin_get_feat_resp *resp)
{
	return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG);
}
int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
			      struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
	struct ena_admin_get_feat_resp get_resp;
	int rc;

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_DEVICE_ATTRIBUTES);
	if (rc)
		return rc;

	memcpy(&get_feat_ctx->dev_attr, &get_resp.u.dev_attr,
	       sizeof(get_resp.u.dev_attr));
	ena_dev->supported_features = get_resp.u.dev_attr.supported_features;

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_MAX_QUEUES_NUM);
	if (rc)
		return rc;

	memcpy(&get_feat_ctx->max_queues, &get_resp.u.max_queue,
	       sizeof(get_resp.u.max_queue));
	ena_dev->tx_max_header_size = get_resp.u.max_queue.max_header_size;

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_AENQ_CONFIG);
	if (rc)
		return rc;

	memcpy(&get_feat_ctx->aenq, &get_resp.u.aenq,
	       sizeof(get_resp.u.aenq));

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_STATELESS_OFFLOAD_CONFIG);
	if (rc)
		return rc;

	memcpy(&get_feat_ctx->offload, &get_resp.u.offload,
	       sizeof(get_resp.u.offload));

	return 0;
}
void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev)
{
	ena_com_handle_admin_completion(&ena_dev->admin_queue);
}
/* ena_handle_specific_aenq_event:
 * return the handler that is relevant to the specific event group
 */
static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *dev,
						     u16 group)
{
	struct ena_aenq_handlers *aenq_handlers = dev->aenq.aenq_handlers;

	if ((group < ENA_MAX_HANDLERS) && aenq_handlers->handlers[group])
		return aenq_handlers->handlers[group];

	return aenq_handlers->unimplemented_handler;
}
/* ena_aenq_intr_handler:
 * handles the aenq incoming events.
 * pop events from the queue and apply the specific handler
 */
void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
{
	struct ena_admin_aenq_entry *aenq_e;
	struct ena_admin_aenq_common_desc *aenq_common;
	struct ena_com_aenq *aenq = &dev->aenq;
	ena_aenq_handler handler_cb;
	u16 masked_head, processed = 0;
	u8 phase;

	masked_head = aenq->head & (aenq->q_depth - 1);
	phase = aenq->phase;
	aenq_e = &aenq->entries[masked_head]; /* Get first entry */
	aenq_common = &aenq_e->aenq_common_desc;

	/* Go over all the events */
	while ((aenq_common->flags & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) ==
		phase) {
		ena_trc_dbg("AENQ! Group[%x] Syndrom[%x] timestamp: [%llus]\n",
			    aenq_common->group,
			    aenq_common->syndrom,
			    (unsigned long long)aenq_common->timestamp_low +
			    ((u64)aenq_common->timestamp_high << 32));

		/* Handle specific event*/
		handler_cb = ena_com_get_specific_aenq_cb(dev,
							  aenq_common->group);
		handler_cb(data, aenq_e); /* call the actual event handler*/

		/* Get next event entry */
		masked_head++;
		processed++;

		if (unlikely(masked_head == aenq->q_depth)) {
			masked_head = 0;
			phase = !phase;
		}
		aenq_e = &aenq->entries[masked_head];
		aenq_common = &aenq_e->aenq_common_desc;
	}

	aenq->head += processed;
	aenq->phase = phase;

	/* Don't update aenq doorbell if there weren't any processed events */
	if (!processed)
		return;

	/* write the aenq doorbell after all AENQ descriptors were read */
	mb();
	ENA_REG_WRITE32((u32)aenq->head, (unsigned char *)dev->reg_bar
			+ ENA_REGS_AENQ_HEAD_DB_OFF);
}
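/* The AENQ head doorbell tells the device how many entries the driver has
 * consumed and may be reused; it is deliberately written once per interrupt,
 * after the whole batch was processed, rather than once per event.
 */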
/* Sets the function Idx and Queue Idx to be used for
 * get full statistics feature
 */
int ena_com_extended_stats_set_func_queue(struct ena_com_dev *ena_dev,
					  u32 func_queue)
{
	/* Function & Queue is acquired from user in the following format :
	 * Bottom Half word: funct
	 * Top Half Word: queue
	 */
	ena_dev->stats_func = ENA_EXTENDED_STAT_GET_FUNCT(func_queue);
	ena_dev->stats_queue = ENA_EXTENDED_STAT_GET_QUEUE(func_queue);

	return 0;
}
int ena_com_dev_reset(struct ena_com_dev *ena_dev)
{
	u32 stat, timeout, cap, reset_val;
	int rc;

	stat = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
	cap = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);

	if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) ||
		     (cap == ENA_MMIO_READ_TIMEOUT))) {
		ena_trc_err("Reg read32 timeout occurred\n");
		return ENA_COM_TIMER_EXPIRED;
	}

	if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) {
		ena_trc_err("Device isn't ready, can't reset device\n");
		return ENA_COM_INVAL;
	}

	timeout = (cap & ENA_REGS_CAPS_RESET_TIMEOUT_MASK) >>
		ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT;
	if (timeout == 0) {
		ena_trc_err("Invalid timeout value\n");
		return ENA_COM_INVAL;
	}

	/* start reset */
	reset_val = ENA_REGS_DEV_CTL_DEV_RESET_MASK;
	ENA_REG_WRITE32(reset_val, (unsigned char *)ena_dev->reg_bar
			+ ENA_REGS_DEV_CTL_OFF);

	/* Write again the MMIO read request address */
	ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);

	rc = wait_for_reset_state(ena_dev, timeout,
				  ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK);
	if (rc != 0) {
		ena_trc_err("Reset indication didn't turn on\n");
		return rc;
	}

	/* reset done */
	ENA_REG_WRITE32(0, (unsigned char *)ena_dev->reg_bar
			+ ENA_REGS_DEV_CTL_OFF);
	rc = wait_for_reset_state(ena_dev, timeout, 0);
	if (rc != 0) {
		ena_trc_err("Reset indication didn't turn off\n");
		return rc;
	}

	return 0;
}
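/* Reset handshake summary: (1) set DEV_RESET in DEV_CTL, (2) wait for
 * RESET_IN_PROGRESS to assert, (3) clear DEV_CTL, (4) wait for
 * RESET_IN_PROGRESS to deassert. The timeout granularity comes from the
 * CAPS register and is polled in 100ms steps by wait_for_reset_state().
 */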
static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
			     struct ena_admin_aq_get_stats_cmd *get_cmd,
			     struct ena_admin_acq_get_stats_resp *get_resp,
			     enum ena_admin_get_stats_type type)
{
	struct ena_com_admin_queue *admin_queue;
	int ret = 0;

	if (!ena_dev) {
		ena_trc_err("%s : ena_dev is NULL\n", __func__);
		return ENA_COM_NO_DEVICE;
	}

	admin_queue = &ena_dev->admin_queue;

	get_cmd->aq_common_descriptor.opcode = ENA_ADMIN_GET_STATS;
	get_cmd->aq_common_descriptor.flags = 0;
	get_cmd->type = type;

	ret = ena_com_execute_admin_command(
		admin_queue,
		(struct ena_admin_aq_entry *)get_cmd,
		sizeof(*get_cmd),
		(struct ena_admin_acq_entry *)get_resp,
		sizeof(*get_resp));

	if (unlikely(ret))
		ena_trc_err("Failed to get stats. error: %d\n", ret);

	return ret;
}
int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
				struct ena_admin_basic_stats *stats)
{
	int ret = 0;
	struct ena_admin_aq_get_stats_cmd get_cmd;
	struct ena_admin_acq_get_stats_resp get_resp;

	memset(&get_cmd, 0x0, sizeof(get_cmd));
	ret = ena_get_dev_stats(ena_dev, &get_cmd, &get_resp,
				ENA_ADMIN_GET_STATS_TYPE_BASIC);
	if (likely(ret == 0))
		memcpy(stats, &get_resp.basic_stats,
		       sizeof(get_resp.basic_stats));

	return ret;
}
int ena_com_get_dev_extended_stats(struct ena_com_dev *ena_dev, char *buff,
				   u32 len)
{
	int ret = 0;
	struct ena_admin_aq_get_stats_cmd get_cmd;
	struct ena_admin_acq_get_stats_resp get_resp;
	ena_mem_handle_t mem_handle = 0;
	void *virt_addr;
	dma_addr_t phys_addr;

	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev, len,
			       virt_addr, phys_addr, mem_handle);
	if (!virt_addr) {
		ret = ENA_COM_NO_MEM;
		goto done;
	}
	memset(&get_cmd, 0x0, sizeof(get_cmd));
	ret = ena_com_mem_addr_set(ena_dev,
				   &get_cmd.u.control_buffer.address,
				   phys_addr);
	if (unlikely(ret)) {
		ena_trc_err("memory address set failed\n");
		goto free_ext_stats_mem;
	}
	get_cmd.u.control_buffer.length = len;

	get_cmd.device_id = ena_dev->stats_func;
	get_cmd.queue_idx = ena_dev->stats_queue;

	ret = ena_get_dev_stats(ena_dev, &get_cmd, &get_resp,
				ENA_ADMIN_GET_STATS_TYPE_EXTENDED);
	if (ret < 0)
		goto free_ext_stats_mem;

	ret = snprintf(buff, len, "%s", (char *)virt_addr);

free_ext_stats_mem:
	ENA_MEM_FREE_COHERENT(ena_dev->dmadev, len, virt_addr, phys_addr,
			      mem_handle);
done:
	return ret;
}
int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu)
{
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret = 0;

	if (unlikely(!ena_dev)) {
		ena_trc_err("%s : ena_dev is NULL\n", __func__);
		return ENA_COM_NO_DEVICE;
	}

	if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
		ena_trc_info("Feature %d isn't supported\n", ENA_ADMIN_MTU);
		return ENA_COM_PERMISSION;
	}

	memset(&cmd, 0x0, sizeof(cmd));
	admin_queue = &ena_dev->admin_queue;

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags = 0;
	cmd.feat_common.feature_id = ENA_ADMIN_MTU;
	cmd.u.mtu.mtu = mtu;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));

	if (unlikely(ret)) {
		ena_trc_err("Failed to set mtu %d. error: %d\n", mtu, ret);
		return ENA_COM_INVAL;
	}
	return 0;
}
int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
				 struct ena_admin_feature_offload_desc *offload)
{
	int ret;
	struct ena_admin_get_feat_resp resp;

	ret = ena_com_get_feature(ena_dev, &resp,
				  ENA_ADMIN_STATELESS_OFFLOAD_CONFIG);
	if (unlikely(ret)) {
		ena_trc_err("Failed to get offload capabilities %d\n", ret);
		return ENA_COM_INVAL;
	}

	memcpy(offload, &resp.u.offload, sizeof(resp.u.offload));

	return 0;
}
int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	struct ena_admin_get_feat_resp get_resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev,
						ENA_ADMIN_RSS_HASH_FUNCTION)) {
		ena_trc_info("Feature %d isn't supported\n",
			     ENA_ADMIN_RSS_HASH_FUNCTION);
		return ENA_COM_PERMISSION;
	}

	/* Validate hash function is supported */
	ret = ena_com_get_feature(ena_dev, &get_resp,
				  ENA_ADMIN_RSS_HASH_FUNCTION);
	if (unlikely(ret))
		return ret;

	if (!(get_resp.u.flow_hash_func.supported_func &
	      (1 << rss->hash_func))) {
		ena_trc_err("Func hash %d isn't supported by device, abort\n",
			    rss->hash_func);
		return ENA_COM_PERMISSION;
	}

	memset(&cmd, 0x0, sizeof(cmd));

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags =
		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_FUNCTION;
	cmd.u.flow_hash_func.init_val = rss->hash_init_val;
	cmd.u.flow_hash_func.selected_func = 1 << rss->hash_func;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.control_buffer.address,
				   rss->hash_key_dma_addr);
	if (unlikely(ret)) {
		ena_trc_err("memory address set failed\n");
		return ret;
	}

	cmd.control_buffer.length = sizeof(*rss->hash_key);

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));
	if (unlikely(ret)) {
		ena_trc_err("Failed to set hash function %d. error: %d\n",
			    rss->hash_func, ret);
		return ENA_COM_INVAL;
	}

	return 0;
}
int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
			       enum ena_admin_hash_functions func,
			       const u8 *key, u16 key_len, u32 init_val)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	struct ena_admin_feature_rss_flow_hash_control *hash_key =
		rss->hash_key;
	int rc;

	/* Make sure size is a mult of DWs */
	if (unlikely(key_len & 0x3))
		return ENA_COM_INVAL;

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_HASH_FUNCTION,
				    rss->hash_key_dma_addr,
				    sizeof(*rss->hash_key));
	if (unlikely(rc))
		return rc;

	if (!((1 << func) & get_resp.u.flow_hash_func.supported_func)) {
		ena_trc_err("Flow hash function %d isn't supported\n", func);
		return ENA_COM_PERMISSION;
	}

	switch (func) {
	case ENA_ADMIN_TOEPLITZ:
		if (key_len > sizeof(hash_key->key)) {
			ena_trc_err("key len (%hu) is bigger than the max supported (%zu)\n",
				    key_len, sizeof(hash_key->key));
			return ENA_COM_INVAL;
		}

		memcpy(hash_key->key, key, key_len);
		rss->hash_init_val = init_val;
		hash_key->keys_num = key_len >> 2;
		break;
	case ENA_ADMIN_CRC32:
		rss->hash_init_val = init_val;
		break;
	default:
		ena_trc_err("Invalid hash function (%d)\n", func);
		return ENA_COM_INVAL;
	}

	rc = ena_com_set_hash_function(ena_dev);

	/* Restore the old function */
	if (unlikely(rc))
		ena_com_get_hash_function(ena_dev, NULL, NULL);

	return rc;
}
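/* Typical usage sketch (illustrative only; rss_key is a caller-supplied
 * 40-byte Toeplitz key, not something defined in this file):
 *
 *	static const u8 rss_key[40] = { 0 };
 *	rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_TOEPLITZ,
 *					rss_key, sizeof(rss_key), 0);
 *
 * key_len must be a multiple of 4 because the device consumes the key as
 * DWORDs (hash_key->keys_num = key_len >> 2 above).
 */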
int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
			      enum ena_admin_hash_functions *func,
			      u8 *key)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	struct ena_admin_feature_rss_flow_hash_control *hash_key =
		rss->hash_key;
	int rc;

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_HASH_FUNCTION,
				    rss->hash_key_dma_addr,
				    sizeof(*rss->hash_key));
	if (unlikely(rc))
		return rc;

	rss->hash_func = (enum ena_admin_hash_functions)get_resp.u.flow_hash_func.selected_func;
	if (func)
		*func = rss->hash_func;

	if (key)
		memcpy(key, hash_key->key, hash_key->keys_num << 2);

	return 0;
}
int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev,
                          enum ena_admin_flow_hash_proto proto,
                          u16 *fields)
{
        struct ena_rss *rss = &ena_dev->rss;
        struct ena_admin_get_feat_resp get_resp;
        int rc;

        rc = ena_com_get_feature_ex(ena_dev, &get_resp,
                                    ENA_ADMIN_RSS_HASH_INPUT,
                                    rss->hash_ctrl_dma_addr,
                                    sizeof(*rss->hash_ctrl));
        if (unlikely(rc))
                return rc;

        if (fields)
                *fields = rss->hash_ctrl->selected_fields[proto].fields;

        return 0;
}

int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
{
        struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
        struct ena_rss *rss = &ena_dev->rss;
        struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
        struct ena_admin_set_feat_cmd cmd;
        struct ena_admin_set_feat_resp resp;
        int ret;

        if (!ena_com_check_supported_feature_id(ena_dev,
                                                ENA_ADMIN_RSS_HASH_INPUT)) {
                ena_trc_info("Feature %d isn't supported\n",
                             ENA_ADMIN_RSS_HASH_INPUT);
                return ENA_COM_PERMISSION;
        }

        memset(&cmd, 0x0, sizeof(cmd));

        cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
        cmd.aq_common_descriptor.flags =
                ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
        cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_INPUT;
        cmd.u.flow_hash_input.enabled_input_sort =
                ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK |
                ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK;

        ret = ena_com_mem_addr_set(ena_dev,
                                   &cmd.control_buffer.address,
                                   rss->hash_ctrl_dma_addr);
        if (unlikely(ret)) {
                ena_trc_err("memory address set failed\n");
                return ret;
        }

        cmd.control_buffer.length = sizeof(*hash_ctrl);

        ret = ena_com_execute_admin_command(admin_queue,
                                            (struct ena_admin_aq_entry *)&cmd,
                                            sizeof(cmd),
                                            (struct ena_admin_acq_entry *)&resp,
                                            sizeof(resp));
        if (unlikely(ret)) {
                ena_trc_err("Failed to set hash input. error: %d\n", ret);
                ret = ENA_COM_INVAL;
        }

        return ret;
}

int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
{
        struct ena_rss *rss = &ena_dev->rss;
        struct ena_admin_feature_rss_hash_control *hash_ctrl =
                rss->hash_ctrl;
        u16 available_fields = 0;
        int rc, i;

        /* Get the supported hash input */
        rc = ena_com_get_hash_ctrl(ena_dev, (enum ena_admin_flow_hash_proto)0, NULL);
        if (unlikely(rc))
                return rc;

        hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP4].fields =
                ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
                ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

        hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP4].fields =
                ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
                ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

        hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP6].fields =
                ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
                ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

        hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP6].fields =
                ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
                ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

        hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4].fields =
                ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

        hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP6].fields =
                ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

        hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields =
                ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

        hash_ctrl->selected_fields[ENA_ADMIN_RSS_NOT_IP].fields =
                ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA;

        for (i = 0; i < ENA_ADMIN_RSS_PROTO_NUM; i++) {
                available_fields = hash_ctrl->selected_fields[i].fields &
                                hash_ctrl->supported_fields[i].fields;
                if (available_fields != hash_ctrl->selected_fields[i].fields) {
                        ena_trc_err("hash control doesn't support all the desired configuration. proto %x supported %x selected %x\n",
                                    i, hash_ctrl->supported_fields[i].fields,
                                    hash_ctrl->selected_fields[i].fields);
                        return ENA_COM_PERMISSION;
                }
        }

        rc = ena_com_set_hash_ctrl(ena_dev);

        /* In case of failure, restore the old hash ctrl */
        if (unlikely(rc))
                ena_com_get_hash_ctrl(ena_dev, (enum ena_admin_flow_hash_proto)0, NULL);

        return rc;
}

int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
                           enum ena_admin_flow_hash_proto proto,
                           u16 hash_fields)
{
        struct ena_rss *rss = &ena_dev->rss;
        struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
        u16 supported_fields;
        int rc;

        if (proto >= ENA_ADMIN_RSS_PROTO_NUM) {
                ena_trc_err("Invalid proto num (%u)\n", proto);
                return ENA_COM_INVAL;
        }

        /* Get the ctrl table */
        rc = ena_com_get_hash_ctrl(ena_dev, proto, NULL);
        if (unlikely(rc))
                return rc;

        /* Make sure all the fields are supported */
        supported_fields = hash_ctrl->supported_fields[proto].fields;
        if ((hash_fields & supported_fields) != hash_fields) {
                ena_trc_err("proto %d doesn't support the required fields %x. supports only: %x\n",
                            proto, hash_fields, supported_fields);
        }

        hash_ctrl->selected_fields[proto].fields = hash_fields;

        rc = ena_com_set_hash_ctrl(ena_dev);

        /* In case of failure, restore the old hash ctrl */
        if (unlikely(rc))
                ena_com_get_hash_ctrl(ena_dev, (enum ena_admin_flow_hash_proto)0, NULL);

        return rc;
}

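/* Usage sketch: enabling 4-tuple hashing for UDP/IPv4 only. Illustrative
 * caller code; it assumes RSS was already set up with ena_com_rss_init().
 *
 *      int rc;
 *
 *      rc = ena_com_fill_hash_ctrl(ena_dev, ENA_ADMIN_RSS_UDP4,
 *                                  ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
 *                                  ENA_ADMIN_RSS_L4_SP | ENA_ADMIN_RSS_L4_DP);
 *
 * The selected fields are committed through ena_com_set_hash_ctrl(), and
 * on failure the host copy is re-read from the device so it stays
 * consistent with what the device actually uses.
 */
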
int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev,
                                      u16 entry_idx, u16 entry_value)
{
        struct ena_rss *rss = &ena_dev->rss;

        if (unlikely(entry_idx >= (1 << rss->tbl_log_size)))
                return ENA_COM_INVAL;

        if (unlikely(entry_value > ENA_TOTAL_NUM_QUEUES))
                return ENA_COM_INVAL;

        rss->host_rss_ind_tbl[entry_idx] = entry_value;

        return 0;
}

int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
{
        struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
        struct ena_rss *rss = &ena_dev->rss;
        struct ena_admin_set_feat_cmd cmd;
        struct ena_admin_set_feat_resp resp;
        int ret;

        if (!ena_com_check_supported_feature_id(ena_dev,
                                                ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG)) {
                ena_trc_info("Feature %d isn't supported\n",
                             ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
                return ENA_COM_PERMISSION;
        }

        ret = ena_com_ind_tbl_convert_to_device(ena_dev);
        if (ret) {
                ena_trc_err("Failed to convert host indirection table to device table\n");
                return ret;
        }

        memset(&cmd, 0x0, sizeof(cmd));

        cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
        cmd.aq_common_descriptor.flags =
                ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
        cmd.feat_common.feature_id = ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG;
        cmd.u.ind_table.size = rss->tbl_log_size;
        cmd.u.ind_table.inline_index = 0xFFFFFFFF;

        ret = ena_com_mem_addr_set(ena_dev,
                                   &cmd.control_buffer.address,
                                   rss->rss_ind_tbl_dma_addr);
        if (unlikely(ret)) {
                ena_trc_err("memory address set failed\n");
                return ret;
        }

        cmd.control_buffer.length = (1 << rss->tbl_log_size) *
                sizeof(struct ena_admin_rss_ind_table_entry);

        ret = ena_com_execute_admin_command(admin_queue,
                                            (struct ena_admin_aq_entry *)&cmd,
                                            sizeof(cmd),
                                            (struct ena_admin_acq_entry *)&resp,
                                            sizeof(resp));

        if (unlikely(ret)) {
                ena_trc_err("Failed to set indirect table. error: %d\n", ret);
                return ENA_COM_INVAL;
        }

        return 0;
}

int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl)
{
        struct ena_rss *rss = &ena_dev->rss;
        struct ena_admin_get_feat_resp get_resp;
        u32 tbl_size;
        int i, rc;

        tbl_size = (1 << rss->tbl_log_size) *
                sizeof(struct ena_admin_rss_ind_table_entry);

        rc = ena_com_get_feature_ex(ena_dev, &get_resp,
                                    ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG,
                                    rss->rss_ind_tbl_dma_addr,
                                    tbl_size);
        if (unlikely(rc))
                return rc;

        rc = ena_com_ind_tbl_convert_from_device(ena_dev);
        if (unlikely(rc))
                return rc;

        for (i = 0; i < (1 << rss->tbl_log_size); i++)
                ind_tbl[i] = rss->host_rss_ind_tbl[i];

        return 0;
}

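/* Usage sketch: spreading the indirection table round-robin over the RX
 * queues and committing it. Illustrative caller code; nr_queues and
 * ENA_RX_RSS_TABLE_LOG_SIZE (the log size the caller passed to
 * ena_com_rss_init()) are assumed names from the caller's side.
 *
 *      u16 i, tbl_size = 1 << ENA_RX_RSS_TABLE_LOG_SIZE;
 *      int rc = 0;
 *
 *      for (i = 0; i < tbl_size; i++) {
 *              rc = ena_com_indirect_table_fill_entry(ena_dev, i,
 *                                                     i % nr_queues);
 *              if (unlikely(rc))
 *                      break;
 *      }
 *      if (!rc)
 *              rc = ena_com_indirect_table_set(ena_dev);
 *
 * Entries are staged in host_rss_ind_tbl and only translated to device
 * queue indices when ena_com_indirect_table_set() runs.
 */
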
int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size)
{
        int rc;

        memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));

        rc = ena_com_indirect_table_allocate(ena_dev, indr_tbl_log_size);
        if (unlikely(rc))
                goto err_indr_tbl;

        rc = ena_com_hash_key_allocate(ena_dev);
        if (unlikely(rc))
                goto err_hash_key;

        rc = ena_com_hash_ctrl_init(ena_dev);
        if (unlikely(rc))
                goto err_hash_ctrl;

        return 0;

err_hash_ctrl:
        ena_com_hash_key_destroy(ena_dev);
err_hash_key:
        ena_com_indirect_table_destroy(ena_dev);
err_indr_tbl:
        return rc;
}

int ena_com_rss_destroy(struct ena_com_dev *ena_dev)
{
        ena_com_indirect_table_destroy(ena_dev);
        ena_com_hash_key_destroy(ena_dev);
        ena_com_hash_ctrl_destroy(ena_dev);

        memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));

        return 0;
}

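/* The RSS API above is expected to be driven in roughly this order
 * (sketch only; error handling and the exact hash configuration are up
 * to the caller):
 *
 *      rc = ena_com_rss_init(ena_dev, log_size);
 *      ... ena_com_indirect_table_fill_entry() for each entry ...
 *      rc = ena_com_indirect_table_set(ena_dev);
 *      rc = ena_com_fill_hash_function(ena_dev, ...);
 *      rc = ena_com_set_default_hash_ctrl(ena_dev);
 *      ...
 *      ena_com_rss_destroy(ena_dev);       on teardown
 */
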
int ena_com_allocate_host_attribute(struct ena_com_dev *ena_dev,
                                    u32 debug_area_size)
{
        struct ena_host_attribute *host_attr = &ena_dev->host_attr;
        int rc;

        ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
                               sizeof(*host_attr->host_info),
                               host_attr->host_info,
                               host_attr->host_info_dma_addr,
                               host_attr->host_info_dma_handle);
        if (unlikely(!host_attr->host_info))
                return ENA_COM_NO_MEM;

        if (debug_area_size) {
                ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
                                       debug_area_size,
                                       host_attr->debug_area_virt_addr,
                                       host_attr->debug_area_dma_addr,
                                       host_attr->debug_area_dma_handle);
                if (unlikely(!host_attr->debug_area_virt_addr)) {
                        rc = ENA_COM_NO_MEM;
                        goto err;
                }
        }

        host_attr->debug_area_size = debug_area_size;

        return 0;

err:
        ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
                              sizeof(*host_attr->host_info),
                              host_attr->host_info,
                              host_attr->host_info_dma_addr,
                              host_attr->host_info_dma_handle);
        host_attr->host_info = NULL;
        return rc;
}

void ena_com_delete_host_attribute(struct ena_com_dev *ena_dev)
{
        struct ena_host_attribute *host_attr = &ena_dev->host_attr;

        if (host_attr->host_info) {
                ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
                                      sizeof(*host_attr->host_info),
                                      host_attr->host_info,
                                      host_attr->host_info_dma_addr,
                                      host_attr->host_info_dma_handle);
                host_attr->host_info = NULL;
        }

        if (host_attr->debug_area_virt_addr) {
                ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
                                      host_attr->debug_area_size,
                                      host_attr->debug_area_virt_addr,
                                      host_attr->debug_area_dma_addr,
                                      host_attr->debug_area_dma_handle);
                host_attr->debug_area_virt_addr = NULL;
        }
}

int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
{
        struct ena_host_attribute *host_attr;
        struct ena_com_admin_queue *admin_queue;
        struct ena_admin_set_feat_cmd cmd;
        struct ena_admin_set_feat_resp resp;
        int ret;

        if (unlikely(!ena_dev)) {
                ena_trc_err("%s : ena_dev is NULL\n", __func__);
                return ENA_COM_NO_DEVICE;
        }

        if (!ena_com_check_supported_feature_id(ena_dev,
                                                ENA_ADMIN_HOST_ATTR_CONFIG)) {
                ena_trc_warn("Set host attribute isn't supported\n");
                return ENA_COM_PERMISSION;
        }

        host_attr = &ena_dev->host_attr;

        memset(&cmd, 0x0, sizeof(cmd));
        admin_queue = &ena_dev->admin_queue;

        cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
        cmd.feat_common.feature_id = ENA_ADMIN_HOST_ATTR_CONFIG;

        ret = ena_com_mem_addr_set(ena_dev,
                                   &cmd.u.host_attr.debug_ba,
                                   host_attr->debug_area_dma_addr);
        if (unlikely(ret)) {
                ena_trc_err("memory address set failed\n");
                return ret;
        }

        ret = ena_com_mem_addr_set(ena_dev,
                                   &cmd.u.host_attr.os_info_ba,
                                   host_attr->host_info_dma_addr);
        if (unlikely(ret)) {
                ena_trc_err("memory address set failed\n");
                return ret;
        }

        cmd.u.host_attr.debug_area_size = host_attr->debug_area_size;

        ret = ena_com_execute_admin_command(admin_queue,
                                            (struct ena_admin_aq_entry *)&cmd,
                                            sizeof(cmd),
                                            (struct ena_admin_acq_entry *)&resp,
                                            sizeof(resp));
        if (unlikely(ret))
                ena_trc_err("Failed to set host attributes: %d\n", ret);

        return ret;
}

/* Interrupt moderation */
bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev)
{
        return ena_com_check_supported_feature_id(ena_dev,
                                                  ENA_ADMIN_INTERRUPT_MODERATION);
}

int
ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
                                                  u32 tx_coalesce_usecs)
{
        if (!ena_dev->intr_delay_resolution) {
                ena_trc_err("Illegal interrupt delay granularity value\n");
                return ENA_COM_FAULT;
        }

        ena_dev->intr_moder_tx_interval = tx_coalesce_usecs /
                ena_dev->intr_delay_resolution;

        return 0;
}

int
ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
                                                  u32 rx_coalesce_usecs)
{
        if (!ena_dev->intr_delay_resolution) {
                ena_trc_err("Illegal interrupt delay granularity value\n");
                return ENA_COM_FAULT;
        }

        /* We use LOWEST entry of moderation table for storing
         * nonadaptive interrupt coalescing values
         */
        ena_dev->intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval =
                rx_coalesce_usecs / ena_dev->intr_delay_resolution;

        return 0;
}

void ena_com_destroy_interrupt_moderation(struct ena_com_dev *ena_dev)
{
        if (ena_dev->intr_moder_tbl)
                ENA_MEM_FREE(ena_dev->dmadev, ena_dev->intr_moder_tbl);
        ena_dev->intr_moder_tbl = NULL;
}

int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
{
        struct ena_admin_get_feat_resp get_resp;
        u32 delay_resolution;
        int rc;

        rc = ena_com_get_feature(ena_dev, &get_resp,
                                 ENA_ADMIN_INTERRUPT_MODERATION);

        if (rc) {
                if (rc == ENA_COM_PERMISSION) {
                        ena_trc_info("Feature %d isn't supported\n",
                                     ENA_ADMIN_INTERRUPT_MODERATION);
                        rc = 0;
                } else {
                        ena_trc_err("Failed to get interrupt moderation admin cmd. rc: %d\n",
                                    rc);
                }

                /* no moderation supported, disable adaptive support */
                ena_com_disable_adaptive_moderation(ena_dev);
                return rc;
        }

        rc = ena_com_init_interrupt_moderation_table(ena_dev);
        if (rc)
                goto err;

        /* if moderation is supported by device we set adaptive moderation */
        delay_resolution = get_resp.u.intr_moderation.intr_delay_resolution;
        ena_com_update_intr_delay_resolution(ena_dev, delay_resolution);
        ena_com_enable_adaptive_moderation(ena_dev);

        return 0;

err:
        ena_com_destroy_interrupt_moderation(ena_dev);
        return rc;
}

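/* Usage sketch: bring up moderation, then pin a fixed RX interval when
 * the adaptive scheme is not wanted. Illustrative caller code; 150 usecs
 * is an arbitrary example value.
 *
 *      int rc;
 *
 *      rc = ena_com_init_interrupt_moderation(ena_dev);
 *      if (rc)
 *              return rc;
 *      if (ena_com_interrupt_moderation_supported(ena_dev)) {
 *              ena_com_disable_adaptive_moderation(ena_dev);
 *              rc = ena_com_update_nonadaptive_moderation_interval_rx(ena_dev,
 *                                                                     150);
 *      }
 *
 * The usec value is divided by the device's intr_delay_resolution before
 * it is stored, so resolutions coarser than 1 usec round the interval down.
 */
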
void
ena_com_config_default_interrupt_moderation_table(struct ena_com_dev *ena_dev)
{
        struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;

        if (!intr_moder_tbl)
                return;

        intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval =
                ENA_INTR_LOWEST_USECS;
        intr_moder_tbl[ENA_INTR_MODER_LOWEST].pkts_per_interval =
                ENA_INTR_LOWEST_PKTS;
        intr_moder_tbl[ENA_INTR_MODER_LOWEST].bytes_per_interval =
                ENA_INTR_LOWEST_BYTES;

        intr_moder_tbl[ENA_INTR_MODER_LOW].intr_moder_interval =
                ENA_INTR_LOW_USECS;
        intr_moder_tbl[ENA_INTR_MODER_LOW].pkts_per_interval =
                ENA_INTR_LOW_PKTS;
        intr_moder_tbl[ENA_INTR_MODER_LOW].bytes_per_interval =
                ENA_INTR_LOW_BYTES;

        intr_moder_tbl[ENA_INTR_MODER_MID].intr_moder_interval =
                ENA_INTR_MID_USECS;
        intr_moder_tbl[ENA_INTR_MODER_MID].pkts_per_interval =
                ENA_INTR_MID_PKTS;
        intr_moder_tbl[ENA_INTR_MODER_MID].bytes_per_interval =
                ENA_INTR_MID_BYTES;

        intr_moder_tbl[ENA_INTR_MODER_HIGH].intr_moder_interval =
                ENA_INTR_HIGH_USECS;
        intr_moder_tbl[ENA_INTR_MODER_HIGH].pkts_per_interval =
                ENA_INTR_HIGH_PKTS;
        intr_moder_tbl[ENA_INTR_MODER_HIGH].bytes_per_interval =
                ENA_INTR_HIGH_BYTES;

        intr_moder_tbl[ENA_INTR_MODER_HIGHEST].intr_moder_interval =
                ENA_INTR_HIGHEST_USECS;
        intr_moder_tbl[ENA_INTR_MODER_HIGHEST].pkts_per_interval =
                ENA_INTR_HIGHEST_PKTS;
        intr_moder_tbl[ENA_INTR_MODER_HIGHEST].bytes_per_interval =
                ENA_INTR_HIGHEST_BYTES;
}

unsigned int
ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev)
{
        return ena_dev->intr_moder_tx_interval;
}

unsigned int
ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev)
{
        struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;

        return intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval;
}

void ena_com_init_intr_moderation_entry(struct ena_com_dev *ena_dev,
                                        enum ena_intr_moder_level level,
                                        struct ena_intr_moder_entry *entry)
{
        struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;

        if (level >= ENA_INTR_MAX_NUM_OF_LEVELS)
                return;

        intr_moder_tbl[level].intr_moder_interval = entry->intr_moder_interval;
        if (ena_dev->intr_delay_resolution)
                intr_moder_tbl[level].intr_moder_interval /=
                        ena_dev->intr_delay_resolution;
        intr_moder_tbl[level].pkts_per_interval = entry->pkts_per_interval;
        intr_moder_tbl[level].bytes_per_interval = entry->bytes_per_interval;
}

void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev,
                                       enum ena_intr_moder_level level,
                                       struct ena_intr_moder_entry *entry)
{
        struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;

        if (level >= ENA_INTR_MAX_NUM_OF_LEVELS)
                return;

        entry->intr_moder_interval = intr_moder_tbl[level].intr_moder_interval;
        if (ena_dev->intr_delay_resolution)
                entry->intr_moder_interval *= ena_dev->intr_delay_resolution;
        entry->pkts_per_interval =
                intr_moder_tbl[level].pkts_per_interval;
        entry->bytes_per_interval = intr_moder_tbl[level].bytes_per_interval;
}

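/* Usage sketch: overriding one adaptive-moderation level at runtime.
 * Illustrative caller code; the numbers are arbitrary example values.
 *
 *      struct ena_intr_moder_entry entry;
 *
 *      ena_com_get_intr_moderation_entry(ena_dev, ENA_INTR_MODER_MID, &entry);
 *      entry.intr_moder_interval = 256;        usecs, converted internally
 *      entry.pkts_per_interval = 48;
 *      ena_com_init_intr_moderation_entry(ena_dev, ENA_INTR_MODER_MID, &entry);
 *
 * The two calls are symmetric: init_ divides the interval by the device's
 * intr_delay_resolution before storing it, and get_ multiplies it back to
 * usecs, so a get/modify/init round-trip preserves units.
 */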