/*-
 * Copyright (c) 2015-2016 Amazon.com, Inc. or its affiliates.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in
 * the documentation and/or other materials provided with the
 * distribution.
 * * Neither the name of copyright holder nor the names of its
 * contributors may be used to endorse or promote products derived
 * from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "ena_com.h"
/*****************************************************************************/
/*****************************************************************************/

/* Timeout in micro-sec */
#define ADMIN_CMD_TIMEOUT_US (3000000)

#define ENA_ASYNC_QUEUE_DEPTH 16
#define ENA_ADMIN_QUEUE_DEPTH 32

#ifdef ENA_EXTENDED_STATS

#define ENA_HISTOGRAM_ACTIVE_MASK_OFFSET 0xF08
#define ENA_EXTENDED_STAT_GET_FUNCT(_funct_queue) (_funct_queue & 0xFFFF)
#define ENA_EXTENDED_STAT_GET_QUEUE(_funct_queue) (_funct_queue >> 16)

#endif /* ENA_EXTENDED_STATS */

#define MIN_ENA_VER (((ENA_COMMON_SPEC_VERSION_MAJOR) << \
		ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) \
		| (ENA_COMMON_SPEC_VERSION_MINOR))

#define ENA_CTRL_MAJOR		0
#define ENA_CTRL_MINOR		0
#define ENA_CTRL_SUB_MINOR	1

#define MIN_ENA_CTRL_VER \
	(((ENA_CTRL_MAJOR) << \
	(ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT)) | \
	((ENA_CTRL_MINOR) << \
	(ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT)) | \
	(ENA_CTRL_SUB_MINOR))

#define ENA_DMA_ADDR_TO_UINT32_LOW(x)	((u32)((u64)(x)))
#define ENA_DMA_ADDR_TO_UINT32_HIGH(x)	((u32)(((u64)(x)) >> 32))

#define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF

#define ENA_REGS_ADMIN_INTR_MASK 1

#define ENA_POLL_MS	5
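/* Illustrative sketch (added commentary, not upstream code): how the two
 * ENA_DMA_ADDR_TO_UINT32_* macros split a 64-bit DMA address into the two
 * 32-bit halves that the BASE_LO/BASE_HI device registers expect. The sample
 * address below is arbitrary.
 */
#if 0 /* example only, excluded from the build */
static void ena_example_dma_addr_split(void)
{
	u64 dma_addr = 0x0000001234ABCD00ULL;
	u32 lo = ENA_DMA_ADDR_TO_UINT32_LOW(dma_addr);	/* 0x34ABCD00 */
	u32 hi = ENA_DMA_ADDR_TO_UINT32_HIGH(dma_addr);	/* 0x00000012 */

	/* The device reassembles the address as ((u64)hi << 32) | lo. */
	(void)lo;
	(void)hi;
}
#endif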
/*****************************************************************************/
/*****************************************************************************/
/*****************************************************************************/
enum ena_cmd_status {
	ENA_CMD_SUBMITTED,
	ENA_CMD_COMPLETED,
	/* Abort - canceled by the driver */
	ENA_CMD_ABORTED,
};

struct ena_comp_ctx {
	ena_wait_event_t wait_event;
	struct ena_admin_acq_entry *user_cqe;
	u32 comp_size;
	enum ena_cmd_status status;
	/* status from the device */
	u8 comp_status;
	u8 cmd_opcode;
	bool occupied;
};

struct ena_com_stats_ctx {
	struct ena_admin_aq_get_stats_cmd get_cmd;
	struct ena_admin_acq_get_stats_resp get_resp;
};
static inline int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
				       struct ena_common_mem_addr *ena_addr,
				       dma_addr_t addr)
{
	if ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr) {
		ena_trc_err("dma address has more bits than the device supports\n");
		return ENA_COM_INVAL;
	}

	ena_addr->mem_addr_low = lower_32_bits(addr);
	ena_addr->mem_addr_high = (u16)upper_32_bits(addr);

	return 0;
}
static int ena_com_admin_init_sq(struct ena_com_admin_queue *queue)
{
	struct ena_com_admin_sq *sq = &queue->sq;
	u16 size = ADMIN_SQ_SIZE(queue->q_depth);

	ENA_MEM_ALLOC_COHERENT(queue->q_dmadev, size, sq->entries, sq->dma_addr,
			       sq->mem_handle);

	if (!sq->entries) {
		ena_trc_err("memory allocation failed\n");
		return ENA_COM_NO_MEM;
	}

	sq->head = 0;
	sq->tail = 0;
	sq->phase = 1;

	sq->db_addr = NULL;

	return 0;
}
static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue)
{
	struct ena_com_admin_cq *cq = &queue->cq;
	u16 size = ADMIN_CQ_SIZE(queue->q_depth);

	ENA_MEM_ALLOC_COHERENT(queue->q_dmadev, size, cq->entries, cq->dma_addr,
			       cq->mem_handle);

	if (!cq->entries) {
		ena_trc_err("memory allocation failed\n");
		return ENA_COM_NO_MEM;
	}

	cq->head = 0;
	cq->phase = 1;

	return 0;
}
static int ena_com_admin_init_aenq(struct ena_com_dev *dev,
				   struct ena_aenq_handlers *aenq_handlers)
{
	struct ena_com_aenq *aenq = &dev->aenq;
	u32 addr_low, addr_high, aenq_caps;
	u16 size;

	dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
	size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH);
	ENA_MEM_ALLOC_COHERENT(dev->dmadev, size,
			       aenq->entries,
			       aenq->dma_addr,
			       aenq->mem_handle);

	if (!aenq->entries) {
		ena_trc_err("memory allocation failed\n");
		return ENA_COM_NO_MEM;
	}

	aenq->head = aenq->q_depth;
	aenq->phase = 1;

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(aenq->dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(aenq->dma_addr);

	ENA_REG_WRITE32(dev->bus, addr_low, dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF);
	ENA_REG_WRITE32(dev->bus, addr_high, dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF);

	aenq_caps = 0;
	aenq_caps |= dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
	aenq_caps |= (sizeof(struct ena_admin_aenq_entry) <<
		ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
		ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
	ENA_REG_WRITE32(dev->bus, aenq_caps, dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);

	if (unlikely(!aenq_handlers)) {
		ena_trc_err("aenq handlers pointer is NULL\n");
		return ENA_COM_INVAL;
	}

	aenq->aenq_handlers = aenq_handlers;

	return 0;
}
static inline void comp_ctxt_release(struct ena_com_admin_queue *queue,
				     struct ena_comp_ctx *comp_ctx)
{
	comp_ctx->occupied = false;
	ATOMIC32_DEC(&queue->outstanding_cmds);
}
static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *queue,
					  u16 command_id, bool capture)
{
	if (unlikely(command_id >= queue->q_depth)) {
		ena_trc_err("command id is larger than the queue size. cmd_id: %u queue size %d\n",
			    command_id, queue->q_depth);
		return NULL;
	}

	if (unlikely(queue->comp_ctx[command_id].occupied && capture)) {
		ena_trc_err("Completion context is occupied\n");
		return NULL;
	}

	if (capture) {
		ATOMIC32_INC(&queue->outstanding_cmds);
		queue->comp_ctx[command_id].occupied = true;
	}

	return &queue->comp_ctx[command_id];
}
static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
						       struct ena_admin_aq_entry *cmd,
						       size_t cmd_size_in_bytes,
						       struct ena_admin_acq_entry *comp,
						       size_t comp_size_in_bytes)
{
	struct ena_comp_ctx *comp_ctx;
	u16 tail_masked, cmd_id;
	u16 queue_size_mask;
	u16 cnt;

	queue_size_mask = admin_queue->q_depth - 1;

	tail_masked = admin_queue->sq.tail & queue_size_mask;

	/* In case of queue FULL */
	cnt = ATOMIC32_READ(&admin_queue->outstanding_cmds);
	if (cnt >= admin_queue->q_depth) {
		ena_trc_dbg("admin queue is full.\n");
		admin_queue->stats.out_of_space++;
		return ERR_PTR(ENA_COM_NO_SPACE);
	}

	cmd_id = admin_queue->curr_cmd_id;

	cmd->aq_common_descriptor.flags |= admin_queue->sq.phase &
		ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK;

	cmd->aq_common_descriptor.command_id |= cmd_id &
		ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;

	comp_ctx = get_comp_ctxt(admin_queue, cmd_id, true);
	if (unlikely(!comp_ctx))
		return ERR_PTR(ENA_COM_INVAL);

	comp_ctx->status = ENA_CMD_SUBMITTED;
	comp_ctx->comp_size = (u32)comp_size_in_bytes;
	comp_ctx->user_cqe = comp;
	comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode;

	ENA_WAIT_EVENT_CLEAR(comp_ctx->wait_event);

	memcpy(&admin_queue->sq.entries[tail_masked], cmd, cmd_size_in_bytes);

	admin_queue->curr_cmd_id = (admin_queue->curr_cmd_id + 1) &
		queue_size_mask;

	admin_queue->sq.tail++;
	admin_queue->stats.submitted_cmd++;

	if (unlikely((admin_queue->sq.tail & queue_size_mask) == 0))
		admin_queue->sq.phase = !admin_queue->sq.phase;

	ENA_REG_WRITE32(admin_queue->bus, admin_queue->sq.tail,
			admin_queue->sq.db_addr);

	return comp_ctx;
}
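/* Worked example of the producer phase protocol above (added commentary;
 * numbers are illustrative): with q_depth = 32 the queue_size_mask is 31.
 * While sq.tail runs 0..31 every submitted descriptor carries phase = 1;
 * when sq.tail reaches 32 the masked index wraps to 0 and sq.phase flips
 * to 0, so the device can tell a freshly written descriptor from a stale
 * one left over from the previous lap around the ring.
 */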
static inline int ena_com_init_comp_ctxt(struct ena_com_admin_queue *queue)
{
	size_t size = queue->q_depth * sizeof(struct ena_comp_ctx);
	struct ena_comp_ctx *comp_ctx;
	u16 i;

	queue->comp_ctx = ENA_MEM_ALLOC(queue->q_dmadev, size);
	if (unlikely(!queue->comp_ctx)) {
		ena_trc_err("memory allocation failed\n");
		return ENA_COM_NO_MEM;
	}

	for (i = 0; i < queue->q_depth; i++) {
		comp_ctx = get_comp_ctxt(queue, i, false);
		if (comp_ctx)
			ENA_WAIT_EVENT_INIT(comp_ctx->wait_event);
	}

	return 0;
}
static struct ena_comp_ctx *ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
						     struct ena_admin_aq_entry *cmd,
						     size_t cmd_size_in_bytes,
						     struct ena_admin_acq_entry *comp,
						     size_t comp_size_in_bytes)
{
	unsigned long flags = 0;
	struct ena_comp_ctx *comp_ctx;

	ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
	if (unlikely(!admin_queue->running_state)) {
		ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
		return ERR_PTR(ENA_COM_NO_DEVICE);
	}
	comp_ctx = __ena_com_submit_admin_cmd(admin_queue, cmd,
					      cmd_size_in_bytes,
					      comp,
					      comp_size_in_bytes);
	if (IS_ERR(comp_ctx))
		admin_queue->running_state = false;
	ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);

	return comp_ctx;
}
static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
			      struct ena_com_create_io_ctx *ctx,
			      struct ena_com_io_sq *io_sq)
{
	size_t size;
	int dev_node = 0;

	memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr));

	io_sq->desc_entry_size =
		(io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
		sizeof(struct ena_eth_io_tx_desc) :
		sizeof(struct ena_eth_io_rx_desc);

	size = io_sq->desc_entry_size * io_sq->q_depth;

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
		ENA_MEM_ALLOC_COHERENT_NODE(ena_dev->dmadev,
					    size,
					    io_sq->desc_addr.virt_addr,
					    io_sq->desc_addr.phys_addr,
					    io_sq->desc_addr.mem_handle,
					    ctx->numa_node,
					    dev_node);
		if (!io_sq->desc_addr.virt_addr) {
			ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
					       size,
					       io_sq->desc_addr.virt_addr,
					       io_sq->desc_addr.phys_addr,
					       io_sq->desc_addr.mem_handle);
		}
	} else {
		ENA_MEM_ALLOC_NODE(ena_dev->dmadev,
				   size,
				   io_sq->desc_addr.virt_addr,
				   ctx->numa_node,
				   dev_node);
		if (!io_sq->desc_addr.virt_addr) {
			io_sq->desc_addr.virt_addr =
				ENA_MEM_ALLOC(ena_dev->dmadev, size);
		}
	}

	if (!io_sq->desc_addr.virt_addr) {
		ena_trc_err("memory allocation failed\n");
		return ENA_COM_NO_MEM;
	}

	io_sq->tail = 0;
	io_sq->next_to_comp = 0;
	io_sq->phase = 1;

	return 0;
}
static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
			      struct ena_com_create_io_ctx *ctx,
			      struct ena_com_io_cq *io_cq)
{
	size_t size;
	int prev_node = 0;

	memset(&io_cq->cdesc_addr, 0x0, sizeof(io_cq->cdesc_addr));

	/* Use the basic completion descriptor for Rx */
	io_cq->cdesc_entry_size_in_bytes =
		(io_cq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
		sizeof(struct ena_eth_io_tx_cdesc) :
		sizeof(struct ena_eth_io_rx_cdesc_base);

	size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;

	ENA_MEM_ALLOC_COHERENT_NODE(ena_dev->dmadev,
				    size,
				    io_cq->cdesc_addr.virt_addr,
				    io_cq->cdesc_addr.phys_addr,
				    io_cq->cdesc_addr.mem_handle,
				    ctx->numa_node,
				    prev_node);
	if (!io_cq->cdesc_addr.virt_addr) {
		ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
				       size,
				       io_cq->cdesc_addr.virt_addr,
				       io_cq->cdesc_addr.phys_addr,
				       io_cq->cdesc_addr.mem_handle);
	}

	if (!io_cq->cdesc_addr.virt_addr) {
		ena_trc_err("memory allocation failed\n");
		return ENA_COM_NO_MEM;
	}

	io_cq->phase = 1;
	io_cq->head = 0;

	return 0;
}
static void ena_com_handle_single_admin_completion(struct ena_com_admin_queue *admin_queue,
						   struct ena_admin_acq_entry *cqe)
{
	struct ena_comp_ctx *comp_ctx;
	u16 cmd_id;

	cmd_id = cqe->acq_common_descriptor.command &
		ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;

	comp_ctx = get_comp_ctxt(admin_queue, cmd_id, false);
	if (unlikely(!comp_ctx)) {
		ena_trc_err("comp_ctx is NULL. Changing the admin queue running state\n");
		admin_queue->running_state = false;
		return;
	}

	comp_ctx->status = ENA_CMD_COMPLETED;
	comp_ctx->comp_status = cqe->acq_common_descriptor.status;

	if (comp_ctx->user_cqe)
		memcpy(comp_ctx->user_cqe, (void *)cqe, comp_ctx->comp_size);

	if (!admin_queue->polling)
		ENA_WAIT_EVENT_SIGNAL(comp_ctx->wait_event);
}
static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_queue)
{
	struct ena_admin_acq_entry *cqe = NULL;
	u16 comp_num = 0;
	u16 head_masked;
	u8 phase;

	head_masked = admin_queue->cq.head & (admin_queue->q_depth - 1);
	phase = admin_queue->cq.phase;

	cqe = &admin_queue->cq.entries[head_masked];

	/* Go over all the completions */
	while ((cqe->acq_common_descriptor.flags &
			ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
		/* Do not read the rest of the completion entry before the
		 * phase bit was validated
		 */
		rmb();
		ena_com_handle_single_admin_completion(admin_queue, cqe);

		head_masked++;
		comp_num++;
		if (unlikely(head_masked == admin_queue->q_depth)) {
			head_masked = 0;
			phase = !phase;
		}

		cqe = &admin_queue->cq.entries[head_masked];
	}

	admin_queue->cq.head += comp_num;
	admin_queue->cq.phase = phase;
	admin_queue->sq.head += comp_num;
	admin_queue->stats.completed_cmd += comp_num;
}
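/* Note (added commentary): cq.head and sq.head are free-running counters;
 * only their masked low bits index the rings. Advancing sq.head by comp_num
 * is what effectively frees SQ slots for reuse, since the queue-full test in
 * __ena_com_submit_admin_cmd counts outstanding commands rather than
 * comparing ring pointers.
 */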
static int ena_com_comp_status_to_errno(u8 comp_status)
{
	if (unlikely(comp_status != 0))
		ena_trc_err("admin command failed[%u]\n", comp_status);

	if (unlikely(comp_status > ENA_ADMIN_UNKNOWN_ERROR))
		return ENA_COM_INVAL;

	switch (comp_status) {
	case ENA_ADMIN_SUCCESS:
		return 0;
	case ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE:
		return ENA_COM_NO_MEM;
	case ENA_ADMIN_UNSUPPORTED_OPCODE:
		return ENA_COM_UNSUPPORTED;
	case ENA_ADMIN_BAD_OPCODE:
	case ENA_ADMIN_MALFORMED_REQUEST:
	case ENA_ADMIN_ILLEGAL_PARAMETER:
	case ENA_ADMIN_UNKNOWN_ERROR:
		return ENA_COM_INVAL;
	}

	return 0;
}
static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx,
						     struct ena_com_admin_queue *admin_queue)
{
	unsigned long flags = 0;
	unsigned long timeout;
	int ret;

	timeout = ENA_GET_SYSTEM_TIMEOUT(admin_queue->completion_timeout);

	while (1) {
		ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
		ena_com_handle_admin_completion(admin_queue);
		ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);

		if (comp_ctx->status != ENA_CMD_SUBMITTED)
			break;

		if (ENA_TIME_EXPIRE(timeout)) {
			ena_trc_err("Wait for completion (polling) timeout\n");
			/* ENA didn't have any completion */
			ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
			admin_queue->stats.no_completion++;
			admin_queue->running_state = false;
			ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);

			ret = ENA_COM_TIMER_EXPIRED;
			goto err;
		}

		ENA_MSLEEP(ENA_POLL_MS);
	}

	if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
		ena_trc_err("Command was aborted\n");
		ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
		admin_queue->stats.aborted_cmd++;
		ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
		ret = ENA_COM_NO_DEVICE;
		goto err;
	}

	ENA_WARN(comp_ctx->status != ENA_CMD_COMPLETED,
		 "Invalid comp status %d\n", comp_ctx->status);

	ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
err:
	comp_ctxt_release(admin_queue, comp_ctx);
	return ret;
}
static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,
							struct ena_com_admin_queue *admin_queue)
{
	unsigned long flags = 0;
	int ret;

	ENA_WAIT_EVENT_WAIT(comp_ctx->wait_event,
			    admin_queue->completion_timeout);

	/* In case the command wasn't completed, find out the root cause.
	 * There might be 2 kinds of errors:
	 * 1) No completion (timeout reached)
	 * 2) There is a completion but the driver didn't receive any msi-x interrupt.
	 */
	if (unlikely(comp_ctx->status == ENA_CMD_SUBMITTED)) {
		ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
		ena_com_handle_admin_completion(admin_queue);
		admin_queue->stats.no_completion++;
		ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);

		if (comp_ctx->status == ENA_CMD_COMPLETED)
			ena_trc_err("The ena device has a completion but the driver didn't receive any MSI-X interrupt (cmd %d)\n",
				    comp_ctx->cmd_opcode);
		else
			ena_trc_err("The ena device didn't send any completion for the admin cmd %d status %d\n",
				    comp_ctx->cmd_opcode, comp_ctx->status);

		admin_queue->running_state = false;
		ret = ENA_COM_TIMER_EXPIRED;
		goto err;
	}

	ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
err:
	comp_ctxt_release(admin_queue, comp_ctx);

	return ret;
}
/* This method reads a hardware device register through posting writes
 * and waiting for a response.
 * On timeout the function will return ENA_MMIO_READ_TIMEOUT
 */
static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
	volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp =
		mmio_read->read_resp;
	u32 mmio_read_reg, ret, i;
	unsigned long flags = 0;
	u32 timeout = mmio_read->reg_read_to;

	ENA_MIGHT_SLEEP();

	if (timeout == 0)
		timeout = ENA_REG_READ_TIMEOUT;

	/* If readless is disabled, perform regular read */
	if (!mmio_read->readless_supported)
		return ENA_REG_READ32(ena_dev->bus, ena_dev->reg_bar + offset);

	ENA_SPINLOCK_LOCK(mmio_read->lock, flags);
	mmio_read->seq_num++;

	read_resp->req_id = mmio_read->seq_num + 0xDEAD;
	mmio_read_reg = (offset << ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT) &
			ENA_REGS_MMIO_REG_READ_REG_OFF_MASK;
	mmio_read_reg |= mmio_read->seq_num &
			ENA_REGS_MMIO_REG_READ_REQ_ID_MASK;

	/* make sure read_resp->req_id gets updated before the hw can write
	 * to it
	 */
	wmb();

	ENA_REG_WRITE32(ena_dev->bus, mmio_read_reg, ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);

	for (i = 0; i < timeout; i++) {
		if (read_resp->req_id == mmio_read->seq_num)
			break;

		ENA_UDELAY(1);
	}

	if (unlikely(i == timeout)) {
		ena_trc_err("reading reg failed for timeout. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n",
			    mmio_read->seq_num,
			    offset,
			    read_resp->req_id,
			    read_resp->reg_off);
		ret = ENA_MMIO_READ_TIMEOUT;
		goto err;
	}

	if (read_resp->reg_off != offset) {
		ena_trc_err("Read failure: wrong offset provided\n");
		ret = ENA_MMIO_READ_TIMEOUT;
	} else {
		ret = read_resp->reg_val;
	}
err:
	ENA_SPINLOCK_UNLOCK(mmio_read->lock, flags);

	return ret;
}
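/* Illustrative usage sketch (added commentary, not upstream code): a caller
 * is expected to compare the return value against ENA_MMIO_READ_TIMEOUT
 * before trusting it, as every in-tree caller below does.
 */
#if 0 /* example only, excluded from the build */
static int ena_example_read_dev_sts(struct ena_com_dev *ena_dev, u32 *sts)
{
	u32 val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);

	if (val == ENA_MMIO_READ_TIMEOUT)
		return ENA_COM_TIMER_EXPIRED;

	*sts = val;
	return 0;
}
#endif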
/* There are two ways to wait for a completion.
 * Polling mode - wait until the completion is available.
 * Async mode - wait on a wait queue until the completion is ready
 * (or the timeout expired).
 * It is expected that the IRQ called ena_com_handle_admin_completion
 * to mark the completions.
 */
static int ena_com_wait_and_process_admin_cq(struct ena_comp_ctx *comp_ctx,
					     struct ena_com_admin_queue *admin_queue)
{
	if (admin_queue->polling)
		return ena_com_wait_and_process_admin_cq_polling(comp_ctx,
								 admin_queue);

	return ena_com_wait_and_process_admin_cq_interrupts(comp_ctx,
							    admin_queue);
}
static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
				 struct ena_com_io_sq *io_sq)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_destroy_sq_cmd destroy_cmd;
	struct ena_admin_acq_destroy_sq_resp_desc destroy_resp;
	u8 direction;
	int ret;

	memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));

	if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
		direction = ENA_ADMIN_SQ_DIRECTION_TX;
	else
		direction = ENA_ADMIN_SQ_DIRECTION_RX;

	destroy_cmd.sq.sq_identity |= (direction <<
		ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT) &
		ENA_ADMIN_SQ_SQ_DIRECTION_MASK;

	destroy_cmd.sq.sq_idx = io_sq->idx;
	destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_SQ;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&destroy_cmd,
					    sizeof(destroy_cmd),
					    (struct ena_admin_acq_entry *)&destroy_resp,
					    sizeof(destroy_resp));

	if (unlikely(ret && (ret != ENA_COM_NO_DEVICE)))
		ena_trc_err("failed to destroy io sq error: %d\n", ret);

	return ret;
}
static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
				  struct ena_com_io_sq *io_sq,
				  struct ena_com_io_cq *io_cq)
{
	size_t size;

	if (io_cq->cdesc_addr.virt_addr) {
		size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;

		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
				      size,
				      io_cq->cdesc_addr.virt_addr,
				      io_cq->cdesc_addr.phys_addr,
				      io_cq->cdesc_addr.mem_handle);

		io_cq->cdesc_addr.virt_addr = NULL;
	}

	if (io_sq->desc_addr.virt_addr) {
		size = io_sq->desc_entry_size * io_sq->q_depth;

		if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
			ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
					      size,
					      io_sq->desc_addr.virt_addr,
					      io_sq->desc_addr.phys_addr,
					      io_sq->desc_addr.mem_handle);
		else
			ENA_MEM_FREE(ena_dev->dmadev, io_sq->desc_addr.virt_addr);

		io_sq->desc_addr.virt_addr = NULL;
	}
}
static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
				u16 exp_state)
{
	u32 val, i;

	/* Convert timeout from resolution of 100ms to ENA_POLL_MS */
	timeout = (timeout * 100) / ENA_POLL_MS;

	for (i = 0; i < timeout; i++) {
		val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);

		if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) {
			ena_trc_err("Reg read timeout occurred\n");
			return ENA_COM_TIMER_EXPIRED;
		}

		if ((val & ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK) ==
			exp_state)
			return 0;

		ENA_MSLEEP(ENA_POLL_MS);
	}

	return ENA_COM_TIMER_EXPIRED;
}
static bool ena_com_check_supported_feature_id(struct ena_com_dev *ena_dev,
					       enum ena_admin_aq_feature_id feature_id)
{
	u32 feature_mask = 1 << feature_id;

	/* Device attributes is always supported */
	if ((feature_id != ENA_ADMIN_DEVICE_ATTRIBUTES) &&
	    !(ena_dev->supported_features & feature_mask))
		return false;

	return true;
}
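/* Example (added commentary): supported_features is a bitmap indexed by
 * enum ena_admin_aq_feature_id, so a device advertising, say, the value
 * 0x00000401 supports the feature ids 0 and 10, and the check above rejects
 * every other id except ENA_ADMIN_DEVICE_ATTRIBUTES.
 */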
static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
				  struct ena_admin_get_feat_resp *get_resp,
				  enum ena_admin_aq_feature_id feature_id,
				  dma_addr_t control_buf_dma_addr,
				  u32 control_buff_size)
{
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_get_feat_cmd get_cmd;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
		ena_trc_dbg("Feature %d isn't supported\n", feature_id);
		return ENA_COM_UNSUPPORTED;
	}

	memset(&get_cmd, 0x0, sizeof(get_cmd));
	admin_queue = &ena_dev->admin_queue;

	get_cmd.aq_common_descriptor.opcode = ENA_ADMIN_GET_FEATURE;

	if (control_buff_size)
		get_cmd.aq_common_descriptor.flags =
			ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	else
		get_cmd.aq_common_descriptor.flags = 0;

	ret = ena_com_mem_addr_set(ena_dev,
				   &get_cmd.control_buffer.address,
				   control_buf_dma_addr);
	if (unlikely(ret)) {
		ena_trc_err("memory address set failed\n");
		return ret;
	}

	get_cmd.control_buffer.length = control_buff_size;

	get_cmd.feat_common.feature_id = feature_id;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)
					    &get_cmd,
					    sizeof(get_cmd),
					    (struct ena_admin_acq_entry *)
					    get_resp,
					    sizeof(*get_resp));

	if (unlikely(ret))
		ena_trc_err("Failed to submit get_feature command %d error: %d\n",
			    feature_id, ret);

	return ret;
}
static int ena_com_get_feature(struct ena_com_dev *ena_dev,
			       struct ena_admin_get_feat_resp *get_resp,
			       enum ena_admin_aq_feature_id feature_id)
{
	return ena_com_get_feature_ex(ena_dev,
				      get_resp,
				      feature_id,
				      0,
				      0);
}
static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
			       sizeof(*rss->hash_key),
			       rss->hash_key,
			       rss->hash_key_dma_addr,
			       rss->hash_key_mem_handle);

	if (unlikely(!rss->hash_key))
		return ENA_COM_NO_MEM;

	return 0;
}
static void ena_com_hash_key_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	if (rss->hash_key)
		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
				      sizeof(*rss->hash_key),
				      rss->hash_key,
				      rss->hash_key_dma_addr,
				      rss->hash_key_mem_handle);
	rss->hash_key = NULL;
}
static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
			       sizeof(*rss->hash_ctrl),
			       rss->hash_ctrl,
			       rss->hash_ctrl_dma_addr,
			       rss->hash_ctrl_mem_handle);

	if (unlikely(!rss->hash_ctrl))
		return ENA_COM_NO_MEM;

	return 0;
}
static void ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	if (rss->hash_ctrl)
		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
				      sizeof(*rss->hash_ctrl),
				      rss->hash_ctrl,
				      rss->hash_ctrl_dma_addr,
				      rss->hash_ctrl_mem_handle);
	rss->hash_ctrl = NULL;
}
static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
					   u16 log_size)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	size_t tbl_size;
	int ret;

	ret = ena_com_get_feature(ena_dev, &get_resp,
				  ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
	if (unlikely(ret))
		return ret;

	if ((get_resp.u.ind_table.min_size > log_size) ||
	    (get_resp.u.ind_table.max_size < log_size)) {
		ena_trc_err("indirect table size doesn't fit. requested size: %d while min is:%d and max %d\n",
			    1 << log_size,
			    1 << get_resp.u.ind_table.min_size,
			    1 << get_resp.u.ind_table.max_size);
		return ENA_COM_INVAL;
	}

	tbl_size = (1ULL << log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
			       tbl_size,
			       rss->rss_ind_tbl,
			       rss->rss_ind_tbl_dma_addr,
			       rss->rss_ind_tbl_mem_handle);
	if (unlikely(!rss->rss_ind_tbl))
		goto mem_err1;

	tbl_size = (1ULL << log_size) * sizeof(u16);
	rss->host_rss_ind_tbl =
		ENA_MEM_ALLOC(ena_dev->dmadev, tbl_size);
	if (unlikely(!rss->host_rss_ind_tbl))
		goto mem_err2;

	rss->tbl_log_size = log_size;

	return 0;

mem_err2:
	tbl_size = (1ULL << log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
			      tbl_size,
			      rss->rss_ind_tbl,
			      rss->rss_ind_tbl_dma_addr,
			      rss->rss_ind_tbl_mem_handle);
	rss->rss_ind_tbl = NULL;
mem_err1:
	rss->tbl_log_size = 0;
	return ENA_COM_NO_MEM;
}
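/* Worked example (added commentary): with log_size = 7 the device table
 * holds 1 << 7 = 128 entries of struct ena_admin_rss_ind_table_entry, while
 * the host shadow table needs 128 * sizeof(u16) = 256 bytes.
 */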
static void ena_com_indirect_table_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;
	size_t tbl_size = (1ULL << rss->tbl_log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	if (rss->rss_ind_tbl)
		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
				      tbl_size,
				      rss->rss_ind_tbl,
				      rss->rss_ind_tbl_dma_addr,
				      rss->rss_ind_tbl_mem_handle);
	rss->rss_ind_tbl = NULL;

	if (rss->host_rss_ind_tbl)
		ENA_MEM_FREE(ena_dev->dmadev, rss->host_rss_ind_tbl);
	rss->host_rss_ind_tbl = NULL;
}
static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
				struct ena_com_io_sq *io_sq, u16 cq_idx)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_create_sq_cmd create_cmd;
	struct ena_admin_acq_create_sq_resp_desc cmd_completion;
	u8 direction;
	int ret;

	memset(&create_cmd, 0x0, sizeof(create_cmd));

	create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_SQ;

	if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
		direction = ENA_ADMIN_SQ_DIRECTION_TX;
	else
		direction = ENA_ADMIN_SQ_DIRECTION_RX;

	create_cmd.sq_identity |= (direction <<
		ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT) &
		ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK;

	create_cmd.sq_caps_2 |= io_sq->mem_queue_type &
		ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK;

	create_cmd.sq_caps_2 |= (ENA_ADMIN_COMPLETION_POLICY_DESC <<
		ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT) &
		ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK;

	create_cmd.sq_caps_3 |=
		ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK;

	create_cmd.cq_idx = cq_idx;
	create_cmd.sq_depth = io_sq->q_depth;

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
		ret = ena_com_mem_addr_set(ena_dev,
					   &create_cmd.sq_ba,
					   io_sq->desc_addr.phys_addr);
		if (unlikely(ret)) {
			ena_trc_err("memory address set failed\n");
			return ret;
		}
	}

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&create_cmd,
					    sizeof(create_cmd),
					    (struct ena_admin_acq_entry *)&cmd_completion,
					    sizeof(cmd_completion));
	if (unlikely(ret)) {
		ena_trc_err("Failed to create IO SQ. error: %d\n", ret);
		return ret;
	}

	io_sq->idx = cmd_completion.sq_idx;

	io_sq->db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
		(uintptr_t)cmd_completion.sq_doorbell_offset);

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
		io_sq->header_addr = (u8 __iomem *)((uintptr_t)ena_dev->mem_bar
			+ cmd_completion.llq_headers_offset);

		io_sq->desc_addr.pbuf_dev_addr =
			(u8 __iomem *)((uintptr_t)ena_dev->mem_bar +
			cmd_completion.llq_descriptors_offset);
	}

	ena_trc_dbg("created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth);

	return ret;
}
static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_com_io_sq *io_sq;
	u16 qid;
	u16 i;

	for (i = 0; i < 1 << rss->tbl_log_size; i++) {
		qid = rss->host_rss_ind_tbl[i];
		if (qid >= ENA_TOTAL_NUM_QUEUES)
			return ENA_COM_INVAL;

		io_sq = &ena_dev->io_sq_queues[qid];

		if (io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX)
			return ENA_COM_INVAL;

		rss->rss_ind_tbl[i].cq_idx = io_sq->idx;
	}

	return 0;
}
static int ena_com_ind_tbl_convert_from_device(struct ena_com_dev *ena_dev)
{
	u16 dev_idx_to_host_tbl[ENA_TOTAL_NUM_QUEUES] = { (u16)-1 };
	struct ena_rss *rss = &ena_dev->rss;
	u8 idx;
	u16 i;

	for (i = 0; i < ENA_TOTAL_NUM_QUEUES; i++)
		dev_idx_to_host_tbl[ena_dev->io_sq_queues[i].idx] = i;

	for (i = 0; i < 1 << rss->tbl_log_size; i++) {
		if (rss->rss_ind_tbl[i].cq_idx >= ENA_TOTAL_NUM_QUEUES)
			return ENA_COM_INVAL;
		idx = (u8)rss->rss_ind_tbl[i].cq_idx;

		if (dev_idx_to_host_tbl[idx] >= ENA_TOTAL_NUM_QUEUES)
			return ENA_COM_INVAL;

		rss->host_rss_ind_tbl[i] = dev_idx_to_host_tbl[idx];
	}

	return 0;
}
static int ena_com_init_interrupt_moderation_table(struct ena_com_dev *ena_dev)
{
	size_t size;

	size = sizeof(struct ena_intr_moder_entry) * ENA_INTR_MAX_NUM_OF_LEVELS;

	ena_dev->intr_moder_tbl = ENA_MEM_ALLOC(ena_dev->dmadev, size);
	if (!ena_dev->intr_moder_tbl)
		return ENA_COM_NO_MEM;

	ena_com_config_default_interrupt_moderation_table(ena_dev);

	return 0;
}
static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev,
						 u16 intr_delay_resolution)
{
	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
	unsigned int i;

	if (!intr_delay_resolution) {
		ena_trc_err("Illegal intr_delay_resolution provided. Going to use default 1 usec resolution\n");
		intr_delay_resolution = 1;
	}
	ena_dev->intr_delay_resolution = intr_delay_resolution;

	/* update Rx */
	for (i = 0; i < ENA_INTR_MAX_NUM_OF_LEVELS; i++)
		intr_moder_tbl[i].intr_moder_interval /= intr_delay_resolution;

	/* update Tx */
	ena_dev->intr_moder_tx_interval /= intr_delay_resolution;
}
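/* Worked example (added commentary): the moderation table is kept in device
 * "delay resolution" units. If the device reports a resolution of 2 usec and
 * a table entry was configured as 64 usec, the stored value becomes
 * 64 / 2 = 32 units, which the device scales back to 64 usec.
 */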
/*****************************************************************************/
/*******************************      API      ******************************/
/*****************************************************************************/
int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue,
				  struct ena_admin_aq_entry *cmd,
				  size_t cmd_size,
				  struct ena_admin_acq_entry *comp,
				  size_t comp_size)
{
	struct ena_comp_ctx *comp_ctx;
	int ret;

	comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size,
					    comp, comp_size);
	if (IS_ERR(comp_ctx)) {
		if (comp_ctx == ERR_PTR(ENA_COM_NO_DEVICE))
			ena_trc_dbg("Failed to submit command [%ld]\n",
				    PTR_ERR(comp_ctx));
		else
			ena_trc_err("Failed to submit command [%ld]\n",
				    PTR_ERR(comp_ctx));

		return PTR_ERR(comp_ctx);
	}

	ret = ena_com_wait_and_process_admin_cq(comp_ctx, admin_queue);
	if (unlikely(ret)) {
		if (admin_queue->running_state)
			ena_trc_err("Failed to process command. ret = %d\n",
				    ret);
		else
			ena_trc_dbg("Failed to process command. ret = %d\n",
				    ret);
	}
	return ret;
}
int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
			 struct ena_com_io_cq *io_cq)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_create_cq_cmd create_cmd;
	struct ena_admin_acq_create_cq_resp_desc cmd_completion;
	int ret;

	memset(&create_cmd, 0x0, sizeof(create_cmd));

	create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_CQ;

	create_cmd.cq_caps_2 |= (io_cq->cdesc_entry_size_in_bytes / 4) &
		ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;
	create_cmd.cq_caps_1 |=
		ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK;

	create_cmd.msix_vector = io_cq->msix_vector;
	create_cmd.cq_depth = io_cq->q_depth;

	ret = ena_com_mem_addr_set(ena_dev,
				   &create_cmd.cq_ba,
				   io_cq->cdesc_addr.phys_addr);
	if (unlikely(ret)) {
		ena_trc_err("memory address set failed\n");
		return ret;
	}

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&create_cmd,
					    sizeof(create_cmd),
					    (struct ena_admin_acq_entry *)&cmd_completion,
					    sizeof(cmd_completion));
	if (unlikely(ret)) {
		ena_trc_err("Failed to create IO CQ. error: %d\n", ret);
		return ret;
	}

	io_cq->idx = cmd_completion.cq_idx;

	io_cq->unmask_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
		cmd_completion.cq_interrupt_unmask_register_offset);

	if (cmd_completion.cq_head_db_register_offset)
		io_cq->cq_head_db_reg =
			(u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
			cmd_completion.cq_head_db_register_offset);

	if (cmd_completion.numa_node_register_offset)
		io_cq->numa_node_cfg_reg =
			(u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
			cmd_completion.numa_node_register_offset);

	ena_trc_dbg("created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth);

	return ret;
}
int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
			    struct ena_com_io_sq **io_sq,
			    struct ena_com_io_cq **io_cq)
{
	if (qid >= ENA_TOTAL_NUM_QUEUES) {
		ena_trc_err("Invalid queue number %d but the max is %d\n",
			    qid, ENA_TOTAL_NUM_QUEUES);
		return ENA_COM_INVAL;
	}

	*io_sq = &ena_dev->io_sq_queues[qid];
	*io_cq = &ena_dev->io_cq_queues[qid];

	return 0;
}
void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_comp_ctx *comp_ctx;
	u16 i;

	if (!admin_queue->comp_ctx)
		return;

	for (i = 0; i < admin_queue->q_depth; i++) {
		comp_ctx = get_comp_ctxt(admin_queue, i, false);
		if (unlikely(!comp_ctx))
			break;

		comp_ctx->status = ENA_CMD_ABORTED;

		ENA_WAIT_EVENT_SIGNAL(comp_ctx->wait_event);
	}
}
void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	unsigned long flags = 0;

	ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
	while (ATOMIC32_READ(&admin_queue->outstanding_cmds) != 0) {
		ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
		ENA_MSLEEP(ENA_POLL_MS);
		ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
	}
	ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
}
int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
			  struct ena_com_io_cq *io_cq)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_destroy_cq_cmd destroy_cmd;
	struct ena_admin_acq_destroy_cq_resp_desc destroy_resp;
	int ret;

	memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));

	destroy_cmd.cq_idx = io_cq->idx;
	destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_CQ;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&destroy_cmd,
					    sizeof(destroy_cmd),
					    (struct ena_admin_acq_entry *)&destroy_resp,
					    sizeof(destroy_resp));

	if (unlikely(ret && (ret != ENA_COM_NO_DEVICE)))
		ena_trc_err("Failed to destroy IO CQ. error: %d\n", ret);

	return ret;
}
bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev)
{
	return ena_dev->admin_queue.running_state;
}
void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	unsigned long flags = 0;

	ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
	ena_dev->admin_queue.running_state = state;
	ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
}
void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev)
{
	u16 depth = ena_dev->aenq.q_depth;

	ENA_WARN(ena_dev->aenq.head != depth, "Invalid AENQ state\n");

	/* Init head_db to mark that all entries in the queue
	 * are initially available
	 */
	ENA_REG_WRITE32(ena_dev->bus, depth, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
}
int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
{
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	struct ena_admin_get_feat_resp get_resp;
	int ret;

	ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG);
	if (ret) {
		ena_trc_info("Can't get aenq configuration\n");
		return ret;
	}

	if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) {
		ena_trc_warn("Trying to set unsupported aenq events. supported flag: %x asked flag: %x\n",
			     get_resp.u.aenq.supported_groups,
			     groups_flag);
		return ENA_COM_UNSUPPORTED;
	}

	memset(&cmd, 0x0, sizeof(cmd));
	admin_queue = &ena_dev->admin_queue;

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags = 0;
	cmd.feat_common.feature_id = ENA_ADMIN_AENQ_CONFIG;
	cmd.u.aenq.enabled_groups = groups_flag;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));

	if (unlikely(ret))
		ena_trc_err("Failed to config AENQ ret: %d\n", ret);

	return ret;
}
int ena_com_get_dma_width(struct ena_com_dev *ena_dev)
{
	u32 caps = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
	u32 width;

	if (unlikely(caps == ENA_MMIO_READ_TIMEOUT)) {
		ena_trc_err("Reg read timeout occurred\n");
		return ENA_COM_TIMER_EXPIRED;
	}

	width = (caps & ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK) >>
		ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT;

	ena_trc_dbg("ENA dma width: %d\n", width);

	if ((width < 32) || (width > ENA_MAX_PHYS_ADDR_SIZE_BITS)) {
		ena_trc_err("DMA width illegal value: %d\n", width);
		return ENA_COM_INVAL;
	}

	ena_dev->dma_addr_bits = width;

	return width;
}
int ena_com_validate_version(struct ena_com_dev *ena_dev)
{
	u32 ver;
	u32 ctrl_ver;
	u32 ctrl_ver_masked;

	/* Make sure the ENA version and the controller version are at least
	 * as new as the driver expects
	 */
	ver = ena_com_reg_bar_read32(ena_dev, ENA_REGS_VERSION_OFF);
	ctrl_ver = ena_com_reg_bar_read32(ena_dev,
					  ENA_REGS_CONTROLLER_VERSION_OFF);

	if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) ||
		     (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) {
		ena_trc_err("Reg read timeout occurred\n");
		return ENA_COM_TIMER_EXPIRED;
	}

	ena_trc_info("ena device version: %d.%d\n",
		     (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >>
		     ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
		     ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);

	if (ver < MIN_ENA_VER) {
		ena_trc_err("ENA version is lower than the minimal version the driver supports\n");
		return -1;
	}

	ena_trc_info("ena controller version: %d.%d.%d implementation version %d\n",
		     (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK)
		     >> ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
		     (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK)
		     >> ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT,
		     (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK),
		     (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK) >>
		     ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT);

	ctrl_ver_masked =
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) |
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) |
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK);

	/* Validate the ctrl version without the implementation ID */
	if (ctrl_ver_masked < MIN_ENA_CTRL_VER) {
		ena_trc_err("ENA ctrl version is lower than the minimal ctrl version the driver supports\n");
		return -1;
	}

	return 0;
}
void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_com_admin_cq *cq = &admin_queue->cq;
	struct ena_com_admin_sq *sq = &admin_queue->sq;
	struct ena_com_aenq *aenq = &ena_dev->aenq;
	u16 size;

	if (admin_queue->comp_ctx) {
		ENA_WAIT_EVENT_DESTROY(admin_queue->comp_ctx->wait_event);
		ENA_MEM_FREE(ena_dev->dmadev, admin_queue->comp_ctx);
	}
	admin_queue->comp_ctx = NULL;
	size = ADMIN_SQ_SIZE(admin_queue->q_depth);
	if (sq->entries)
		ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, sq->entries,
				      sq->dma_addr, sq->mem_handle);
	sq->entries = NULL;

	size = ADMIN_CQ_SIZE(admin_queue->q_depth);
	if (cq->entries)
		ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, cq->entries,
				      cq->dma_addr, cq->mem_handle);
	cq->entries = NULL;

	size = ADMIN_AENQ_SIZE(aenq->q_depth);
	if (ena_dev->aenq.entries)
		ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, aenq->entries,
				      aenq->dma_addr, aenq->mem_handle);
	aenq->entries = NULL;
}
void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
{
	u32 mask_value = 0;

	if (polling)
		mask_value = ENA_REGS_ADMIN_INTR_MASK;

	ENA_REG_WRITE32(ena_dev->bus, mask_value, ena_dev->reg_bar + ENA_REGS_INTR_MASK_OFF);
	ena_dev->admin_queue.polling = polling;
}
int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

	ENA_SPINLOCK_INIT(mmio_read->lock);
	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
			       sizeof(*mmio_read->read_resp),
			       mmio_read->read_resp,
			       mmio_read->read_resp_dma_addr,
			       mmio_read->read_resp_mem_handle);
	if (unlikely(!mmio_read->read_resp))
		return ENA_COM_NO_MEM;

	ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);

	mmio_read->read_resp->req_id = 0x0;
	mmio_read->seq_num = 0x0;
	mmio_read->readless_supported = true;

	return 0;
}
void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

	mmio_read->readless_supported = readless_supported;
}
void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

	ENA_REG_WRITE32(ena_dev->bus, 0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
	ENA_REG_WRITE32(ena_dev->bus, 0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);

	ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
			      sizeof(*mmio_read->read_resp),
			      mmio_read->read_resp,
			      mmio_read->read_resp_dma_addr,
			      mmio_read->read_resp_mem_handle);

	mmio_read->read_resp = NULL;
}
void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
	u32 addr_low, addr_high;

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(mmio_read->read_resp_dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(mmio_read->read_resp_dma_addr);

	ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
	ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
}
int ena_com_admin_init(struct ena_com_dev *ena_dev,
		       struct ena_aenq_handlers *aenq_handlers,
		       bool init_spinlock)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	u32 aq_caps, acq_caps, dev_sts, addr_low, addr_high;
	int ret;

	dev_sts = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);

	if (unlikely(dev_sts == ENA_MMIO_READ_TIMEOUT)) {
		ena_trc_err("Reg read timeout occurred\n");
		return ENA_COM_TIMER_EXPIRED;
	}

	if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) {
		ena_trc_err("Device isn't ready, abort com init\n");
		return ENA_COM_NO_DEVICE;
	}

	admin_queue->q_depth = ENA_ADMIN_QUEUE_DEPTH;

	admin_queue->q_dmadev = ena_dev->dmadev;
	admin_queue->polling = false;
	admin_queue->curr_cmd_id = 0;

	ATOMIC32_SET(&admin_queue->outstanding_cmds, 0);

	if (init_spinlock)
		ENA_SPINLOCK_INIT(admin_queue->q_lock);

	ret = ena_com_init_comp_ctxt(admin_queue);
	if (ret)
		goto error;

	ret = ena_com_admin_init_sq(admin_queue);
	if (ret)
		goto error;

	ret = ena_com_admin_init_cq(admin_queue);
	if (ret)
		goto error;

	admin_queue->sq.db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
		ENA_REGS_AQ_DB_OFF);

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->sq.dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->sq.dma_addr);

	ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_AQ_BASE_LO_OFF);
	ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_AQ_BASE_HI_OFF);

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->cq.dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->cq.dma_addr);

	ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_LO_OFF);
	ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_HI_OFF);

	aq_caps = 0;
	aq_caps |= admin_queue->q_depth & ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK;
	aq_caps |= (sizeof(struct ena_admin_aq_entry) <<
		ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT) &
		ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK;

	acq_caps = 0;
	acq_caps |= admin_queue->q_depth & ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK;
	acq_caps |= (sizeof(struct ena_admin_acq_entry) <<
		ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT) &
		ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK;

	ENA_REG_WRITE32(ena_dev->bus, aq_caps, ena_dev->reg_bar + ENA_REGS_AQ_CAPS_OFF);
	ENA_REG_WRITE32(ena_dev->bus, acq_caps, ena_dev->reg_bar + ENA_REGS_ACQ_CAPS_OFF);
	ret = ena_com_admin_init_aenq(ena_dev, aenq_handlers);
	if (ret)
		goto error;

	admin_queue->running_state = true;

	return 0;
error:
	ena_com_admin_destroy(ena_dev);

	return ret;
}
int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
			    struct ena_com_create_io_ctx *ctx)
{
	struct ena_com_io_sq *io_sq;
	struct ena_com_io_cq *io_cq;
	int ret;

	if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) {
		ena_trc_err("Qid (%d) is bigger than max num of queues (%d)\n",
			    ctx->qid, ENA_TOTAL_NUM_QUEUES);
		return ENA_COM_INVAL;
	}

	io_sq = &ena_dev->io_sq_queues[ctx->qid];
	io_cq = &ena_dev->io_cq_queues[ctx->qid];

	memset(io_sq, 0x0, sizeof(*io_sq));
	memset(io_cq, 0x0, sizeof(*io_cq));

	/* Init CQ */
	io_cq->q_depth = ctx->queue_size;
	io_cq->direction = ctx->direction;
	io_cq->qid = ctx->qid;

	io_cq->msix_vector = ctx->msix_vector;

	io_sq->q_depth = ctx->queue_size;
	io_sq->direction = ctx->direction;
	io_sq->qid = ctx->qid;

	io_sq->mem_queue_type = ctx->mem_queue_type;

	if (ctx->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
		/* header length is limited to 8 bits */
		io_sq->tx_max_header_size =
			ENA_MIN32(ena_dev->tx_max_header_size, SZ_256);

	ret = ena_com_init_io_sq(ena_dev, ctx, io_sq);
	if (ret)
		goto error;
	ret = ena_com_init_io_cq(ena_dev, ctx, io_cq);
	if (ret)
		goto error;

	ret = ena_com_create_io_cq(ena_dev, io_cq);
	if (ret)
		goto error;

	ret = ena_com_create_io_sq(ena_dev, io_sq, io_cq->idx);
	if (ret)
		goto destroy_cq;

	return 0;

destroy_cq:
	ena_com_destroy_io_cq(ena_dev, io_cq);
error:
	ena_com_io_queue_free(ena_dev, io_sq, io_cq);
	return ret;
}
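/* Illustrative usage sketch (added commentary, not upstream code): a minimal
 * Tx queue creation, assuming struct ena_com_create_io_ctx exposes the fields
 * used below (qid, direction, mem_queue_type, msix_vector, queue_size) as in
 * the accompanying ena_com.h. The qid/vector/size values are arbitrary.
 */
#if 0 /* example only, excluded from the build */
static int ena_example_create_tx_queue(struct ena_com_dev *ena_dev)
{
	struct ena_com_create_io_ctx ctx = { 0 };

	ctx.qid = 0;
	ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
	ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
	ctx.msix_vector = 1;
	ctx.queue_size = 1024;

	return ena_com_create_io_queue(ena_dev, &ctx);
}
#endif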
void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid)
{
	struct ena_com_io_sq *io_sq;
	struct ena_com_io_cq *io_cq;

	if (qid >= ENA_TOTAL_NUM_QUEUES) {
		ena_trc_err("Qid (%d) is bigger than max num of queues (%d)\n",
			    qid, ENA_TOTAL_NUM_QUEUES);
		return;
	}

	io_sq = &ena_dev->io_sq_queues[qid];
	io_cq = &ena_dev->io_cq_queues[qid];

	ena_com_destroy_io_sq(ena_dev, io_sq);
	ena_com_destroy_io_cq(ena_dev, io_cq);

	ena_com_io_queue_free(ena_dev, io_sq, io_cq);
}
int ena_com_get_link_params(struct ena_com_dev *ena_dev,
			    struct ena_admin_get_feat_resp *resp)
{
	return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG);
}
int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
			      struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
	struct ena_admin_get_feat_resp get_resp;
	int rc;

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_DEVICE_ATTRIBUTES);
	if (rc)
		return rc;

	memcpy(&get_feat_ctx->dev_attr, &get_resp.u.dev_attr,
	       sizeof(get_resp.u.dev_attr));
	ena_dev->supported_features = get_resp.u.dev_attr.supported_features;

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_MAX_QUEUES_NUM);
	if (rc)
		return rc;

	memcpy(&get_feat_ctx->max_queues, &get_resp.u.max_queue,
	       sizeof(get_resp.u.max_queue));
	ena_dev->tx_max_header_size = get_resp.u.max_queue.max_header_size;

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_AENQ_CONFIG);
	if (rc)
		return rc;

	memcpy(&get_feat_ctx->aenq, &get_resp.u.aenq,
	       sizeof(get_resp.u.aenq));

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_STATELESS_OFFLOAD_CONFIG);
	if (rc)
		return rc;

	memcpy(&get_feat_ctx->offload, &get_resp.u.offload,
	       sizeof(get_resp.u.offload));

	/* The driver hints admin command isn't mandatory, so if it
	 * isn't supported set the driver hints to 0
	 */
	rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_HW_HINTS);

	if (!rc)
		memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints,
		       sizeof(get_resp.u.hw_hints));
	else if (rc == ENA_COM_UNSUPPORTED)
		memset(&get_feat_ctx->hw_hints, 0x0, sizeof(get_feat_ctx->hw_hints));
	else
		return rc;

	return 0;
}
void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev)
{
	ena_com_handle_admin_completion(&ena_dev->admin_queue);
}
/* ena_handle_specific_aenq_event:
 * return the handler that is relevant to the specific event group
 */
static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *dev,
						     u16 group)
{
	struct ena_aenq_handlers *aenq_handlers = dev->aenq.aenq_handlers;

	if ((group < ENA_MAX_HANDLERS) && aenq_handlers->handlers[group])
		return aenq_handlers->handlers[group];

	return aenq_handlers->unimplemented_handler;
}
/* ena_aenq_intr_handler:
 * handles the aenq incoming events.
 * pop events from the queue and apply the specific handler
 */
void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
{
	struct ena_admin_aenq_entry *aenq_e;
	struct ena_admin_aenq_common_desc *aenq_common;
	struct ena_com_aenq *aenq = &dev->aenq;
	ena_aenq_handler handler_cb;
	unsigned long long timestamp;
	u16 masked_head, processed = 0;
	u8 phase;

	masked_head = aenq->head & (aenq->q_depth - 1);
	phase = aenq->phase;
	aenq_e = &aenq->entries[masked_head]; /* Get first entry */
	aenq_common = &aenq_e->aenq_common_desc;

	/* Go over all the events */
	while ((aenq_common->flags & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) ==
		phase) {
		timestamp = (unsigned long long)aenq_common->timestamp_low |
			((unsigned long long)aenq_common->timestamp_high << 32);
		ENA_TOUCH(timestamp); /* In case debug is disabled */
		ena_trc_dbg("AENQ! Group[%x] Syndrome[%x] timestamp: [%llus]\n",
			    aenq_common->group,
			    aenq_common->syndrom,
			    timestamp);

		/* Handle specific event */
		handler_cb = ena_com_get_specific_aenq_cb(dev,
							  aenq_common->group);
		handler_cb(data, aenq_e); /* call the actual event handler */

		/* Get next event entry */
		masked_head++;
		processed++;

		if (unlikely(masked_head == aenq->q_depth)) {
			masked_head = 0;
			phase = !phase;
		}
		aenq_e = &aenq->entries[masked_head];
		aenq_common = &aenq_e->aenq_common_desc;
	}

	aenq->head += processed;
	aenq->phase = phase;

	/* Don't update aenq doorbell if there weren't any processed events */
	if (!processed)
		return;

	/* write the aenq doorbell after all AENQ descriptors were read */
	mb();
	ENA_REG_WRITE32(dev->bus, (u32)aenq->head, dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
}
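/* Illustrative sketch (added commentary, not upstream code): what a
 * driver-side handler table consumed by ena_com_aenq_intr_handler() might
 * look like. The ena_example_* handler names are hypothetical; struct
 * ena_aenq_handlers and the ENA_ADMIN_* group ids come from the
 * accompanying headers.
 */
#if 0 /* example only, excluded from the build */
static void ena_example_link_change_cb(void *adapter_data,
				       struct ena_admin_aenq_entry *aenq_e)
{
	/* react to a link up/down notification */
}

static void ena_example_unimplemented_cb(void *adapter_data,
					 struct ena_admin_aenq_entry *aenq_e)
{
	/* default for groups without a dedicated handler */
}

static struct ena_aenq_handlers ena_example_aenq_handlers = {
	.handlers = {
		[ENA_ADMIN_LINK_CHANGE] = ena_example_link_change_cb,
	},
	.unimplemented_handler = ena_example_unimplemented_cb,
};
#endif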
int ena_com_dev_reset(struct ena_com_dev *ena_dev,
		      enum ena_regs_reset_reason_types reset_reason)
{
	u32 stat, timeout, cap, reset_val;
	int rc;

	stat = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
	cap = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);

	if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) ||
		     (cap == ENA_MMIO_READ_TIMEOUT))) {
		ena_trc_err("Reg read32 timeout occurred\n");
		return ENA_COM_TIMER_EXPIRED;
	}

	if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) {
		ena_trc_err("Device isn't ready, can't reset device\n");
		return ENA_COM_INVAL;
	}

	timeout = (cap & ENA_REGS_CAPS_RESET_TIMEOUT_MASK) >>
		ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT;
	if (timeout == 0) {
		ena_trc_err("Invalid timeout value\n");
		return ENA_COM_INVAL;
	}

	/* start reset */
	reset_val = ENA_REGS_DEV_CTL_DEV_RESET_MASK;
	reset_val |= (reset_reason << ENA_REGS_DEV_CTL_RESET_REASON_SHIFT) &
		ENA_REGS_DEV_CTL_RESET_REASON_MASK;
	ENA_REG_WRITE32(ena_dev->bus, reset_val, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);

	/* Write again the MMIO read request address */
	ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);

	rc = wait_for_reset_state(ena_dev, timeout,
				  ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK);
	if (rc != 0) {
		ena_trc_err("Reset indication didn't turn on\n");
		return rc;
	}

	/* reset done */
	ENA_REG_WRITE32(ena_dev->bus, 0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
	rc = wait_for_reset_state(ena_dev, timeout, 0);
	if (rc != 0) {
		ena_trc_err("Reset indication didn't turn off\n");
		return rc;
	}

	timeout = (cap & ENA_REGS_CAPS_ADMIN_CMD_TO_MASK) >>
		ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT;
	if (timeout)
		/* the resolution of timeout reg is 100ms */
		ena_dev->admin_queue.completion_timeout = timeout * 100000;
	else
		ena_dev->admin_queue.completion_timeout = ADMIN_CMD_TIMEOUT_US;

	return 0;
}
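/* Worked example (added commentary): the CAPS admin-command-timeout field is
 * in units of 100 ms, while completion_timeout is kept in usec. A device
 * reporting timeout = 5 therefore yields 5 * 100000 = 500000 usec (0.5 s);
 * a device reporting 0 falls back to ADMIN_CMD_TIMEOUT_US (3 s).
 */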
static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
			     struct ena_com_stats_ctx *ctx,
			     enum ena_admin_get_stats_type type)
{
	struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx->get_cmd;
	struct ena_admin_acq_get_stats_resp *get_resp = &ctx->get_resp;
	struct ena_com_admin_queue *admin_queue;
	int ret;

	admin_queue = &ena_dev->admin_queue;

	get_cmd->aq_common_descriptor.opcode = ENA_ADMIN_GET_STATS;
	get_cmd->aq_common_descriptor.flags = 0;
	get_cmd->type = type;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)get_cmd,
					    sizeof(ctx->get_cmd),
					    (struct ena_admin_acq_entry *)get_resp,
					    sizeof(ctx->get_resp));

	if (unlikely(ret))
		ena_trc_err("Failed to get stats. error: %d\n", ret);

	return ret;
}
int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
				struct ena_admin_basic_stats *stats)
{
	struct ena_com_stats_ctx ctx;
	int ret;

	memset(&ctx, 0x0, sizeof(ctx));
	ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_BASIC);
	if (likely(ret == 0))
		memcpy(stats, &ctx.get_resp.basic_stats,
		       sizeof(ctx.get_resp.basic_stats));

	return ret;
}
int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu)
{
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
		ena_trc_dbg("Feature %d isn't supported\n", ENA_ADMIN_MTU);
		return ENA_COM_UNSUPPORTED;
	}

	memset(&cmd, 0x0, sizeof(cmd));
	admin_queue = &ena_dev->admin_queue;

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags = 0;
	cmd.feat_common.feature_id = ENA_ADMIN_MTU;
	cmd.u.mtu.mtu = mtu;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));

	if (unlikely(ret))
		ena_trc_err("Failed to set mtu %d. error: %d\n", mtu, ret);

	return ret;
}
int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
				 struct ena_admin_feature_offload_desc *offload)
{
	int ret;
	struct ena_admin_get_feat_resp resp;

	ret = ena_com_get_feature(ena_dev, &resp,
				  ENA_ADMIN_STATELESS_OFFLOAD_CONFIG);
	if (unlikely(ret)) {
		ena_trc_err("Failed to get offload capabilities %d\n", ret);
		return ret;
	}

	memcpy(offload, &resp.u.offload, sizeof(resp.u.offload));

	return 0;
}
int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	struct ena_admin_get_feat_resp get_resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev,
						ENA_ADMIN_RSS_HASH_FUNCTION)) {
		ena_trc_dbg("Feature %d isn't supported\n",
			    ENA_ADMIN_RSS_HASH_FUNCTION);
		return ENA_COM_UNSUPPORTED;
	}

	/* Validate hash function is supported */
	ret = ena_com_get_feature(ena_dev, &get_resp,
				  ENA_ADMIN_RSS_HASH_FUNCTION);
	if (unlikely(ret))
		return ret;

	if (!(get_resp.u.flow_hash_func.supported_func & (1 << rss->hash_func))) {
		ena_trc_err("Func hash %d isn't supported by device, abort\n",
			    rss->hash_func);
		return ENA_COM_UNSUPPORTED;
	}

	memset(&cmd, 0x0, sizeof(cmd));

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags =
		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_FUNCTION;
	cmd.u.flow_hash_func.init_val = rss->hash_init_val;
	cmd.u.flow_hash_func.selected_func = 1 << rss->hash_func;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.control_buffer.address,
				   rss->hash_key_dma_addr);
	if (unlikely(ret)) {
		ena_trc_err("memory address set failed\n");
		return ret;
	}

	cmd.control_buffer.length = sizeof(*rss->hash_key);

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));
	if (unlikely(ret)) {
		ena_trc_err("Failed to set hash function %d. error: %d\n",
			    rss->hash_func, ret);
		return ENA_COM_INVAL;
	}

	return 0;
}

int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
			       enum ena_admin_hash_functions func,
			       const u8 *key, u16 key_len, u32 init_val)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	struct ena_admin_feature_rss_flow_hash_control *hash_key =
		rss->hash_key;
	int rc;

	/* Make sure the key size is a multiple of DWORDs (32-bit words) */
	if (unlikely(key_len & 0x3))
		return ENA_COM_INVAL;

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_HASH_FUNCTION,
				    rss->hash_key_dma_addr,
				    sizeof(*rss->hash_key));
	if (unlikely(rc))
		return rc;

	if (!((1 << func) & get_resp.u.flow_hash_func.supported_func)) {
		ena_trc_err("Flow hash function %d isn't supported\n", func);
		return ENA_COM_UNSUPPORTED;
	}

	switch (func) {
	case ENA_ADMIN_TOEPLITZ:
		if (key_len > sizeof(hash_key->key)) {
			ena_trc_err("key len (%hu) is bigger than the max supported (%zu)\n",
				    key_len, sizeof(hash_key->key));
			return ENA_COM_INVAL;
		}

		memcpy(hash_key->key, key, key_len);
		rss->hash_init_val = init_val;
		hash_key->keys_num = key_len >> 2;
		break;
	case ENA_ADMIN_CRC32:
		rss->hash_init_val = init_val;
		break;
	default:
		ena_trc_err("Invalid hash function (%d)\n", func);
		return ENA_COM_INVAL;
	}

	/* Record the requested function; ena_com_set_hash_function()
	 * programs the device from rss->hash_func.
	 */
	rss->hash_func = func;
	rc = ena_com_set_hash_function(ena_dev);

	/* In case of failure, restore the old function from the device */
	if (unlikely(rc))
		ena_com_get_hash_function(ena_dev, NULL, NULL);

	return rc;
}
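
/*
 * Illustrative usage sketch (not part of the original file): programming a
 * 40-byte Toeplitz key. The key bytes below are arbitrary placeholders, not
 * a recommended value; key_len must be a multiple of 4 or the call fails
 * with ENA_COM_INVAL.
 */
#if 0
static int example_set_toeplitz_key(struct ena_com_dev *ena_dev)
{
	static const u8 key[40] = { 0x6d, 0x5a, /* remaining bytes zero */ };

	return ena_com_fill_hash_function(ena_dev, ENA_ADMIN_TOEPLITZ,
					  key, sizeof(key), 0);
}
#endif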

int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
			      enum ena_admin_hash_functions *func,
			      u8 *key)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	struct ena_admin_feature_rss_flow_hash_control *hash_key =
		rss->hash_key;
	int rc;

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_HASH_FUNCTION,
				    rss->hash_key_dma_addr,
				    sizeof(*rss->hash_key));
	if (unlikely(rc))
		return rc;

	rss->hash_func = get_resp.u.flow_hash_func.selected_func;
	if (func)
		*func = rss->hash_func;

	if (key)
		memcpy(key, hash_key->key, (size_t)(hash_key->keys_num) << 2);

	return 0;
}

int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev,
			  enum ena_admin_flow_hash_proto proto,
			  u16 *fields)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	int rc;

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_HASH_INPUT,
				    rss->hash_ctrl_dma_addr,
				    sizeof(*rss->hash_ctrl));
	if (unlikely(rc))
		return rc;

	if (fields)
		*fields = rss->hash_ctrl->selected_fields[proto].fields;

	return 0;
}

int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev,
						ENA_ADMIN_RSS_HASH_INPUT)) {
		ena_trc_dbg("Feature %d isn't supported\n",
			    ENA_ADMIN_RSS_HASH_INPUT);
		return ENA_COM_UNSUPPORTED;
	}

	memset(&cmd, 0x0, sizeof(cmd));

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags =
		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_INPUT;
	cmd.u.flow_hash_input.enabled_input_sort =
		ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK |
		ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.control_buffer.address,
				   rss->hash_ctrl_dma_addr);
	if (unlikely(ret)) {
		ena_trc_err("memory address set failed\n");
		return ret;
	}

	cmd.control_buffer.length = sizeof(*hash_ctrl);

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));
	if (unlikely(ret))
		ena_trc_err("Failed to set hash input. error: %d\n", ret);

	return ret;
}

int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_feature_rss_hash_control *hash_ctrl =
		rss->hash_ctrl;
	u16 available_fields = 0;
	int rc, i;

	/* Get the supported hash input */
	rc = ena_com_get_hash_ctrl(ena_dev, 0, NULL);
	if (unlikely(rc))
		return rc;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP4].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP4].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP6].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP6].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP6].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_NOT_IP].fields =
		ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA;

	for (i = 0; i < ENA_ADMIN_RSS_PROTO_NUM; i++) {
		available_fields = hash_ctrl->selected_fields[i].fields &
				   hash_ctrl->supported_fields[i].fields;
		if (available_fields != hash_ctrl->selected_fields[i].fields) {
			ena_trc_err("hash control doesn't support all the desired configuration. proto %x supported %x selected %x\n",
				    i, hash_ctrl->supported_fields[i].fields,
				    hash_ctrl->selected_fields[i].fields);
			return ENA_COM_UNSUPPORTED;
		}
	}

	rc = ena_com_set_hash_ctrl(ena_dev);

	/* In case of failure, restore the old hash ctrl */
	if (unlikely(rc))
		ena_com_get_hash_ctrl(ena_dev, 0, NULL);

	return rc;
}

int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
			   enum ena_admin_flow_hash_proto proto,
			   u16 hash_fields)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
	u16 supported_fields;
	int rc;

	if (proto >= ENA_ADMIN_RSS_PROTO_NUM) {
		ena_trc_err("Invalid proto num (%u)\n", proto);
		return ENA_COM_INVAL;
	}

	/* Get the ctrl table */
	rc = ena_com_get_hash_ctrl(ena_dev, proto, NULL);
	if (unlikely(rc))
		return rc;

	/* Make sure all the fields are supported */
	supported_fields = hash_ctrl->supported_fields[proto].fields;
	if ((hash_fields & supported_fields) != hash_fields) {
		ena_trc_err("proto %d doesn't support the required fields %x. supports only: %x\n",
			    proto, hash_fields, supported_fields);
	}

	hash_ctrl->selected_fields[proto].fields = hash_fields;

	rc = ena_com_set_hash_ctrl(ena_dev);

	/* In case of failure, restore the old hash ctrl */
	if (unlikely(rc))
		ena_com_get_hash_ctrl(ena_dev, 0, NULL);

	return rc;
}
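
/*
 * Illustrative usage sketch (not part of the original file): restricting the
 * TCP/IPv4 hash input to a 2-tuple (source/destination IP only), e.g. so
 * every segment of a flow lands on one queue regardless of ports. Uses only
 * the field flags already referenced above.
 */
#if 0
static int example_tcp4_two_tuple(struct ena_com_dev *ena_dev)
{
	return ena_com_fill_hash_ctrl(ena_dev, ENA_ADMIN_RSS_TCP4,
				      ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA);
}
#endif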

int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev,
				      u16 entry_idx, u16 entry_value)
{
	struct ena_rss *rss = &ena_dev->rss;

	if (unlikely(entry_idx >= (1 << rss->tbl_log_size)))
		return ENA_COM_INVAL;

	if (unlikely(entry_value > ENA_TOTAL_NUM_QUEUES))
		return ENA_COM_INVAL;

	rss->host_rss_ind_tbl[entry_idx] = entry_value;

	return 0;
}

int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev,
						ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG)) {
		ena_trc_dbg("Feature %d isn't supported\n",
			    ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
		return ENA_COM_UNSUPPORTED;
	}

	ret = ena_com_ind_tbl_convert_to_device(ena_dev);
	if (ret) {
		ena_trc_err("Failed to convert host indirection table to device table\n");
		return ret;
	}

	memset(&cmd, 0x0, sizeof(cmd));

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags =
		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	cmd.feat_common.feature_id = ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG;
	cmd.u.ind_table.size = rss->tbl_log_size;
	cmd.u.ind_table.inline_index = 0xFFFFFFFF;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.control_buffer.address,
				   rss->rss_ind_tbl_dma_addr);
	if (unlikely(ret)) {
		ena_trc_err("memory address set failed\n");
		return ret;
	}

	cmd.control_buffer.length = (1ULL << rss->tbl_log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));

	if (unlikely(ret))
		ena_trc_err("Failed to set indirect table. error: %d\n", ret);

	return ret;
}

int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	u32 tbl_size;
	int i, rc;

	tbl_size = (1ULL << rss->tbl_log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG,
				    rss->rss_ind_tbl_dma_addr,
				    tbl_size);
	if (unlikely(rc))
		return rc;

	rc = ena_com_ind_tbl_convert_from_device(ena_dev);
	if (unlikely(rc))
		return rc;

	for (i = 0; i < (1 << rss->tbl_log_size); i++)
		ind_tbl[i] = rss->host_rss_ind_tbl[i];

	return 0;
}
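
/*
 * Illustrative usage sketch (not part of the original file): spreading the
 * indirection table round-robin across num_queues RX queues and pushing it
 * to the device. tbl_log_size is assumed to match the size passed to
 * ena_com_rss_init(); the queue-id convention (host vs. device index) is
 * driver-specific, a plain host queue id is assumed here.
 */
#if 0
static int example_ind_tbl_round_robin(struct ena_com_dev *ena_dev,
				       u16 tbl_log_size, u16 num_queues)
{
	u16 i;
	int rc;

	for (i = 0; i < (1 << tbl_log_size); i++) {
		rc = ena_com_indirect_table_fill_entry(ena_dev, i,
						       i % num_queues);
		if (unlikely(rc))
			return rc;
	}

	return ena_com_indirect_table_set(ena_dev);
}
#endif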

int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size)
{
	int rc;

	memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));

	rc = ena_com_indirect_table_allocate(ena_dev, indr_tbl_log_size);
	if (unlikely(rc))
		goto err_indr_tbl;

	rc = ena_com_hash_key_allocate(ena_dev);
	if (unlikely(rc))
		goto err_hash_key;

	rc = ena_com_hash_ctrl_init(ena_dev);
	if (unlikely(rc))
		goto err_hash_ctrl;

	return 0;

err_hash_ctrl:
	ena_com_hash_key_destroy(ena_dev);
err_hash_key:
	ena_com_indirect_table_destroy(ena_dev);
err_indr_tbl:
	return rc;
}

void ena_com_rss_destroy(struct ena_com_dev *ena_dev)
{
	ena_com_indirect_table_destroy(ena_dev);
	ena_com_hash_key_destroy(ena_dev);
	ena_com_hash_ctrl_destroy(ena_dev);

	memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
}
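
/*
 * Illustrative bring-up sketch (not part of the original file): the order in
 * which the RSS helpers above are typically combined during device init.
 * ENA_DEFAULT_RSS_LOG_SIZE is a hypothetical name for the table size; the
 * tolerance of ENA_COM_UNSUPPORTED is an arbitrary policy choice.
 */
#if 0
static int example_rss_configure(struct ena_com_dev *ena_dev)
{
	int rc;

	rc = ena_com_rss_init(ena_dev, ENA_DEFAULT_RSS_LOG_SIZE);
	if (unlikely(rc))
		return rc;

	/* CRC32 with a zero seed; ENA_ADMIN_TOEPLITZ would also need a key */
	rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_CRC32, NULL, 0, 0);
	if (unlikely(rc) && (rc != ENA_COM_UNSUPPORTED))
		goto err;

	rc = ena_com_set_default_hash_ctrl(ena_dev);
	if (unlikely(rc) && (rc != ENA_COM_UNSUPPORTED))
		goto err;

	rc = ena_com_indirect_table_set(ena_dev);
	if (unlikely(rc) && (rc != ENA_COM_UNSUPPORTED))
		goto err;

	return 0;
err:
	ena_com_rss_destroy(ena_dev);
	return rc;
}
#endif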

int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
			       SZ_4K,
			       host_attr->host_info,
			       host_attr->host_info_dma_addr,
			       host_attr->host_info_dma_handle);
	if (unlikely(!host_attr->host_info))
		return ENA_COM_NO_MEM;

	return 0;
}

int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
				u32 debug_area_size)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
			       debug_area_size,
			       host_attr->debug_area_virt_addr,
			       host_attr->debug_area_dma_addr,
			       host_attr->debug_area_dma_handle);
	if (unlikely(!host_attr->debug_area_virt_addr)) {
		host_attr->debug_area_size = 0;
		return ENA_COM_NO_MEM;
	}

	host_attr->debug_area_size = debug_area_size;

	return 0;
}

void ena_com_delete_host_info(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	if (host_attr->host_info) {
		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
				      SZ_4K,
				      host_attr->host_info,
				      host_attr->host_info_dma_addr,
				      host_attr->host_info_dma_handle);
		host_attr->host_info = NULL;
	}
}

void ena_com_delete_debug_area(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	if (host_attr->debug_area_virt_addr) {
		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
				      host_attr->debug_area_size,
				      host_attr->debug_area_virt_addr,
				      host_attr->debug_area_dma_addr,
				      host_attr->debug_area_dma_handle);
		host_attr->debug_area_virt_addr = NULL;
	}
}

int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	/* Host attribute config is called before ena_com_get_dev_attr_feat
	 * so ena_com can't check if the feature is supported.
	 */

	memset(&cmd, 0x0, sizeof(cmd));
	admin_queue = &ena_dev->admin_queue;

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.feat_common.feature_id = ENA_ADMIN_HOST_ATTR_CONFIG;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.u.host_attr.debug_ba,
				   host_attr->debug_area_dma_addr);
	if (unlikely(ret)) {
		ena_trc_err("memory address set failed\n");
		return ret;
	}

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.u.host_attr.os_info_ba,
				   host_attr->host_info_dma_addr);
	if (unlikely(ret)) {
		ena_trc_err("memory address set failed\n");
		return ret;
	}

	cmd.u.host_attr.debug_area_size = host_attr->debug_area_size;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));

	if (unlikely(ret))
		ena_trc_err("Failed to set host attributes: %d\n", ret);

	return ret;
}

/* Interrupt moderation */
bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev)
{
	return ena_com_check_supported_feature_id(ena_dev,
						  ENA_ADMIN_INTERRUPT_MODERATION);
}

int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
						      u32 tx_coalesce_usecs)
{
	if (!ena_dev->intr_delay_resolution) {
		ena_trc_err("Illegal interrupt delay granularity value\n");
		return ENA_COM_FAULT;
	}

	ena_dev->intr_moder_tx_interval = tx_coalesce_usecs /
		ena_dev->intr_delay_resolution;

	return 0;
}

int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
						      u32 rx_coalesce_usecs)
{
	if (!ena_dev->intr_delay_resolution) {
		ena_trc_err("Illegal interrupt delay granularity value\n");
		return ENA_COM_FAULT;
	}

	/* We use the LOWEST entry of the moderation table for storing
	 * nonadaptive interrupt coalescing values
	 */
	ena_dev->intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval =
		rx_coalesce_usecs / ena_dev->intr_delay_resolution;

	return 0;
}
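
/*
 * Illustrative sketch (not part of the original file) of the conversion the
 * two helpers above perform: with an intr_delay_resolution of e.g. 4 us per
 * device unit, a request for 64 us of RX coalescing is stored as 64 / 4 = 16
 * units, and the nonadaptive getters below return those stored units.
 */
#if 0
static int example_set_rx_coalescing(struct ena_com_dev *ena_dev)
{
	/* 64 us; rejected with ENA_COM_FAULT if the device never
	 * reported a delay resolution.
	 */
	return ena_com_update_nonadaptive_moderation_interval_rx(ena_dev, 64);
}
#endif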

void ena_com_destroy_interrupt_moderation(struct ena_com_dev *ena_dev)
{
	if (ena_dev->intr_moder_tbl)
		ENA_MEM_FREE(ena_dev->dmadev, ena_dev->intr_moder_tbl);
	ena_dev->intr_moder_tbl = NULL;
}

int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
{
	struct ena_admin_get_feat_resp get_resp;
	u16 delay_resolution;
	int rc;

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_INTERRUPT_MODERATION);

	if (rc) {
		if (rc == ENA_COM_UNSUPPORTED) {
			ena_trc_dbg("Feature %d isn't supported\n",
				    ENA_ADMIN_INTERRUPT_MODERATION);
			rc = 0;
		} else {
			ena_trc_err("Failed to get interrupt moderation admin cmd. rc: %d\n",
				    rc);
		}

		/* no moderation supported, disable adaptive support */
		ena_com_disable_adaptive_moderation(ena_dev);
		return rc;
	}

	rc = ena_com_init_interrupt_moderation_table(ena_dev);
	if (rc)
		goto err;

	/* if moderation is supported by device we set adaptive moderation */
	delay_resolution = get_resp.u.intr_moderation.intr_delay_resolution;
	ena_com_update_intr_delay_resolution(ena_dev, delay_resolution);
	ena_com_enable_adaptive_moderation(ena_dev);

	return 0;
err:
	ena_com_destroy_interrupt_moderation(ena_dev);
	return rc;
}

void ena_com_config_default_interrupt_moderation_table(struct ena_com_dev *ena_dev)
{
	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;

	if (!intr_moder_tbl)
		return;

	intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval =
		ENA_INTR_LOWEST_USECS;
	intr_moder_tbl[ENA_INTR_MODER_LOWEST].pkts_per_interval =
		ENA_INTR_LOWEST_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_LOWEST].bytes_per_interval =
		ENA_INTR_LOWEST_BYTES;

	intr_moder_tbl[ENA_INTR_MODER_LOW].intr_moder_interval =
		ENA_INTR_LOW_USECS;
	intr_moder_tbl[ENA_INTR_MODER_LOW].pkts_per_interval =
		ENA_INTR_LOW_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_LOW].bytes_per_interval =
		ENA_INTR_LOW_BYTES;

	intr_moder_tbl[ENA_INTR_MODER_MID].intr_moder_interval =
		ENA_INTR_MID_USECS;
	intr_moder_tbl[ENA_INTR_MODER_MID].pkts_per_interval =
		ENA_INTR_MID_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_MID].bytes_per_interval =
		ENA_INTR_MID_BYTES;

	intr_moder_tbl[ENA_INTR_MODER_HIGH].intr_moder_interval =
		ENA_INTR_HIGH_USECS;
	intr_moder_tbl[ENA_INTR_MODER_HIGH].pkts_per_interval =
		ENA_INTR_HIGH_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_HIGH].bytes_per_interval =
		ENA_INTR_HIGH_BYTES;

	intr_moder_tbl[ENA_INTR_MODER_HIGHEST].intr_moder_interval =
		ENA_INTR_HIGHEST_USECS;
	intr_moder_tbl[ENA_INTR_MODER_HIGHEST].pkts_per_interval =
		ENA_INTR_HIGHEST_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_HIGHEST].bytes_per_interval =
		ENA_INTR_HIGHEST_BYTES;
}

unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev)
{
	return ena_dev->intr_moder_tx_interval;
}

unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev)
{
	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;

	if (intr_moder_tbl)
		return intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval;

	return 0;
}

void ena_com_init_intr_moderation_entry(struct ena_com_dev *ena_dev,
					enum ena_intr_moder_level level,
					struct ena_intr_moder_entry *entry)
{
	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;

	if (level >= ENA_INTR_MAX_NUM_OF_LEVELS)
		return;

	intr_moder_tbl[level].intr_moder_interval = entry->intr_moder_interval;
	if (ena_dev->intr_delay_resolution)
		intr_moder_tbl[level].intr_moder_interval /=
			ena_dev->intr_delay_resolution;
	intr_moder_tbl[level].pkts_per_interval = entry->pkts_per_interval;

	/* use hardcoded value until ethtool supports bytecount parameter */
	if (entry->bytes_per_interval != ENA_INTR_BYTE_COUNT_NOT_SUPPORTED)
		intr_moder_tbl[level].bytes_per_interval = entry->bytes_per_interval;
}

void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev,
				       enum ena_intr_moder_level level,
				       struct ena_intr_moder_entry *entry)
{
	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;

	if (level >= ENA_INTR_MAX_NUM_OF_LEVELS)
		return;

	entry->intr_moder_interval = intr_moder_tbl[level].intr_moder_interval;
	if (ena_dev->intr_delay_resolution)
		entry->intr_moder_interval *= ena_dev->intr_delay_resolution;
	entry->pkts_per_interval =
		intr_moder_tbl[level].pkts_per_interval;
	entry->bytes_per_interval = intr_moder_tbl[level].bytes_per_interval;
}
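
/*
 * Illustrative usage sketch (not part of the original file): a read-modify-
 * write of the MID moderation level. Note the symmetry above: the getter
 * scales the interval back to microseconds and the setter divides it by the
 * delay resolution again, so an entry can be round-tripped safely.
 */
#if 0
static void example_double_mid_interval(struct ena_com_dev *ena_dev)
{
	struct ena_intr_moder_entry entry;

	ena_com_get_intr_moderation_entry(ena_dev, ENA_INTR_MODER_MID, &entry);
	entry.intr_moder_interval *= 2; /* in microseconds at this point */
	ena_com_init_intr_moderation_entry(ena_dev, ENA_INTR_MODER_MID, &entry);
}
#endif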