/*-
 * BSD LICENSE
 *
 * Copyright (c) 2015-2016 Amazon.com, Inc. or its affiliates.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in
 * the documentation and/or other materials provided with the
 * distribution.
 * * Neither the name of copyright holder nor the names of its
 * contributors may be used to endorse or promote products derived
 * from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "ena_com.h"

/*****************************************************************************/
/*****************************************************************************/

/* Timeout in microseconds */
#define ADMIN_CMD_TIMEOUT_US (1000000)

#define ENA_ASYNC_QUEUE_DEPTH 4
#define ENA_ADMIN_QUEUE_DEPTH 32

#define MIN_ENA_VER (((ENA_COMMON_SPEC_VERSION_MAJOR) << \
		ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) \
		| (ENA_COMMON_SPEC_VERSION_MINOR))

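/* The version register packs the major version above the minor bits
 * (ENA_REGS_VERSION_MAJOR_VERSION_SHIFT), so MIN_ENA_VER can be compared
 * against the raw register value with a single integer comparison; e.g.,
 * assuming an 8-bit major shift, spec version 2.0 encodes as
 * (2 << 8) | 0 = 0x200.
 */
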
#define ENA_CTRL_MAJOR 0
#define ENA_CTRL_MINOR 0
#define ENA_CTRL_SUB_MINOR 1

#define MIN_ENA_CTRL_VER \
	(((ENA_CTRL_MAJOR) << \
	(ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT)) | \
	((ENA_CTRL_MINOR) << \
	(ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT)) | \
	(ENA_CTRL_SUB_MINOR))

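/* Helpers to split a 64-bit bus address into the 32-bit halves that the
 * device's paired LO/HI base-address registers expect.
 */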
#define ENA_DMA_ADDR_TO_UINT32_LOW(x)	((u32)((u64)(x)))
#define ENA_DMA_ADDR_TO_UINT32_HIGH(x)	((u32)(((u64)(x)) >> 32))

#define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF

static int ena_alloc_cnt;

/*****************************************************************************/
/*****************************************************************************/
/*****************************************************************************/

enum ena_cmd_status {
	ENA_CMD_SUBMITTED,
	ENA_CMD_COMPLETED,
	/* Abort - canceled by the driver */
	ENA_CMD_ABORTED,
};

struct ena_comp_ctx {
	ena_wait_event_t wait_event;
	struct ena_admin_acq_entry *user_cqe;
	u32 comp_size;
	enum ena_cmd_status status;
	/* status from the device */
	u8 comp_status;
	u8 cmd_opcode;
	bool occupied;
};

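/* Fill a device descriptor address field from a host dma address, rejecting
 * addresses wider than the DMA width advertised by the device (cached in
 * ena_dev->dma_addr_bits by ena_com_get_dma_width()).
 */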
static inline int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
				       struct ena_common_mem_addr *ena_addr,
				       dma_addr_t addr)
{
	if ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr) {
		ena_trc_err("dma address has more bits than the device supports\n");
		return ENA_COM_INVAL;
	}

	ena_addr->mem_addr_low = (u32)addr;
	ena_addr->mem_addr_high =
		((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 32)) >> 32);

	return 0;
}

static int ena_com_admin_init_sq(struct ena_com_admin_queue *queue)
{
	ENA_MEM_ALLOC_COHERENT(queue->q_dmadev,
			       ADMIN_SQ_SIZE(queue->q_depth),
			       queue->sq.entries,
			       queue->sq.dma_addr,
			       queue->sq.mem_handle);

	if (!queue->sq.entries) {
		ena_trc_err("memory allocation failed");
		return ENA_COM_NO_MEM;
	}

	queue->sq.head = 0;
	queue->sq.tail = 0;
	queue->sq.phase = 1;

	queue->sq.db_addr = NULL;

	return 0;
}

static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue)
{
	ENA_MEM_ALLOC_COHERENT(queue->q_dmadev,
			       ADMIN_CQ_SIZE(queue->q_depth),
			       queue->cq.entries,
			       queue->cq.dma_addr,
			       queue->cq.mem_handle);

	if (!queue->cq.entries) {
		ena_trc_err("memory allocation failed");
		return ENA_COM_NO_MEM;
	}

	queue->cq.head = 0;
	queue->cq.phase = 1;

	return 0;
}

static int ena_com_admin_init_aenq(struct ena_com_dev *dev,
				   struct ena_aenq_handlers *aenq_handlers)
{
	u32 addr_low, addr_high, aenq_caps;

	dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
	ENA_MEM_ALLOC_COHERENT(dev->dmadev,
			       ADMIN_AENQ_SIZE(dev->aenq.q_depth),
			       dev->aenq.entries,
			       dev->aenq.dma_addr,
			       dev->aenq.mem_handle);

	if (!dev->aenq.entries) {
		ena_trc_err("memory allocation failed");
		return ENA_COM_NO_MEM;
	}

	dev->aenq.head = dev->aenq.q_depth;
	dev->aenq.phase = 1;

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(dev->aenq.dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(dev->aenq.dma_addr);

	ENA_REG_WRITE32(addr_low, (unsigned char *)dev->reg_bar
			+ ENA_REGS_AENQ_BASE_LO_OFF);
	ENA_REG_WRITE32(addr_high, (unsigned char *)dev->reg_bar
			+ ENA_REGS_AENQ_BASE_HI_OFF);

	aenq_caps = 0;
	aenq_caps |= dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
	aenq_caps |= (sizeof(struct ena_admin_aenq_entry) <<
		ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
		ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;

	ENA_REG_WRITE32(aenq_caps, (unsigned char *)dev->reg_bar
			+ ENA_REGS_AENQ_CAPS_OFF);

	if (unlikely(!aenq_handlers))
		ena_trc_err("aenq handlers pointer is NULL\n");

	dev->aenq.aenq_handlers = aenq_handlers;

	return 0;
}

static inline void comp_ctxt_release(struct ena_com_admin_queue *queue,
				     struct ena_comp_ctx *comp_ctx)
{
	comp_ctx->occupied = false;
	ATOMIC32_DEC(&queue->outstanding_cmds);
}

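/* Look up the completion context tracking an in-flight admin command.
 * Contexts live in a flat array indexed by command id, so the id carried in
 * every completion entry maps back to its context in O(1). When capture is
 * true the context is claimed for a new command; finding it already occupied
 * indicates a command id collision and is reported.
 */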
static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *queue,
					  u16 command_id, bool capture)
{
	if (unlikely(command_id >= queue->q_depth)) {
		ena_trc_err("command id is larger than the queue size. cmd_id: %u queue size %d\n",
			    command_id, queue->q_depth);
		return NULL;
	}

	if (unlikely(queue->comp_ctx[command_id].occupied && capture)) {
		ena_trc_err("Completion context is occupied\n");
		return NULL;
	}

	if (capture) {
		ATOMIC32_INC(&queue->outstanding_cmds);
		queue->comp_ctx[command_id].occupied = true;
	}

	return &queue->comp_ctx[command_id];
}

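/* Push one command into the admin submission ring. The tail counter is free
 * running; masking it with (q_depth - 1) yields the ring slot (the depth is a
 * power of two), and the phase bit is inverted on every wraparound so the
 * device can distinguish fresh descriptors from stale ones. Writing the new
 * tail to the doorbell register hands the descriptor over to the device.
 */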
static struct ena_comp_ctx *
__ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
			   struct ena_admin_aq_entry *cmd,
			   size_t cmd_size_in_bytes,
			   struct ena_admin_acq_entry *comp,
			   size_t comp_size_in_bytes)
{
	struct ena_comp_ctx *comp_ctx;
	u16 tail_masked, cmd_id;
	u16 queue_size_mask;
	u16 cnt;

	queue_size_mask = admin_queue->q_depth - 1;

	tail_masked = admin_queue->sq.tail & queue_size_mask;

	/* In case of queue FULL */
	cnt = admin_queue->sq.tail - admin_queue->sq.head;
	if (cnt >= admin_queue->q_depth) {
		ena_trc_dbg("admin queue is FULL (tail %d head %d depth: %d)\n",
			    admin_queue->sq.tail,
			    admin_queue->sq.head,
			    admin_queue->q_depth);
		admin_queue->stats.out_of_space++;
		return ERR_PTR(ENA_COM_NO_SPACE);
	}

	cmd_id = admin_queue->curr_cmd_id;

	cmd->aq_common_descriptor.flags |= admin_queue->sq.phase &
		ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK;

	cmd->aq_common_descriptor.command_id |= cmd_id &
		ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;

	comp_ctx = get_comp_ctxt(admin_queue, cmd_id, true);

	comp_ctx->status = ENA_CMD_SUBMITTED;
	comp_ctx->comp_size = (u32)comp_size_in_bytes;
	comp_ctx->user_cqe = comp;
	comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode;

	ENA_WAIT_EVENT_CLEAR(comp_ctx->wait_event);

	memcpy(&admin_queue->sq.entries[tail_masked], cmd, cmd_size_in_bytes);

	admin_queue->curr_cmd_id = (admin_queue->curr_cmd_id + 1) &
		queue_size_mask;

	admin_queue->sq.tail++;
	admin_queue->stats.submitted_cmd++;

	if (unlikely((admin_queue->sq.tail & queue_size_mask) == 0))
		admin_queue->sq.phase = !admin_queue->sq.phase;

	ENA_REG_WRITE32(admin_queue->sq.tail, admin_queue->sq.db_addr);

	return comp_ctx;
}

static inline int ena_com_init_comp_ctxt(struct ena_com_admin_queue *queue)
{
	size_t size = queue->q_depth * sizeof(struct ena_comp_ctx);
	struct ena_comp_ctx *comp_ctx;
	u16 i;

	queue->comp_ctx = ENA_MEM_ALLOC(queue->q_dmadev, size);
	if (unlikely(!queue->comp_ctx)) {
		ena_trc_err("memory allocation failed");
		return ENA_COM_NO_MEM;
	}

	for (i = 0; i < queue->q_depth; i++) {
		comp_ctx = get_comp_ctxt(queue, i, false);
		if (comp_ctx)
			ENA_WAIT_EVENT_INIT(comp_ctx->wait_event);
	}

	return 0;
}

static struct ena_comp_ctx *
ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
			 struct ena_admin_aq_entry *cmd,
			 size_t cmd_size_in_bytes,
			 struct ena_admin_acq_entry *comp,
			 size_t comp_size_in_bytes)
{
	unsigned long flags = 0;
	struct ena_comp_ctx *comp_ctx;

	ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
	if (unlikely(!admin_queue->running_state)) {
		ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
		return ERR_PTR(ENA_COM_NO_DEVICE);
	}
	comp_ctx = __ena_com_submit_admin_cmd(admin_queue, cmd,
					      cmd_size_in_bytes,
					      comp,
					      comp_size_in_bytes);
	if (unlikely(IS_ERR(comp_ctx)))
		admin_queue->running_state = false;
	ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);

	return comp_ctx;
}

static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
			      struct ena_com_create_io_ctx *ctx,
			      struct ena_com_io_sq *io_sq)
{
	size_t size;

	memset(&io_sq->desc_addr, 0x0, sizeof(struct ena_com_io_desc_addr));

	io_sq->desc_entry_size =
		(io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
		sizeof(struct ena_eth_io_tx_desc) :
		sizeof(struct ena_eth_io_rx_desc);

	size = io_sq->desc_entry_size * io_sq->q_depth;

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
		ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
				       size,
				       io_sq->desc_addr.virt_addr,
				       io_sq->desc_addr.phys_addr,
				       io_sq->desc_addr.mem_handle);
	else
		io_sq->desc_addr.virt_addr =
			ENA_MEM_ALLOC(ena_dev->dmadev, size);

	if (!io_sq->desc_addr.virt_addr) {
		ena_trc_err("memory allocation failed");
		return ENA_COM_NO_MEM;
	}

	io_sq->tail = 0;
	io_sq->next_to_comp = 0;
	io_sq->phase = 1;

	return 0;
}

static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
			      struct ena_com_create_io_ctx *ctx,
			      struct ena_com_io_cq *io_cq)
{
	size_t size;

	memset(&io_cq->cdesc_addr, 0x0, sizeof(struct ena_com_io_desc_addr));

	/* Use the basic completion descriptor for Rx */
	io_cq->cdesc_entry_size_in_bytes =
		(io_cq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
		sizeof(struct ena_eth_io_tx_cdesc) :
		sizeof(struct ena_eth_io_rx_cdesc_base);

	size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;

	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
			       size,
			       io_cq->cdesc_addr.virt_addr,
			       io_cq->cdesc_addr.phys_addr,
			       io_cq->cdesc_addr.mem_handle);

	if (!io_cq->cdesc_addr.virt_addr) {
		ena_trc_err("memory allocation failed");
		return ENA_COM_NO_MEM;
	}

	io_cq->phase = 1;
	io_cq->head = 0;

	return 0;
}

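/* Process a single admin completion entry: recover the originating context
 * from the command id, record the status reported by the device, copy the
 * completion into the caller-supplied buffer (if any) and, when not in
 * polling mode, wake up the waiting thread.
 */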
static void
ena_com_handle_single_admin_completion(struct ena_com_admin_queue *admin_queue,
				       struct ena_admin_acq_entry *cqe)
{
	struct ena_comp_ctx *comp_ctx;
	u16 cmd_id;

	cmd_id = cqe->acq_common_descriptor.command &
		ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;

	comp_ctx = get_comp_ctxt(admin_queue, cmd_id, false);
	if (unlikely(!comp_ctx)) {
		ena_trc_err("comp_ctx is NULL. Changing the admin queue running state\n");
		admin_queue->running_state = false;
		return;
	}

	comp_ctx->status = ENA_CMD_COMPLETED;
	comp_ctx->comp_status = cqe->acq_common_descriptor.status;

	if (comp_ctx->user_cqe)
		memcpy(comp_ctx->user_cqe, (void *)cqe, comp_ctx->comp_size);

	if (!admin_queue->polling)
		ENA_WAIT_EVENT_SIGNAL(comp_ctx->wait_event);
}

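/* Drain the admin completion ring. An entry belongs to the driver only while
 * its phase flag matches the expected phase; the expected phase is inverted
 * on each wraparound, which makes explicit head/tail synchronization with the
 * device unnecessary.
 */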
static void
ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_queue)
{
	struct ena_admin_acq_entry *cqe = NULL;
	u16 comp_num = 0;
	u16 head_masked;
	u8 phase;

	head_masked = admin_queue->cq.head & (admin_queue->q_depth - 1);
	phase = admin_queue->cq.phase;

	cqe = &admin_queue->cq.entries[head_masked];

	/* Go over all the completions */
	while ((cqe->acq_common_descriptor.flags &
		ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
		/* Do not read the rest of the completion entry before the
		 * phase bit was validated
		 */
		rmb();
		ena_com_handle_single_admin_completion(admin_queue, cqe);

		head_masked++;
		comp_num++;
		if (unlikely(head_masked == admin_queue->q_depth)) {
			head_masked = 0;
			phase = !phase;
		}

		cqe = &admin_queue->cq.entries[head_masked];
	}

	admin_queue->cq.head += comp_num;
	admin_queue->cq.phase = phase;
	admin_queue->sq.head += comp_num;
	admin_queue->stats.completed_cmd += comp_num;
}

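/* Translate the admin completion status reported by the device into the
 * driver's ENA_COM_* error space.
 */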
static int ena_com_comp_status_to_errno(u8 comp_status)
{
	if (unlikely(comp_status != 0))
		ena_trc_err("admin command failed[%u]\n", comp_status);

	if (unlikely(comp_status > ENA_ADMIN_UNKNOWN_ERROR))
		return ENA_COM_INVAL;

	switch (comp_status) {
	case ENA_ADMIN_SUCCESS:
		return 0;
	case ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE:
		return ENA_COM_NO_MEM;
	case ENA_ADMIN_UNSUPPORTED_OPCODE:
		return ENA_COM_PERMISSION;
	case ENA_ADMIN_BAD_OPCODE:
	case ENA_ADMIN_MALFORMED_REQUEST:
	case ENA_ADMIN_ILLEGAL_PARAMETER:
	case ENA_ADMIN_UNKNOWN_ERROR:
		return ENA_COM_INVAL;
	}

	return 0;
}

static int
ena_com_wait_and_process_admin_cq_polling(
	struct ena_comp_ctx *comp_ctx,
	struct ena_com_admin_queue *admin_queue)
{
	unsigned long flags = 0;
	u64 start_time;
	int ret;

	start_time = ENA_GET_SYSTEM_USECS();

	while (comp_ctx->status == ENA_CMD_SUBMITTED) {
		if ((ENA_GET_SYSTEM_USECS() - start_time) >
		    ADMIN_CMD_TIMEOUT_US) {
			ena_trc_err("Wait for completion (polling) timeout\n");
			/* ENA didn't have any completion */
			ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
			admin_queue->stats.no_completion++;
			admin_queue->running_state = false;
			ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);

			ret = ENA_COM_TIMER_EXPIRED;
			goto err;
		}

		ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
		ena_com_handle_admin_completion(admin_queue);
		ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
	}

	if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
		ena_trc_err("Command was aborted\n");
		ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
		admin_queue->stats.aborted_cmd++;
		ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
		ret = ENA_COM_NO_DEVICE;
		goto err;
	}

	ENA_ASSERT(comp_ctx->status == ENA_CMD_COMPLETED,
		   "Invalid comp status %d\n", comp_ctx->status);

	ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
err:
	comp_ctxt_release(admin_queue, comp_ctx);
	return ret;
}

static int
ena_com_wait_and_process_admin_cq_interrupts(
	struct ena_comp_ctx *comp_ctx,
	struct ena_com_admin_queue *admin_queue)
{
	unsigned long flags = 0;
	int ret = 0;

	ENA_WAIT_EVENT_WAIT(comp_ctx->wait_event,
			    ADMIN_CMD_TIMEOUT_US);

	/* In case the command wasn't completed find out the root cause.
	 * There might be 2 kinds of errors
	 * 1) No completion (timeout reached)
	 * 2) There is completion but the device didn't get any msi-x interrupt.
	 */
	if (unlikely(comp_ctx->status == ENA_CMD_SUBMITTED)) {
		ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
		ena_com_handle_admin_completion(admin_queue);
		admin_queue->stats.no_completion++;
		ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);

		if (comp_ctx->status == ENA_CMD_COMPLETED)
			ena_trc_err("The ena device has a completion but the driver didn't receive any MSI-X interrupt (cmd %d)\n",
				    comp_ctx->cmd_opcode);
		else
			ena_trc_err("The ena device didn't send any completion for the admin cmd %d status %d\n",
				    comp_ctx->cmd_opcode, comp_ctx->status);

		admin_queue->running_state = false;
		ret = ENA_COM_TIMER_EXPIRED;
		goto err;
	}

	ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
err:
	comp_ctxt_release(admin_queue, comp_ctx);
	return ret;
}

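/* Readless register access works by posting a request through the
 * ENA_REGS_MMIO_REG_READ_OFF register: the driver writes a sequence number
 * plus the register offset, and the device DMA-writes the register value and
 * the echoed request id into the read_resp buffer in host memory. req_id is
 * pre-set to seq_num + 0xDEAD so that a stale response can never match the
 * sequence number the driver polls for.
 */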
/* This method reads the hardware device register through posting writes
 * and waiting for response
 * On timeout the function will return ENA_MMIO_READ_TIMEOUT
 */
static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
	volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp =
		mmio_read->read_resp;
	u32 mmio_read_reg, ret;
	unsigned long flags = 0;
	int i;

	/* If readless is disabled, perform regular read */
	if (!mmio_read->readless_supported)
		return ENA_REG_READ32((unsigned char *)ena_dev->reg_bar +
				      offset);

	ENA_SPINLOCK_LOCK(mmio_read->lock, flags);
	mmio_read->seq_num++;

	read_resp->req_id = mmio_read->seq_num + 0xDEAD;
	mmio_read_reg = (offset << ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT) &
			ENA_REGS_MMIO_REG_READ_REG_OFF_MASK;
	mmio_read_reg |= mmio_read->seq_num &
			ENA_REGS_MMIO_REG_READ_REQ_ID_MASK;

	/* make sure read_resp->req_id get updated before the hw can write
	 * to it
	 */
	wmb();

	ENA_REG_WRITE32(mmio_read_reg, (unsigned char *)ena_dev->reg_bar
			+ ENA_REGS_MMIO_REG_READ_OFF);

	for (i = 0; i < ENA_REG_READ_TIMEOUT; i++) {
		if (read_resp->req_id == mmio_read->seq_num)
			break;

		ENA_UDELAY(1);
	}

	if (unlikely(i == ENA_REG_READ_TIMEOUT)) {
		ena_trc_err("reading reg failed for timeout. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n",
			    mmio_read->seq_num,
			    offset,
			    read_resp->req_id,
			    read_resp->reg_off);
		ret = ENA_MMIO_READ_TIMEOUT;
		goto err;
	}

	if (read_resp->reg_off != offset) {
		ena_trc_err("reading failed for wrong offset value\n");
		ret = ENA_MMIO_READ_TIMEOUT;
	} else {
		ret = read_resp->reg_val;
	}
err:
	ENA_SPINLOCK_UNLOCK(mmio_read->lock, flags);

	return ret;
}

/* There are two types of waiting for completion.
 * Polling mode - wait until the completion is available.
 * Async mode - wait on wait queue until the completion is ready
 * (or the timeout expired).
 * It is expected that the IRQ handler has called
 * ena_com_handle_admin_completion() to mark the completions.
 */
static int
ena_com_wait_and_process_admin_cq(struct ena_comp_ctx *comp_ctx,
				  struct ena_com_admin_queue *admin_queue)
{
	if (admin_queue->polling)
		return ena_com_wait_and_process_admin_cq_polling(comp_ctx,
								 admin_queue);

	return ena_com_wait_and_process_admin_cq_interrupts(comp_ctx,
							    admin_queue);
}

static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
				 struct ena_com_io_sq *io_sq)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_destroy_sq_cmd destroy_cmd;
	struct ena_admin_acq_destroy_sq_resp_desc destroy_resp;
	u8 direction;
	int ret;

	memset(&destroy_cmd, 0x0, sizeof(struct ena_admin_aq_destroy_sq_cmd));

	if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
		direction = ENA_ADMIN_SQ_DIRECTION_TX;
	else
		direction = ENA_ADMIN_SQ_DIRECTION_RX;

	destroy_cmd.sq.sq_identity |= (direction <<
		ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT) &
		ENA_ADMIN_SQ_SQ_DIRECTION_MASK;

	destroy_cmd.sq.sq_idx = io_sq->idx;
	destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_SQ;

	ret = ena_com_execute_admin_command(
		admin_queue,
		(struct ena_admin_aq_entry *)&destroy_cmd,
		sizeof(destroy_cmd),
		(struct ena_admin_acq_entry *)&destroy_resp,
		sizeof(destroy_resp));

	if (unlikely(ret && (ret != ENA_COM_NO_DEVICE)))
		ena_trc_err("failed to destroy io sq error: %d\n", ret);

	return ret;
}

static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
				  struct ena_com_io_sq *io_sq,
				  struct ena_com_io_cq *io_cq)
{
	size_t size;

	if (io_cq->cdesc_addr.virt_addr) {
		size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;

		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
				      size,
				      io_cq->cdesc_addr.virt_addr,
				      io_cq->cdesc_addr.phys_addr,
				      io_cq->cdesc_addr.mem_handle);

		io_cq->cdesc_addr.virt_addr = NULL;
	}

	if (io_sq->desc_addr.virt_addr) {
		size = io_sq->desc_entry_size * io_sq->q_depth;

		if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
			ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
					      size,
					      io_sq->desc_addr.virt_addr,
					      io_sq->desc_addr.phys_addr,
					      io_sq->desc_addr.mem_handle);
		else
			ENA_MEM_FREE(ena_dev->dmadev,
				     io_sq->desc_addr.virt_addr);

		io_sq->desc_addr.virt_addr = NULL;
	}
}

static int wait_for_reset_state(struct ena_com_dev *ena_dev,
				u32 timeout, u16 exp_state)
{
	u32 val, i;

	for (i = 0; i < timeout; i++) {
		val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);

		if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) {
			ena_trc_err("Reg read timeout occurred\n");
			return ENA_COM_TIMER_EXPIRED;
		}

		if ((val & ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK) ==
		    exp_state)
			return 0;

		/* The resolution of the timeout is 100ms */
		ENA_MSLEEP(100);
	}

	return ENA_COM_TIMER_EXPIRED;
}

static bool
ena_com_check_supported_feature_id(struct ena_com_dev *ena_dev,
				   enum ena_admin_aq_feature_id feature_id)
{
	u32 feature_mask = 1 << feature_id;

	/* Device attributes are always supported */
	if ((feature_id != ENA_ADMIN_DEVICE_ATTRIBUTES) &&
	    !(ena_dev->supported_features & feature_mask))
		return false;

	return true;
}

static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
				  struct ena_admin_get_feat_resp *get_resp,
				  enum ena_admin_aq_feature_id feature_id,
				  dma_addr_t control_buf_dma_addr,
				  u32 control_buff_size)
{
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_get_feat_cmd get_cmd;
	int ret;

	if (!ena_dev) {
		ena_trc_err("%s : ena_dev is NULL\n", __func__);
		return ENA_COM_NO_DEVICE;
	}

	if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
		ena_trc_info("Feature %d isn't supported\n", feature_id);
		return ENA_COM_PERMISSION;
	}

	memset(&get_cmd, 0x0, sizeof(get_cmd));
	admin_queue = &ena_dev->admin_queue;

	get_cmd.aq_common_descriptor.opcode = ENA_ADMIN_GET_FEATURE;

	if (control_buff_size)
		get_cmd.aq_common_descriptor.flags =
			ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	else
		get_cmd.aq_common_descriptor.flags = 0;

	ret = ena_com_mem_addr_set(ena_dev,
				   &get_cmd.control_buffer.address,
				   control_buf_dma_addr);
	if (unlikely(ret)) {
		ena_trc_err("memory address set failed\n");
		return ret;
	}

	get_cmd.control_buffer.length = control_buff_size;

	get_cmd.feat_common.feature_id = feature_id;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)
					    &get_cmd,
					    sizeof(get_cmd),
					    (struct ena_admin_acq_entry *)
					    get_resp,
					    sizeof(*get_resp));

	if (unlikely(ret))
		ena_trc_err("Failed to submit get_feature command %d error: %d\n",
			    feature_id, ret);

	return ret;
}

static int ena_com_get_feature(struct ena_com_dev *ena_dev,
			       struct ena_admin_get_feat_resp *get_resp,
			       enum ena_admin_aq_feature_id feature_id)
{
	return ena_com_get_feature_ex(ena_dev,
				      get_resp,
				      feature_id,
				      0,
				      0);
}

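/* A minimal usage sketch for the get-feature path (the concrete union member
 * of the response depends on the feature being queried, so the field below is
 * illustrative):
 *
 *	struct ena_admin_get_feat_resp resp;
 *	int rc = ena_com_get_feature(ena_dev, &resp, ENA_ADMIN_LINK_CONFIG);
 *	if (rc == 0)
 *		... use resp.u.link ...
 *
 * ena_com_get_link_params() below is exactly this pattern.
 */
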
static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
			       sizeof(*rss->hash_key),
			       rss->hash_key,
			       rss->hash_key_dma_addr,
			       rss->hash_key_mem_handle);

	if (unlikely(!rss->hash_key))
		return ENA_COM_NO_MEM;

	return 0;
}

static void ena_com_hash_key_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	if (rss->hash_key)
		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
				      sizeof(*rss->hash_key),
				      rss->hash_key,
				      rss->hash_key_dma_addr,
				      rss->hash_key_mem_handle);
	rss->hash_key = NULL;
}

static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
			       sizeof(*rss->hash_ctrl),
			       rss->hash_ctrl,
			       rss->hash_ctrl_dma_addr,
			       rss->hash_ctrl_mem_handle);

	if (unlikely(!rss->hash_ctrl))
		return ENA_COM_NO_MEM;

	return 0;
}

static void ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	if (rss->hash_ctrl)
		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
				      sizeof(*rss->hash_ctrl),
				      rss->hash_ctrl,
				      rss->hash_ctrl_dma_addr,
				      rss->hash_ctrl_mem_handle);
	rss->hash_ctrl = NULL;
}

static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
					   u16 log_size)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	size_t tbl_size;
	int ret;

	ret = ena_com_get_feature(ena_dev, &get_resp,
				  ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
	if (unlikely(ret))
		return ret;

	if ((get_resp.u.ind_table.min_size > log_size) ||
	    (get_resp.u.ind_table.max_size < log_size)) {
		ena_trc_err("indirect table size doesn't fit. requested size: %d while min is:%d and max %d\n",
			    1 << log_size,
			    1 << get_resp.u.ind_table.min_size,
			    1 << get_resp.u.ind_table.max_size);
		return ENA_COM_INVAL;
	}

	tbl_size = (1ULL << log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
			       tbl_size,
			       rss->rss_ind_tbl,
			       rss->rss_ind_tbl_dma_addr,
			       rss->rss_ind_tbl_mem_handle);
	if (unlikely(!rss->rss_ind_tbl))
		goto mem_err1;

	tbl_size = (1ULL << log_size) * sizeof(u16);
	rss->host_rss_ind_tbl =
		ENA_MEM_ALLOC(ena_dev->dmadev, tbl_size);
	if (unlikely(!rss->host_rss_ind_tbl))
		goto mem_err2;

	rss->tbl_log_size = log_size;

	return 0;

mem_err2:
	tbl_size = (1ULL << log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
			      tbl_size,
			      rss->rss_ind_tbl,
			      rss->rss_ind_tbl_dma_addr,
			      rss->rss_ind_tbl_mem_handle);
	rss->rss_ind_tbl = NULL;
mem_err1:
	rss->tbl_log_size = 0;
	return ENA_COM_NO_MEM;
}

static void ena_com_indirect_table_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;
	size_t tbl_size = (1ULL << rss->tbl_log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	if (rss->rss_ind_tbl)
		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
				      tbl_size,
				      rss->rss_ind_tbl,
				      rss->rss_ind_tbl_dma_addr,
				      rss->rss_ind_tbl_mem_handle);
	rss->rss_ind_tbl = NULL;

	if (rss->host_rss_ind_tbl)
		ENA_MEM_FREE(ena_dev->dmadev, rss->host_rss_ind_tbl);
	rss->host_rss_ind_tbl = NULL;
}

static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
				struct ena_com_io_sq *io_sq, u16 cq_idx)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_create_sq_cmd create_cmd;
	struct ena_admin_acq_create_sq_resp_desc cmd_completion;
	u8 direction;
	int ret;

	memset(&create_cmd, 0x0, sizeof(struct ena_admin_aq_create_sq_cmd));

	create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_SQ;

	if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
		direction = ENA_ADMIN_SQ_DIRECTION_TX;
	else
		direction = ENA_ADMIN_SQ_DIRECTION_RX;

	create_cmd.sq_identity |= (direction <<
		ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT) &
		ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK;

	create_cmd.sq_caps_2 |= io_sq->mem_queue_type &
		ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK;

	create_cmd.sq_caps_2 |= (ENA_ADMIN_COMPLETION_POLICY_DESC <<
		ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT) &
		ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK;

	create_cmd.sq_caps_3 |=
		ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK;

	create_cmd.cq_idx = cq_idx;
	create_cmd.sq_depth = io_sq->q_depth;

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
		ret = ena_com_mem_addr_set(ena_dev,
					   &create_cmd.sq_ba,
					   io_sq->desc_addr.phys_addr);
		if (unlikely(ret)) {
			ena_trc_err("memory address set failed\n");
			return ret;
		}
	}

	ret = ena_com_execute_admin_command(
		admin_queue,
		(struct ena_admin_aq_entry *)&create_cmd,
		sizeof(create_cmd),
		(struct ena_admin_acq_entry *)&cmd_completion,
		sizeof(cmd_completion));
	if (unlikely(ret)) {
		ena_trc_err("Failed to create IO SQ. error: %d\n", ret);
		return ret;
	}

	io_sq->idx = cmd_completion.sq_idx;

	io_sq->db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
		(uintptr_t)cmd_completion.sq_doorbell_offset);

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
		io_sq->header_addr = (u8 __iomem *)((uintptr_t)ena_dev->mem_bar
				+ cmd_completion.llq_headers_offset);

		io_sq->desc_addr.pbuf_dev_addr =
			(u8 __iomem *)((uintptr_t)ena_dev->mem_bar +
			cmd_completion.llq_descriptors_offset);
	}

	ena_trc_dbg("created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth);

	return ret;
}

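/* The RSS indirection table exists in two representations: the host table
 * stores the user-visible queue ids, while the table handed to the device
 * must store the device-assigned cq_idx of each Rx SQ. The two helpers below
 * convert between the representations in both directions.
 */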
static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_com_io_sq *io_sq;
	u16 qid;
	int i;

	for (i = 0; i < 1 << rss->tbl_log_size; i++) {
		qid = rss->host_rss_ind_tbl[i];
		if (qid >= ENA_TOTAL_NUM_QUEUES)
			return ENA_COM_INVAL;

		io_sq = &ena_dev->io_sq_queues[qid];

		if (io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX)
			return ENA_COM_INVAL;

		rss->rss_ind_tbl[i].cq_idx = io_sq->idx;
	}

	return 0;
}

static int ena_com_ind_tbl_convert_from_device(struct ena_com_dev *ena_dev)
{
	u16 dev_idx_to_host_tbl[ENA_TOTAL_NUM_QUEUES] = { (u16)-1 };
	struct ena_rss *rss = &ena_dev->rss;
	u8 idx;
	u16 i;

	for (i = 0; i < ENA_TOTAL_NUM_QUEUES; i++)
		dev_idx_to_host_tbl[ena_dev->io_sq_queues[i].idx] = i;

	for (i = 0; i < 1 << rss->tbl_log_size; i++) {
		if (rss->rss_ind_tbl[i].cq_idx > ENA_TOTAL_NUM_QUEUES)
			return ENA_COM_INVAL;
		idx = (u8)rss->rss_ind_tbl[i].cq_idx;

		if (dev_idx_to_host_tbl[idx] > ENA_TOTAL_NUM_QUEUES)
			return ENA_COM_INVAL;

		rss->host_rss_ind_tbl[i] = dev_idx_to_host_tbl[idx];
	}

	return 0;
}

static int ena_com_init_interrupt_moderation_table(struct ena_com_dev *ena_dev)
{
	size_t size;

	size = sizeof(struct ena_intr_moder_entry) * ENA_INTR_MAX_NUM_OF_LEVELS;

	ena_dev->intr_moder_tbl = ENA_MEM_ALLOC(ena_dev->dmadev, size);
	if (!ena_dev->intr_moder_tbl)
		return ENA_COM_NO_MEM;

	ena_com_config_default_interrupt_moderation_table(ena_dev);

	return 0;
}

static void
ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev,
				     u16 intr_delay_resolution)
{
	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
	unsigned int i;

	if (!intr_delay_resolution) {
		ena_trc_err("Illegal intr_delay_resolution provided. Going to use default 1 usec resolution\n");
		intr_delay_resolution = 1;
	}
	ena_dev->intr_delay_resolution = intr_delay_resolution;

	/* update Rx */
	for (i = 0; i < ENA_INTR_MAX_NUM_OF_LEVELS; i++)
		intr_moder_tbl[i].intr_moder_interval /= intr_delay_resolution;

	/* update Tx */
	ena_dev->intr_moder_tx_interval /= intr_delay_resolution;
}

/*****************************************************************************/
/*******************************      API       ******************************/
/*****************************************************************************/

int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue,
				  struct ena_admin_aq_entry *cmd,
				  size_t cmd_size,
				  struct ena_admin_acq_entry *comp,
				  size_t comp_size)
{
	struct ena_comp_ctx *comp_ctx;
	int ret;

	comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size,
					    comp, comp_size);
	if (unlikely(IS_ERR(comp_ctx))) {
		ena_trc_err("Failed to submit command [%ld]\n",
			    PTR_ERR(comp_ctx));
		return PTR_ERR(comp_ctx);
	}

	ret = ena_com_wait_and_process_admin_cq(comp_ctx, admin_queue);
	if (unlikely(ret)) {
		if (admin_queue->running_state)
			ena_trc_err("Failed to process command. ret = %d\n",
				    ret);
		else
			ena_trc_dbg("Failed to process command. ret = %d\n",
				    ret);
	}
	return ret;
}

int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
			 struct ena_com_io_cq *io_cq)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_create_cq_cmd create_cmd;
	struct ena_admin_acq_create_cq_resp_desc cmd_completion;
	int ret;

	memset(&create_cmd, 0x0, sizeof(struct ena_admin_aq_create_cq_cmd));

	create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_CQ;

	create_cmd.cq_caps_2 |= (io_cq->cdesc_entry_size_in_bytes / 4) &
		ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;
	create_cmd.cq_caps_1 |=
		ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK;

	create_cmd.msix_vector = io_cq->msix_vector;
	create_cmd.cq_depth = io_cq->q_depth;

	ret = ena_com_mem_addr_set(ena_dev,
				   &create_cmd.cq_ba,
				   io_cq->cdesc_addr.phys_addr);
	if (unlikely(ret)) {
		ena_trc_err("memory address set failed\n");
		return ret;
	}

	ret = ena_com_execute_admin_command(
		admin_queue,
		(struct ena_admin_aq_entry *)&create_cmd,
		sizeof(create_cmd),
		(struct ena_admin_acq_entry *)&cmd_completion,
		sizeof(cmd_completion));
	if (unlikely(ret)) {
		ena_trc_err("Failed to create IO CQ. error: %d\n", ret);
		return ret;
	}

	io_cq->idx = cmd_completion.cq_idx;

	io_cq->unmask_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
		cmd_completion.cq_interrupt_unmask_register_offset);

	if (cmd_completion.cq_head_db_register_offset)
		io_cq->cq_head_db_reg =
			(u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
			cmd_completion.cq_head_db_register_offset);

	if (cmd_completion.numa_node_register_offset)
		io_cq->numa_node_cfg_reg =
			(u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
			cmd_completion.numa_node_register_offset);

	ena_trc_dbg("created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth);

	return ret;
}

int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
			    struct ena_com_io_sq **io_sq,
			    struct ena_com_io_cq **io_cq)
{
	if (qid >= ENA_TOTAL_NUM_QUEUES) {
		ena_trc_err("Invalid queue number %d but the max is %d\n",
			    qid, ENA_TOTAL_NUM_QUEUES);
		return ENA_COM_INVAL;
	}

	*io_sq = &ena_dev->io_sq_queues[qid];
	*io_cq = &ena_dev->io_cq_queues[qid];

	return 0;
}

void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_comp_ctx *comp_ctx;
	u16 i;

	if (!admin_queue->comp_ctx)
		return;

	for (i = 0; i < admin_queue->q_depth; i++) {
		comp_ctx = get_comp_ctxt(admin_queue, i, false);
		if (unlikely(!comp_ctx))
			break;

		comp_ctx->status = ENA_CMD_ABORTED;

		ENA_WAIT_EVENT_SIGNAL(comp_ctx->wait_event);
	}
}

void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	unsigned long flags = 0;

	ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
	while (ATOMIC32_READ(&admin_queue->outstanding_cmds) != 0) {
		ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
		ENA_MSLEEP(20);
		ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
	}
	ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
}

int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
			  struct ena_com_io_cq *io_cq)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_destroy_cq_cmd destroy_cmd;
	struct ena_admin_acq_destroy_cq_resp_desc destroy_resp;
	int ret;

	memset(&destroy_cmd, 0x0, sizeof(struct ena_admin_aq_destroy_cq_cmd));

	destroy_cmd.cq_idx = io_cq->idx;
	destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_CQ;

	ret = ena_com_execute_admin_command(
		admin_queue,
		(struct ena_admin_aq_entry *)&destroy_cmd,
		sizeof(destroy_cmd),
		(struct ena_admin_acq_entry *)&destroy_resp,
		sizeof(destroy_resp));

	if (unlikely(ret && (ret != ENA_COM_NO_DEVICE)))
		ena_trc_err("Failed to destroy IO CQ. error: %d\n", ret);

	return ret;
}

bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev)
{
	return ena_dev->admin_queue.running_state;
}

void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	unsigned long flags = 0;

	ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
	ena_dev->admin_queue.running_state = state;
	ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
}

void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev)
{
	u16 depth = ena_dev->aenq.q_depth;

	ENA_ASSERT(ena_dev->aenq.head == depth, "Invalid AENQ state\n");

	/* Init head_db to mark that all entries in the queue
	 * are initially available
	 */
	ENA_REG_WRITE32(depth, (unsigned char *)ena_dev->reg_bar
			+ ENA_REGS_AENQ_HEAD_DB_OFF);
}

int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
{
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	struct ena_admin_get_feat_resp get_resp;
	int ret;

	if (unlikely(!ena_dev)) {
		ena_trc_err("%s : ena_dev is NULL\n", __func__);
		return ENA_COM_NO_DEVICE;
	}

	ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG);
	if (ret) {
		ena_trc_info("Can't get aenq configuration\n");
		return ret;
	}

	if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) {
		ena_trc_warn("Trying to set unsupported aenq events. supported flag: %x asked flag: %x\n",
			     get_resp.u.aenq.supported_groups,
			     groups_flag);
		return ENA_COM_PERMISSION;
	}

	memset(&cmd, 0x0, sizeof(cmd));
	admin_queue = &ena_dev->admin_queue;

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags = 0;
	cmd.feat_common.feature_id = ENA_ADMIN_AENQ_CONFIG;
	cmd.u.aenq.enabled_groups = groups_flag;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));

	if (unlikely(ret))
		ena_trc_err("Failed to config AENQ ret: %d\n", ret);

	return ret;
}

int ena_com_get_dma_width(struct ena_com_dev *ena_dev)
{
	u32 caps = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
	int width;

	if (unlikely(caps == ENA_MMIO_READ_TIMEOUT)) {
		ena_trc_err("Reg read timeout occurred\n");
		return ENA_COM_TIMER_EXPIRED;
	}

	width = (caps & ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK) >>
		ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT;

	ena_trc_dbg("ENA dma width: %d\n", width);

	if ((width < 32) || (width > ENA_MAX_PHYS_ADDR_SIZE_BITS)) {
		ena_trc_err("DMA width illegal value: %d\n", width);
		return ENA_COM_INVAL;
	}

	ena_dev->dma_addr_bits = width;

	return width;
}

int ena_com_validate_version(struct ena_com_dev *ena_dev)
{
	u32 ver;
	u32 ctrl_ver;
	u32 ctrl_ver_masked;

	/* Make sure the ENA version and the controller version are at least
	 * as new as the driver expects
	 */
	ver = ena_com_reg_bar_read32(ena_dev, ENA_REGS_VERSION_OFF);
	ctrl_ver = ena_com_reg_bar_read32(ena_dev,
					  ENA_REGS_CONTROLLER_VERSION_OFF);

	if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) ||
		     (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) {
		ena_trc_err("Reg read timeout occurred\n");
		return ENA_COM_TIMER_EXPIRED;
	}

	ena_trc_info("ena device version: %d.%d\n",
		     (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >>
		     ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
		     ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);

	if (ver < MIN_ENA_VER) {
		ena_trc_err("ENA version is lower than the minimal version the driver supports\n");
		return -1;
	}

	ena_trc_info("ena controller version: %d.%d.%d implementation version %d\n",
		     (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK)
		     >> ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
		     (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK)
		     >> ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT,
		     (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK),
		     (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK) >>
		     ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT);

	ctrl_ver_masked =
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) |
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) |
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK);

	/* Validate the ctrl version without the implementation ID */
	if (ctrl_ver_masked < MIN_ENA_CTRL_VER) {
		ena_trc_err("ENA ctrl version is lower than the minimal ctrl version the driver supports\n");
		return -1;
	}

	return 0;
}

void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;

	if (!admin_queue)
		return;

	if (admin_queue->comp_ctx)
		ENA_MEM_FREE(ena_dev->dmadev, admin_queue->comp_ctx);
	admin_queue->comp_ctx = NULL;

	if (admin_queue->sq.entries)
		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
				      ADMIN_SQ_SIZE(admin_queue->q_depth),
				      admin_queue->sq.entries,
				      admin_queue->sq.dma_addr,
				      admin_queue->sq.mem_handle);
	admin_queue->sq.entries = NULL;

	if (admin_queue->cq.entries)
		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
				      ADMIN_CQ_SIZE(admin_queue->q_depth),
				      admin_queue->cq.entries,
				      admin_queue->cq.dma_addr,
				      admin_queue->cq.mem_handle);
	admin_queue->cq.entries = NULL;

	if (ena_dev->aenq.entries)
		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
				      ADMIN_AENQ_SIZE(ena_dev->aenq.q_depth),
				      ena_dev->aenq.entries,
				      ena_dev->aenq.dma_addr,
				      ena_dev->aenq.mem_handle);
	ena_dev->aenq.entries = NULL;
}

void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
{
	ena_dev->admin_queue.polling = polling;
}

int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

	ENA_SPINLOCK_INIT(mmio_read->lock);
	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
			       sizeof(*mmio_read->read_resp),
			       mmio_read->read_resp,
			       mmio_read->read_resp_dma_addr,
			       mmio_read->read_resp_mem_handle);
	if (unlikely(!mmio_read->read_resp))
		return ENA_COM_NO_MEM;

	ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);

	mmio_read->read_resp->req_id = 0x0;
	mmio_read->seq_num = 0x0;
	mmio_read->readless_supported = true;

	return 0;
}

void
ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

	mmio_read->readless_supported = readless_supported;
}

void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

	ENA_REG_WRITE32(0x0, (unsigned char *)ena_dev->reg_bar
			+ ENA_REGS_MMIO_RESP_LO_OFF);
	ENA_REG_WRITE32(0x0, (unsigned char *)ena_dev->reg_bar
			+ ENA_REGS_MMIO_RESP_HI_OFF);

	ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
			      sizeof(*mmio_read->read_resp),
			      mmio_read->read_resp,
			      mmio_read->read_resp_dma_addr,
			      mmio_read->read_resp_mem_handle);

	mmio_read->read_resp = NULL;
}

void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
	u32 addr_low, addr_high;

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(mmio_read->read_resp_dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(mmio_read->read_resp_dma_addr);

	ENA_REG_WRITE32(addr_low, (unsigned char *)ena_dev->reg_bar
			+ ENA_REGS_MMIO_RESP_LO_OFF);
	ENA_REG_WRITE32(addr_high, (unsigned char *)ena_dev->reg_bar
			+ ENA_REGS_MMIO_RESP_HI_OFF);
}

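/* Admin bring-up sequence: verify the device is ready, allocate the
 * completion-context table and the SQ/CQ rings, program the ring base
 * addresses and the AQ/ACQ caps registers (depth plus entry size), and
 * finally set up the AENQ. Any failure unwinds through
 * ena_com_admin_destroy().
 */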
int ena_com_admin_init(struct ena_com_dev *ena_dev,
		       struct ena_aenq_handlers *aenq_handlers,
		       bool init_spinlock)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	u32 aq_caps, acq_caps, dev_sts, addr_low, addr_high;
	int ret;

	dev_sts = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);

	if (unlikely(dev_sts == ENA_MMIO_READ_TIMEOUT)) {
		ena_trc_err("Reg read timeout occurred\n");
		return ENA_COM_TIMER_EXPIRED;
	}

	if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) {
		ena_trc_err("Device isn't ready, abort com init\n");
		return ENA_COM_NO_DEVICE;
	}

	admin_queue->q_depth = ENA_ADMIN_QUEUE_DEPTH;

	admin_queue->q_dmadev = ena_dev->dmadev;
	admin_queue->polling = false;
	admin_queue->curr_cmd_id = 0;

	ATOMIC32_SET(&admin_queue->outstanding_cmds, 0);

	if (init_spinlock)
		ENA_SPINLOCK_INIT(admin_queue->q_lock);

	ret = ena_com_init_comp_ctxt(admin_queue);
	if (ret)
		goto error;

	ret = ena_com_admin_init_sq(admin_queue);
	if (ret)
		goto error;

	ret = ena_com_admin_init_cq(admin_queue);
	if (ret)
		goto error;

	admin_queue->sq.db_addr = (u32 __iomem *)
		((unsigned char *)ena_dev->reg_bar + ENA_REGS_AQ_DB_OFF);

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->sq.dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->sq.dma_addr);

	ENA_REG_WRITE32(addr_low, (unsigned char *)ena_dev->reg_bar
			+ ENA_REGS_AQ_BASE_LO_OFF);
	ENA_REG_WRITE32(addr_high, (unsigned char *)ena_dev->reg_bar
			+ ENA_REGS_AQ_BASE_HI_OFF);

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->cq.dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->cq.dma_addr);

	ENA_REG_WRITE32(addr_low, (unsigned char *)ena_dev->reg_bar
			+ ENA_REGS_ACQ_BASE_LO_OFF);
	ENA_REG_WRITE32(addr_high, (unsigned char *)ena_dev->reg_bar
			+ ENA_REGS_ACQ_BASE_HI_OFF);

	aq_caps = 0;
	aq_caps |= admin_queue->q_depth & ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK;
	aq_caps |= (sizeof(struct ena_admin_aq_entry) <<
		ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT) &
		ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK;

	acq_caps = 0;
	acq_caps |= admin_queue->q_depth & ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK;
	acq_caps |= (sizeof(struct ena_admin_acq_entry) <<
		ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT) &
		ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK;

	ENA_REG_WRITE32(aq_caps, (unsigned char *)ena_dev->reg_bar
			+ ENA_REGS_AQ_CAPS_OFF);
	ENA_REG_WRITE32(acq_caps, (unsigned char *)ena_dev->reg_bar
			+ ENA_REGS_ACQ_CAPS_OFF);
	ret = ena_com_admin_init_aenq(ena_dev, aenq_handlers);
	if (ret)
		goto error;

	admin_queue->running_state = true;

	return 0;
error:
	ena_com_admin_destroy(ena_dev);

	return ret;
}

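/* IO queue creation order matters: the CQ must be created first because the
 * create-SQ command carries the target cq_idx. On failure the unwind runs in
 * reverse: destroy the CQ, then free both rings' host memory.
 */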
int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
			    struct ena_com_create_io_ctx *ctx)
{
	struct ena_com_io_sq *io_sq;
	struct ena_com_io_cq *io_cq;
	int ret;

	if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) {
		ena_trc_err("Qid (%d) is bigger than max num of queues (%d)\n",
			    ctx->qid, ENA_TOTAL_NUM_QUEUES);
		return ENA_COM_INVAL;
	}

	io_sq = &ena_dev->io_sq_queues[ctx->qid];
	io_cq = &ena_dev->io_cq_queues[ctx->qid];

	memset(io_sq, 0x0, sizeof(struct ena_com_io_sq));
	memset(io_cq, 0x0, sizeof(struct ena_com_io_cq));

	io_cq->q_depth = ctx->queue_size;
	io_cq->direction = ctx->direction;
	io_cq->qid = ctx->qid;

	io_cq->msix_vector = ctx->msix_vector;

	io_sq->q_depth = ctx->queue_size;
	io_sq->direction = ctx->direction;
	io_sq->qid = ctx->qid;

	io_sq->mem_queue_type = ctx->mem_queue_type;

	if (ctx->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
		/* header length is limited to 8 bits */
		io_sq->tx_max_header_size =
			ENA_MIN32(ena_dev->tx_max_header_size, SZ_256);

	ret = ena_com_init_io_sq(ena_dev, ctx, io_sq);
	if (ret)
		goto error;
	ret = ena_com_init_io_cq(ena_dev, ctx, io_cq);
	if (ret)
		goto error;

	ret = ena_com_create_io_cq(ena_dev, io_cq);
	if (ret)
		goto error;

	ret = ena_com_create_io_sq(ena_dev, io_sq, io_cq->idx);
	if (ret)
		goto destroy_io_cq;

	return 0;

destroy_io_cq:
	ena_com_destroy_io_cq(ena_dev, io_cq);
error:
	ena_com_io_queue_free(ena_dev, io_sq, io_cq);
	return ret;
}

void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid)
{
	struct ena_com_io_sq *io_sq;
	struct ena_com_io_cq *io_cq;

	if (qid >= ENA_TOTAL_NUM_QUEUES) {
		ena_trc_err("Qid (%d) is bigger than max num of queues (%d)\n",
			    qid, ENA_TOTAL_NUM_QUEUES);
		return;
	}

	io_sq = &ena_dev->io_sq_queues[qid];
	io_cq = &ena_dev->io_cq_queues[qid];

	ena_com_destroy_io_sq(ena_dev, io_sq);
	ena_com_destroy_io_cq(ena_dev, io_cq);

	ena_com_io_queue_free(ena_dev, io_sq, io_cq);
}

int ena_com_get_link_params(struct ena_com_dev *ena_dev,
			    struct ena_admin_get_feat_resp *resp)
{
	return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG);
}

int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
			      struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
	struct ena_admin_get_feat_resp get_resp;
	int rc;

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_DEVICE_ATTRIBUTES);
	if (rc)
		return rc;

	memcpy(&get_feat_ctx->dev_attr, &get_resp.u.dev_attr,
	       sizeof(get_resp.u.dev_attr));
	ena_dev->supported_features = get_resp.u.dev_attr.supported_features;

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_MAX_QUEUES_NUM);
	if (rc)
		return rc;

	memcpy(&get_feat_ctx->max_queues, &get_resp.u.max_queue,
	       sizeof(get_resp.u.max_queue));
	ena_dev->tx_max_header_size = get_resp.u.max_queue.max_header_size;

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_AENQ_CONFIG);
	if (rc)
		return rc;

	memcpy(&get_feat_ctx->aenq, &get_resp.u.aenq,
	       sizeof(get_resp.u.aenq));

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_STATELESS_OFFLOAD_CONFIG);
	if (rc)
		return rc;

	memcpy(&get_feat_ctx->offload, &get_resp.u.offload,
	       sizeof(get_resp.u.offload));

	return 0;
}

void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev)
{
	ena_com_handle_admin_completion(&ena_dev->admin_queue);
}

/* ena_handle_specific_aenq_event:
 * return the handler that is relevant to the specific event group
 */
static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *dev,
						     u16 group)
{
	struct ena_aenq_handlers *aenq_handlers = dev->aenq.aenq_handlers;

	if ((group < ENA_MAX_HANDLERS) && aenq_handlers->handlers[group])
		return aenq_handlers->handlers[group];

	return aenq_handlers->unimplemented_handler;
}

/* ena_aenq_intr_handler:
 * handles the aenq incoming events.
 * pop events from the queue and apply the specific handler
 */
void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
{
	struct ena_admin_aenq_entry *aenq_e;
	struct ena_admin_aenq_common_desc *aenq_common;
	struct ena_com_aenq *aenq = &dev->aenq;
	ena_aenq_handler handler_cb;
	u16 masked_head, processed = 0;
	u8 phase;

	masked_head = aenq->head & (aenq->q_depth - 1);
	phase = aenq->phase;
	aenq_e = &aenq->entries[masked_head]; /* Get first entry */
	aenq_common = &aenq_e->aenq_common_desc;

	/* Go over all the events */
	while ((aenq_common->flags & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) ==
	       phase) {
		ena_trc_dbg("AENQ! Group[%x] Syndrome[%x] timestamp: [%llus]\n",
			    aenq_common->group,
			    aenq_common->syndrom,
			    (unsigned long long)aenq_common->timestamp_low +
			    ((u64)aenq_common->timestamp_high << 32));

		/* Handle specific event */
		handler_cb = ena_com_get_specific_aenq_cb(dev,
							  aenq_common->group);
		handler_cb(data, aenq_e); /* call the actual event handler */

		/* Get next event entry */
		masked_head++;
		processed++;

		if (unlikely(masked_head == aenq->q_depth)) {
			masked_head = 0;
			phase = !phase;
		}
		aenq_e = &aenq->entries[masked_head];
		aenq_common = &aenq_e->aenq_common_desc;
	}

	aenq->head += processed;
	aenq->phase = phase;

	/* Don't update aenq doorbell if there weren't any processed events */
	if (!processed)
		return;

	/* write the aenq doorbell after all AENQ descriptors were read */
	mb();
	ENA_REG_WRITE32((u32)aenq->head, (unsigned char *)dev->reg_bar
			+ ENA_REGS_AENQ_HEAD_DB_OFF);
}

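/* Device reset handshake: set the reset bit in DEV_CTL, wait for
 * RESET_IN_PROGRESS to assert in DEV_STS, clear DEV_CTL, then wait for the
 * bit to deassert again. The poll budget comes from the CAPS reset-timeout
 * field and is counted in 100 ms steps by wait_for_reset_state().
 */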
int ena_com_dev_reset(struct ena_com_dev *ena_dev)
{
	u32 stat, timeout, cap, reset_val;
	int rc;

	stat = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
	cap = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);

	if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) ||
		     (cap == ENA_MMIO_READ_TIMEOUT))) {
		ena_trc_err("Reg read32 timeout occurred\n");
		return ENA_COM_TIMER_EXPIRED;
	}

	if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) {
		ena_trc_err("Device isn't ready, can't reset device\n");
		return ENA_COM_INVAL;
	}

	timeout = (cap & ENA_REGS_CAPS_RESET_TIMEOUT_MASK) >>
		  ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT;
	if (timeout == 0) {
		ena_trc_err("Invalid timeout value\n");
		return ENA_COM_INVAL;
	}

	/* start reset */
	reset_val = ENA_REGS_DEV_CTL_DEV_RESET_MASK;
	ENA_REG_WRITE32(reset_val, (unsigned char *)ena_dev->reg_bar
			+ ENA_REGS_DEV_CTL_OFF);

	/* Write again the MMIO read request address */
	ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);

	rc = wait_for_reset_state(ena_dev, timeout,
				  ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK);
	if (rc != 0) {
		ena_trc_err("Reset indication didn't turn on\n");
		return rc;
	}

	/* reset done */
	ENA_REG_WRITE32(0, (unsigned char *)ena_dev->reg_bar
			+ ENA_REGS_DEV_CTL_OFF);
	rc = wait_for_reset_state(ena_dev, timeout, 0);
	if (rc != 0) {
		ena_trc_err("Reset indication didn't turn off\n");
		return rc;
	}

	return 0;
}

static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
			     struct ena_admin_aq_get_stats_cmd *get_cmd,
			     struct ena_admin_acq_get_stats_resp *get_resp,
			     enum ena_admin_get_stats_type type)
{
	struct ena_com_admin_queue *admin_queue;
	int ret;

	if (!ena_dev) {
		ena_trc_err("%s : ena_dev is NULL\n", __func__);
		return ENA_COM_NO_DEVICE;
	}

	admin_queue = &ena_dev->admin_queue;

	get_cmd->aq_common_descriptor.opcode = ENA_ADMIN_GET_STATS;
	get_cmd->aq_common_descriptor.flags = 0;
	get_cmd->type = type;

	ret = ena_com_execute_admin_command(
		admin_queue,
		(struct ena_admin_aq_entry *)get_cmd,
		sizeof(*get_cmd),
		(struct ena_admin_acq_entry *)get_resp,
		sizeof(*get_resp));

	if (unlikely(ret))
		ena_trc_err("Failed to get stats. error: %d\n", ret);

	return ret;
}

int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
				struct ena_admin_basic_stats *stats)
{
	int ret;
	struct ena_admin_aq_get_stats_cmd get_cmd;
	struct ena_admin_acq_get_stats_resp get_resp;

	memset(&get_cmd, 0x0, sizeof(get_cmd));
	ret = ena_get_dev_stats(ena_dev, &get_cmd, &get_resp,
				ENA_ADMIN_GET_STATS_TYPE_BASIC);
	if (likely(ret == 0))
		memcpy(stats, &get_resp.basic_stats,
		       sizeof(get_resp.basic_stats));

	return ret;
}

int ena_com_get_dev_extended_stats(struct ena_com_dev *ena_dev, char *buff,
				   u32 len)
{
	int ret;
	struct ena_admin_aq_get_stats_cmd get_cmd;
	struct ena_admin_acq_get_stats_resp get_resp;
	ena_mem_handle_t mem_handle = 0;
	void *virt_addr;
	dma_addr_t phys_addr;

	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev, len,
			       virt_addr, phys_addr, mem_handle);
	if (!virt_addr) {
		ret = ENA_COM_NO_MEM;
		goto done;
	}
	memset(&get_cmd, 0x0, sizeof(get_cmd));
	ret = ena_com_mem_addr_set(ena_dev,
				   &get_cmd.u.control_buffer.address,
				   phys_addr);
	if (unlikely(ret)) {
		ena_trc_err("memory address set failed\n");
		goto free_ext_stats_mem;
	}
	get_cmd.u.control_buffer.length = len;

	get_cmd.device_id = ena_dev->stats_func;
	get_cmd.queue_idx = ena_dev->stats_queue;

	ret = ena_get_dev_stats(ena_dev, &get_cmd, &get_resp,
				ENA_ADMIN_GET_STATS_TYPE_EXTENDED);
	if (ret < 0)
		goto free_ext_stats_mem;

	ret = snprintf(buff, len, "%s", (char *)virt_addr);

free_ext_stats_mem:
	ENA_MEM_FREE_COHERENT(ena_dev->dmadev, len, virt_addr, phys_addr,
			      mem_handle);
done:
	return ret;
}

int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu)
{
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	if (unlikely(!ena_dev)) {
		ena_trc_err("%s : ena_dev is NULL\n", __func__);
		return ENA_COM_NO_DEVICE;
	}

	if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
		ena_trc_info("Feature %d isn't supported\n", ENA_ADMIN_MTU);
		return ENA_COM_PERMISSION;
	}

	memset(&cmd, 0x0, sizeof(cmd));
	admin_queue = &ena_dev->admin_queue;

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags = 0;
	cmd.feat_common.feature_id = ENA_ADMIN_MTU;
	cmd.u.mtu.mtu = mtu;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));

	if (unlikely(ret)) {
		ena_trc_err("Failed to set mtu %d. error: %d\n", mtu, ret);
		return ENA_COM_INVAL;
	}

	return 0;
}

int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
				 struct ena_admin_feature_offload_desc *offload)
{
	int ret;
	struct ena_admin_get_feat_resp resp;

	ret = ena_com_get_feature(ena_dev, &resp,
				  ENA_ADMIN_STATELESS_OFFLOAD_CONFIG);
	if (unlikely(ret)) {
		ena_trc_err("Failed to get offload capabilities %d\n", ret);
		return ENA_COM_INVAL;
	}

	memcpy(offload, &resp.u.offload, sizeof(resp.u.offload));

	return 0;
}

int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	struct ena_admin_get_feat_resp get_resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev,
						ENA_ADMIN_RSS_HASH_FUNCTION)) {
		ena_trc_info("Feature %d isn't supported\n",
			     ENA_ADMIN_RSS_HASH_FUNCTION);
		return ENA_COM_PERMISSION;
	}

	/* Validate hash function is supported */
	ret = ena_com_get_feature(ena_dev, &get_resp,
				  ENA_ADMIN_RSS_HASH_FUNCTION);
	if (unlikely(ret))
		return ret;

	if (!(get_resp.u.flow_hash_func.supported_func &
	      (1 << rss->hash_func))) {
		ena_trc_err("Func hash %d isn't supported by device, abort\n",
			    rss->hash_func);
		return ENA_COM_PERMISSION;
	}

	memset(&cmd, 0x0, sizeof(cmd));

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags =
		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_FUNCTION;
	cmd.u.flow_hash_func.init_val = rss->hash_init_val;
	cmd.u.flow_hash_func.selected_func = 1 << rss->hash_func;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.control_buffer.address,
				   rss->hash_key_dma_addr);
	if (unlikely(ret)) {
		ena_trc_err("memory address set failed\n");
		return ret;
	}

	cmd.control_buffer.length = sizeof(*rss->hash_key);

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));
	if (unlikely(ret)) {
		ena_trc_err("Failed to set hash function %d. error: %d\n",
			    rss->hash_func, ret);
		return ENA_COM_INVAL;
	}

	return 0;
}

int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
			       enum ena_admin_hash_functions func,
			       const u8 *key, u16 key_len, u32 init_val)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	struct ena_admin_feature_rss_flow_hash_control *hash_key =
		rss->hash_key;
	int rc;

	/* Make sure size is a mult of DWs */
	if (unlikely(key_len & 0x3))
		return ENA_COM_INVAL;

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_HASH_FUNCTION,
				    rss->hash_key_dma_addr,
				    sizeof(*rss->hash_key));
	if (unlikely(rc))
		return rc;

	if (!((1 << func) & get_resp.u.flow_hash_func.supported_func)) {
		ena_trc_err("Flow hash function %d isn't supported\n", func);
		return ENA_COM_PERMISSION;
	}

	switch (func) {
	case ENA_ADMIN_TOEPLITZ:
		if (key_len > sizeof(hash_key->key)) {
			ena_trc_err("key len (%hu) is bigger than the max supported (%zu)\n",
				    key_len, sizeof(hash_key->key));
			return ENA_COM_INVAL;
		}

		memcpy(hash_key->key, key, key_len);
		rss->hash_init_val = init_val;
		hash_key->keys_num = key_len >> 2;
		break;
	case ENA_ADMIN_CRC32:
		rss->hash_init_val = init_val;
		break;
	default:
		ena_trc_err("Invalid hash function (%d)\n", func);
		return ENA_COM_INVAL;
	}

	rc = ena_com_set_hash_function(ena_dev);

	/* Restore the old function */
	if (unlikely(rc))
		ena_com_get_hash_function(ena_dev, NULL, NULL);

	return rc;
}

int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
			      enum ena_admin_hash_functions *func,
			      u8 *key)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	struct ena_admin_feature_rss_flow_hash_control *hash_key =
		rss->hash_key;
	int rc;

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_HASH_FUNCTION,
				    rss->hash_key_dma_addr,
				    sizeof(*rss->hash_key));
	if (unlikely(rc))
		return rc;

	rss->hash_func = (enum ena_admin_hash_functions)get_resp.u.flow_hash_func.selected_func;
	if (func)
		*func = rss->hash_func;

	if (key)
		memcpy(key, hash_key->key, (size_t)(hash_key->keys_num) << 2);

	return 0;
}

int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev,
			  enum ena_admin_flow_hash_proto proto,
			  u16 *fields)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	int rc;

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_HASH_INPUT,
				    rss->hash_ctrl_dma_addr,
				    sizeof(*rss->hash_ctrl));
	if (unlikely(rc))
		return rc;

	if (fields)
		*fields = rss->hash_ctrl->selected_fields[proto].fields;

	return 0;
}

int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev,
						ENA_ADMIN_RSS_HASH_INPUT)) {
		ena_trc_info("Feature %d isn't supported\n",
			     ENA_ADMIN_RSS_HASH_INPUT);
		return ENA_COM_PERMISSION;
	}

	/* Zero the command so no stack garbage reaches the device */
	memset(&cmd, 0x0, sizeof(cmd));

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags =
		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_INPUT;
	cmd.u.flow_hash_input.enabled_input_sort =
		ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK |
		ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.control_buffer.address,
				   rss->hash_ctrl_dma_addr);
	if (unlikely(ret)) {
		ena_trc_err("memory address set failed\n");
		return ret;
	}
	cmd.control_buffer.length = sizeof(*hash_ctrl);

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));
	if (unlikely(ret)) {
		ena_trc_err("Failed to set hash input. error: %d\n", ret);
		ret = ENA_COM_INVAL;
	}

	return ret;
}

int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_feature_rss_hash_control *hash_ctrl =
		rss->hash_ctrl;
	u16 available_fields = 0;
	int rc, i;

	/* Get the supported hash input */
	rc = ena_com_get_hash_ctrl(ena_dev, (enum ena_admin_flow_hash_proto)0, NULL);
	if (unlikely(rc))
		return rc;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP4].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP4].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP6].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP6].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP6].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

	/* Non-IP traffic can only be hashed on L2 fields */
	hash_ctrl->selected_fields[ENA_ADMIN_RSS_NOT_IP].fields =
		ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA;

	for (i = 0; i < ENA_ADMIN_RSS_PROTO_NUM; i++) {
		available_fields = hash_ctrl->selected_fields[i].fields &
			hash_ctrl->supported_fields[i].fields;
		if (available_fields != hash_ctrl->selected_fields[i].fields) {
			ena_trc_err("hash control doesn't support all the desired configuration. proto %x supported %x selected %x\n",
				    i, hash_ctrl->supported_fields[i].fields,
				    hash_ctrl->selected_fields[i].fields);
			return ENA_COM_PERMISSION;
		}
	}

	rc = ena_com_set_hash_ctrl(ena_dev);

	/* In case of failure, restore the old hash ctrl */
	if (unlikely(rc))
		ena_com_get_hash_ctrl(ena_dev, (enum ena_admin_flow_hash_proto)0, NULL);

	return rc;
}

int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
			   enum ena_admin_flow_hash_proto proto,
			   u16 hash_fields)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
	u16 supported_fields;
	int rc;

	if (proto >= ENA_ADMIN_RSS_PROTO_NUM) {
		ena_trc_err("Invalid proto num (%u)\n", proto);
		return ENA_COM_INVAL;
	}

	/* Get the ctrl table */
	rc = ena_com_get_hash_ctrl(ena_dev, proto, NULL);
	if (unlikely(rc))
		return rc;

	/* Make sure all the fields are supported */
	supported_fields = hash_ctrl->supported_fields[proto].fields;
	if ((hash_fields & supported_fields) != hash_fields) {
		ena_trc_err("proto %d doesn't support the required fields %x. supports only: %x\n",
			    proto, hash_fields, supported_fields);
	}

	hash_ctrl->selected_fields[proto].fields = hash_fields;

	rc = ena_com_set_hash_ctrl(ena_dev);

	/* In case of failure, restore the old hash ctrl */
	if (unlikely(rc))
		ena_com_get_hash_ctrl(ena_dev, (enum ena_admin_flow_hash_proto)0, NULL);

	return rc;
}

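/*
 * Usage sketch (illustrative): restricting UDP/IPv4 hashing to the IP address
 * pair only, e.g. to keep all flows between two hosts on a single queue:
 *
 *	rc = ena_com_fill_hash_ctrl(ena_dev, ENA_ADMIN_RSS_UDP4,
 *				    ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA);
 */
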
int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev,
				      u16 entry_idx, u16 entry_value)
{
	struct ena_rss *rss = &ena_dev->rss;

	if (unlikely(entry_idx >= (1 << rss->tbl_log_size)))
		return ENA_COM_INVAL;

	if (unlikely(entry_value > ENA_TOTAL_NUM_QUEUES))
		return ENA_COM_INVAL;

	rss->host_rss_ind_tbl[entry_idx] = entry_value;

	return 0;
}

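/*
 * Usage sketch (illustrative): entry_value is a queue number, so remapping
 * indirection-table slot 3 to RX queue 1 is simply:
 *
 *	rc = ena_com_indirect_table_fill_entry(ena_dev, 3, 1);
 *
 * This only updates the host-side copy; the device sees it once
 * ena_com_indirect_table_set() is called.
 */
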
int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev,
						ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG)) {
		ena_trc_info("Feature %d isn't supported\n",
			     ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
		return ENA_COM_PERMISSION;
	}

	ret = ena_com_ind_tbl_convert_to_device(ena_dev);
	if (ret) {
		ena_trc_err("Failed to convert host indirection table to device table\n");
		return ret;
	}

	memset(&cmd, 0x0, sizeof(cmd));

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags =
		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	cmd.feat_common.feature_id = ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG;
	cmd.u.ind_table.size = rss->tbl_log_size;
	cmd.u.ind_table.inline_index = 0xFFFFFFFF;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.control_buffer.address,
				   rss->rss_ind_tbl_dma_addr);
	if (unlikely(ret)) {
		ena_trc_err("memory address set failed\n");
		return ret;
	}

	cmd.control_buffer.length = (1ULL << rss->tbl_log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));

	if (unlikely(ret)) {
		ena_trc_err("Failed to set indirect table. error: %d\n", ret);
		return ENA_COM_INVAL;
	}

	return 0;
}

int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	u32 tbl_size;
	int i, rc;

	tbl_size = (1ULL << rss->tbl_log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG,
				    rss->rss_ind_tbl_dma_addr,
				    tbl_size);
	if (unlikely(rc))
		return rc;

	if (!ind_tbl)
		return 0;

	rc = ena_com_ind_tbl_convert_from_device(ena_dev);
	if (unlikely(rc))
		return rc;

	for (i = 0; i < (1 << rss->tbl_log_size); i++)
		ind_tbl[i] = rss->host_rss_ind_tbl[i];

	return 0;
}

int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size)
{
	int rc;

	memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));

	rc = ena_com_indirect_table_allocate(ena_dev, indr_tbl_log_size);
	if (unlikely(rc))
		goto err_indr_tbl;

	rc = ena_com_hash_key_allocate(ena_dev);
	if (unlikely(rc))
		goto err_hash_key;

	rc = ena_com_hash_ctrl_init(ena_dev);
	if (unlikely(rc))
		goto err_hash_ctrl;

	return 0;

err_hash_ctrl:
	ena_com_hash_key_destroy(ena_dev);
err_hash_key:
	ena_com_indirect_table_destroy(ena_dev);
err_indr_tbl:
	return rc;
}

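/*
 * Usage sketch (illustrative): a typical RSS bring-up sequence from driver
 * probe code. ENA_RX_RSS_TABLE_LOG_SIZE and num_queues are assumed to be
 * defined by the caller; they are not part of this file.
 *
 *	rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE);
 *	if (rc)
 *		return rc;
 *	for (i = 0; i < (1 << ENA_RX_RSS_TABLE_LOG_SIZE); i++)
 *		ena_com_indirect_table_fill_entry(ena_dev, i, i % num_queues);
 *	rc = ena_com_indirect_table_set(ena_dev);
 *	if (!rc)
 *		rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_CRC32,
 *						NULL, 0, 0);
 *	if (!rc)
 *		rc = ena_com_set_default_hash_ctrl(ena_dev);
 */
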
void ena_com_rss_destroy(struct ena_com_dev *ena_dev)
{
	ena_com_indirect_table_destroy(ena_dev);
	ena_com_hash_key_destroy(ena_dev);
	ena_com_hash_ctrl_destroy(ena_dev);

	memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
}

int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
			       SZ_4K,
			       host_attr->host_info,
			       host_attr->host_info_dma_addr,
			       host_attr->host_info_dma_handle);
	if (unlikely(!host_attr->host_info))
		return ENA_COM_NO_MEM;

	return 0;
}

int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
				u32 debug_area_size)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
			       debug_area_size,
			       host_attr->debug_area_virt_addr,
			       host_attr->debug_area_dma_addr,
			       host_attr->debug_area_dma_handle);
	if (unlikely(!host_attr->debug_area_virt_addr)) {
		host_attr->debug_area_size = 0;
		return ENA_COM_NO_MEM;
	}

	host_attr->debug_area_size = debug_area_size;

	return 0;
}

void ena_com_delete_host_info(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	if (host_attr->host_info) {
		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
				      SZ_4K,
				      host_attr->host_info,
				      host_attr->host_info_dma_addr,
				      host_attr->host_info_dma_handle);
		host_attr->host_info = NULL;
	}
}

void ena_com_delete_debug_area(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	if (host_attr->debug_area_virt_addr) {
		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
				      host_attr->debug_area_size,
				      host_attr->debug_area_virt_addr,
				      host_attr->debug_area_dma_addr,
				      host_attr->debug_area_dma_handle);
		host_attr->debug_area_virt_addr = NULL;
	}
}

int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr;
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	if (unlikely(!ena_dev)) {
		ena_trc_err("%s : ena_dev is NULL\n", __func__);
		return ENA_COM_NO_DEVICE;
	}

	/* Only dereference ena_dev after the NULL check above */
	host_attr = &ena_dev->host_attr;

	if (!ena_com_check_supported_feature_id(ena_dev,
						ENA_ADMIN_HOST_ATTR_CONFIG)) {
		ena_trc_warn("Set host attribute isn't supported\n");
		return ENA_COM_PERMISSION;
	}

	memset(&cmd, 0x0, sizeof(cmd));
	admin_queue = &ena_dev->admin_queue;

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.feat_common.feature_id = ENA_ADMIN_HOST_ATTR_CONFIG;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.u.host_attr.debug_ba,
				   host_attr->debug_area_dma_addr);
	if (unlikely(ret)) {
		ena_trc_err("memory address set failed\n");
		return ret;
	}

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.u.host_attr.os_info_ba,
				   host_attr->host_info_dma_addr);
	if (unlikely(ret)) {
		ena_trc_err("memory address set failed\n");
		return ret;
	}

	cmd.u.host_attr.debug_area_size = host_attr->debug_area_size;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));

	if (unlikely(ret))
		ena_trc_err("Failed to set host attributes: %d\n", ret);

	return ret;
}

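/*
 * Usage sketch (illustrative): typical host-attribute setup during probe. The
 * page-sized debug area and the err_free_host_info label are the caller's own
 * choices, not requirements of this file.
 *
 *	rc = ena_com_allocate_host_info(ena_dev);
 *	if (rc)
 *		return rc;
 *	rc = ena_com_allocate_debug_area(ena_dev, 4096);
 *	if (rc)
 *		goto err_free_host_info;
 *	// ... fill ena_dev->host_attr.host_info (OS type, driver version) ...
 *	rc = ena_com_set_host_attributes(ena_dev);
 */
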
/* Interrupt moderation */
bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev)
{
	return ena_com_check_supported_feature_id(ena_dev,
						  ENA_ADMIN_INTERRUPT_MODERATION);
}

int
ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
						  u32 tx_coalesce_usecs)
{
	if (!ena_dev->intr_delay_resolution) {
		ena_trc_err("Illegal interrupt delay granularity value\n");
		return ENA_COM_FAULT;
	}

	ena_dev->intr_moder_tx_interval = tx_coalesce_usecs /
		ena_dev->intr_delay_resolution;

	return 0;
}

int
ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
						  u32 rx_coalesce_usecs)
{
	if (!ena_dev->intr_delay_resolution) {
		ena_trc_err("Illegal interrupt delay granularity value\n");
		return ENA_COM_FAULT;
	}

	/* We use the LOWEST entry of the moderation table for storing the
	 * nonadaptive interrupt coalescing values
	 */
	ena_dev->intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval =
		rx_coalesce_usecs / ena_dev->intr_delay_resolution;

	return 0;
}

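/*
 * Worked example (illustrative): with intr_delay_resolution == 2 usec per
 * device unit (a made-up value; the real one is read from the device), a
 * request for 100 usec of RX coalescing is stored as 100 / 2 = 50 units:
 *
 *	ena_com_update_nonadaptive_moderation_interval_rx(ena_dev, 100);
 *	// intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval == 50
 */
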
void ena_com_destroy_interrupt_moderation(struct ena_com_dev *ena_dev)
{
	if (ena_dev->intr_moder_tbl)
		ENA_MEM_FREE(ena_dev->dmadev, ena_dev->intr_moder_tbl);
	ena_dev->intr_moder_tbl = NULL;
}

int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
{
	struct ena_admin_get_feat_resp get_resp;
	u16 delay_resolution;
	int rc;

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_INTERRUPT_MODERATION);

	if (rc) {
		if (rc == ENA_COM_PERMISSION) {
			ena_trc_info("Feature %d isn't supported\n",
				     ENA_ADMIN_INTERRUPT_MODERATION);
			rc = 0;
		} else {
			ena_trc_err("Failed to get interrupt moderation admin cmd. rc: %d\n",
				    rc);
		}

		/* no moderation supported, disable adaptive support */
		ena_com_disable_adaptive_moderation(ena_dev);
		return rc;
	}

	rc = ena_com_init_interrupt_moderation_table(ena_dev);
	if (rc)
		goto err;

	/* if moderation is supported by device we set adaptive moderation */
	delay_resolution = get_resp.u.intr_moderation.intr_delay_resolution;
	ena_com_update_intr_delay_resolution(ena_dev, delay_resolution);
	ena_com_enable_adaptive_moderation(ena_dev);

	return 0;
err:
	ena_com_destroy_interrupt_moderation(ena_dev);
	return rc;
}

void
ena_com_config_default_interrupt_moderation_table(struct ena_com_dev *ena_dev)
{
	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;

	if (!intr_moder_tbl)
		return;

	intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval =
		ENA_INTR_LOWEST_USECS;
	intr_moder_tbl[ENA_INTR_MODER_LOWEST].pkts_per_interval =
		ENA_INTR_LOWEST_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_LOWEST].bytes_per_interval =
		ENA_INTR_LOWEST_BYTES;

	intr_moder_tbl[ENA_INTR_MODER_LOW].intr_moder_interval =
		ENA_INTR_LOW_USECS;
	intr_moder_tbl[ENA_INTR_MODER_LOW].pkts_per_interval =
		ENA_INTR_LOW_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_LOW].bytes_per_interval =
		ENA_INTR_LOW_BYTES;

	intr_moder_tbl[ENA_INTR_MODER_MID].intr_moder_interval =
		ENA_INTR_MID_USECS;
	intr_moder_tbl[ENA_INTR_MODER_MID].pkts_per_interval =
		ENA_INTR_MID_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_MID].bytes_per_interval =
		ENA_INTR_MID_BYTES;

	intr_moder_tbl[ENA_INTR_MODER_HIGH].intr_moder_interval =
		ENA_INTR_HIGH_USECS;
	intr_moder_tbl[ENA_INTR_MODER_HIGH].pkts_per_interval =
		ENA_INTR_HIGH_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_HIGH].bytes_per_interval =
		ENA_INTR_HIGH_BYTES;

	intr_moder_tbl[ENA_INTR_MODER_HIGHEST].intr_moder_interval =
		ENA_INTR_HIGHEST_USECS;
	intr_moder_tbl[ENA_INTR_MODER_HIGHEST].pkts_per_interval =
		ENA_INTR_HIGHEST_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_HIGHEST].bytes_per_interval =
		ENA_INTR_HIGHEST_BYTES;
}

unsigned int
ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev)
{
	return ena_dev->intr_moder_tx_interval;
}

unsigned int
ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev)
{
	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;

	if (intr_moder_tbl)
		return intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval;

	return 0;
}

void ena_com_init_intr_moderation_entry(struct ena_com_dev *ena_dev,
					enum ena_intr_moder_level level,
					struct ena_intr_moder_entry *entry)
{
	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;

	if (level >= ENA_INTR_MAX_NUM_OF_LEVELS)
		return;

	intr_moder_tbl[level].intr_moder_interval = entry->intr_moder_interval;
	if (ena_dev->intr_delay_resolution)
		intr_moder_tbl[level].intr_moder_interval /=
			ena_dev->intr_delay_resolution;
	intr_moder_tbl[level].pkts_per_interval = entry->pkts_per_interval;
	intr_moder_tbl[level].bytes_per_interval = entry->bytes_per_interval;
}

void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev,
				       enum ena_intr_moder_level level,
				       struct ena_intr_moder_entry *entry)
{
	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;

	if (level >= ENA_INTR_MAX_NUM_OF_LEVELS)
		return;

	entry->intr_moder_interval = intr_moder_tbl[level].intr_moder_interval;
	if (ena_dev->intr_delay_resolution)
		entry->intr_moder_interval *= ena_dev->intr_delay_resolution;
	entry->pkts_per_interval =
		intr_moder_tbl[level].pkts_per_interval;
	entry->bytes_per_interval = intr_moder_tbl[level].bytes_per_interval;
}

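/*
 * Usage sketch (illustrative): tuning one adaptive level. The get/init pair
 * round-trips through usec (get scales up by intr_delay_resolution, init
 * scales back down), so callers always work in usec:
 *
 *	struct ena_intr_moder_entry entry;
 *
 *	ena_com_get_intr_moderation_entry(ena_dev, ENA_INTR_MODER_MID, &entry);
 *	entry.intr_moder_interval *= 2;	// double MID-level coalescing time
 *	ena_com_init_intr_moderation_entry(ena_dev, ENA_INTR_MODER_MID, &entry);
 */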