/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2015-2019 Amazon.com, Inc. or its affiliates.
 */

/*****************************************************************************/
/*****************************************************************************/

/* Timeout in micro-sec */
#define ADMIN_CMD_TIMEOUT_US (3000000)

#define ENA_ASYNC_QUEUE_DEPTH 16
#define ENA_ADMIN_QUEUE_DEPTH 32

#define ENA_CTRL_MAJOR		0
#define ENA_CTRL_MINOR		0
#define ENA_CTRL_SUB_MINOR	1

#define MIN_ENA_CTRL_VER \
	(((ENA_CTRL_MAJOR) << \
	(ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT)) | \
	((ENA_CTRL_MINOR) << \
	(ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT)) | \
	(ENA_CTRL_SUB_MINOR))
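/* A 64-bit DMA address is programmed into the device as two 32-bit halves;
 * the helpers below extract the low and high words for the LO/HI
 * base-address registers.
 */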
#define ENA_DMA_ADDR_TO_UINT32_LOW(x)	((u32)((u64)(x)))
#define ENA_DMA_ADDR_TO_UINT32_HIGH(x)	((u32)(((u64)(x)) >> 32))

#define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF

#define ENA_COM_BOUNCE_BUFFER_CNTRL_CNT	4

#define ENA_REGS_ADMIN_INTR_MASK 1
/*****************************************************************************/
/*****************************************************************************/
/*****************************************************************************/

enum ena_cmd_status {
	ENA_CMD_SUBMITTED,
	ENA_CMD_COMPLETED,
	/* Abort - canceled by the driver */
	ENA_CMD_ABORTED,
};

struct ena_comp_ctx {
	ena_wait_event_t wait_event;
	struct ena_admin_acq_entry *user_cqe;
	u32 comp_size;
	enum ena_cmd_status status;
	/* status from the device */
	u8 comp_status;
	u8 cmd_opcode;
	bool occupied;
};

struct ena_com_stats_ctx {
	struct ena_admin_aq_get_stats_cmd get_cmd;
	struct ena_admin_acq_get_stats_resp get_resp;
};
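/* Validate that a DMA address fits within the device's reported address
 * width and split it into the low/high words of an ena_common_mem_addr.
 */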
static inline int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
				       struct ena_common_mem_addr *ena_addr,
				       dma_addr_t addr)
{
	if ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr) {
		ena_trc_err("dma address has more bits than the device supports\n");
		return ENA_COM_INVAL;
	}

	ena_addr->mem_addr_low = lower_32_bits(addr);
	ena_addr->mem_addr_high = (u16)upper_32_bits(addr);

	return 0;
}
static int ena_com_admin_init_sq(struct ena_com_admin_queue *queue)
{
	struct ena_com_admin_sq *sq = &queue->sq;
	u16 size = ADMIN_SQ_SIZE(queue->q_depth);

	ENA_MEM_ALLOC_COHERENT(queue->q_dmadev, size, sq->entries, sq->dma_addr,
			       sq->mem_handle);
	if (!sq->entries) {
		ena_trc_err("memory allocation failed\n");
		return ENA_COM_NO_MEM;
	}

	sq->head = 0;
	sq->tail = 0;
	sq->phase = 1;
	sq->db_addr = NULL;

	return 0;
}
static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue)
{
	struct ena_com_admin_cq *cq = &queue->cq;
	u16 size = ADMIN_CQ_SIZE(queue->q_depth);

	ENA_MEM_ALLOC_COHERENT(queue->q_dmadev, size, cq->entries, cq->dma_addr,
			       cq->mem_handle);
	if (!cq->entries) {
		ena_trc_err("memory allocation failed\n");
		return ENA_COM_NO_MEM;
	}

	cq->head = 0;
	cq->phase = 1;

	return 0;
}
static int ena_com_admin_init_aenq(struct ena_com_dev *dev,
				   struct ena_aenq_handlers *aenq_handlers)
{
	struct ena_com_aenq *aenq = &dev->aenq;
	u32 addr_low, addr_high, aenq_caps;
	u16 size;

	dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
	size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH);
	ENA_MEM_ALLOC_COHERENT(dev->dmadev, size,
			       aenq->entries,
			       aenq->dma_addr,
			       aenq->mem_handle);
	if (!aenq->entries) {
		ena_trc_err("memory allocation failed\n");
		return ENA_COM_NO_MEM;
	}

	aenq->head = aenq->q_depth;
	aenq->phase = 1;

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(aenq->dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(aenq->dma_addr);

	ENA_REG_WRITE32(dev->bus, addr_low, dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF);
	ENA_REG_WRITE32(dev->bus, addr_high, dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF);

	aenq_caps = 0;
	aenq_caps |= dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
	aenq_caps |= (sizeof(struct ena_admin_aenq_entry) <<
		      ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
		     ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
	ENA_REG_WRITE32(dev->bus, aenq_caps, dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);

	if (unlikely(!aenq_handlers)) {
		ena_trc_err("aenq handlers pointer is NULL\n");
		return ENA_COM_INVAL;
	}

	aenq->aenq_handlers = aenq_handlers;

	return 0;
}
static inline void comp_ctxt_release(struct ena_com_admin_queue *queue,
				     struct ena_comp_ctx *comp_ctx)
{
	comp_ctx->occupied = false;
	ATOMIC32_DEC(&queue->outstanding_cmds);
}
static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *queue,
					  u16 command_id, bool capture)
{
	if (unlikely(command_id >= queue->q_depth)) {
		ena_trc_err("command id is larger than the queue size. cmd_id: %u queue size %d\n",
			    command_id, queue->q_depth);
		return NULL;
	}

	if (unlikely(queue->comp_ctx[command_id].occupied && capture)) {
		ena_trc_err("Completion context is occupied\n");
		return NULL;
	}

	if (capture) {
		ATOMIC32_INC(&queue->outstanding_cmds);
		queue->comp_ctx[command_id].occupied = true;
	}

	return &queue->comp_ctx[command_id];
}
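/* Push one admin command into the SQ: stamp the current phase bit and a
 * masked command id, reserve a completion context, copy the descriptor in,
 * then advance the free-running tail and ring the SQ doorbell.
 */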
static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
						       struct ena_admin_aq_entry *cmd,
						       size_t cmd_size_in_bytes,
						       struct ena_admin_acq_entry *comp,
						       size_t comp_size_in_bytes)
{
	struct ena_comp_ctx *comp_ctx;
	u16 tail_masked, cmd_id;
	u16 queue_size_mask;
	u16 cnt;

	queue_size_mask = admin_queue->q_depth - 1;

	tail_masked = admin_queue->sq.tail & queue_size_mask;

	/* In case of queue FULL */
	cnt = (u16)ATOMIC32_READ(&admin_queue->outstanding_cmds);
	if (cnt >= admin_queue->q_depth) {
		ena_trc_dbg("admin queue is full.\n");
		admin_queue->stats.out_of_space++;
		return ERR_PTR(ENA_COM_NO_SPACE);
	}

	cmd_id = admin_queue->curr_cmd_id;

	cmd->aq_common_descriptor.flags |= admin_queue->sq.phase &
		ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK;

	cmd->aq_common_descriptor.command_id |= cmd_id &
		ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;

	comp_ctx = get_comp_ctxt(admin_queue, cmd_id, true);
	if (unlikely(!comp_ctx))
		return ERR_PTR(ENA_COM_INVAL);

	comp_ctx->status = ENA_CMD_SUBMITTED;
	comp_ctx->comp_size = (u32)comp_size_in_bytes;
	comp_ctx->user_cqe = comp;
	comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode;

	ENA_WAIT_EVENT_CLEAR(comp_ctx->wait_event);

	memcpy(&admin_queue->sq.entries[tail_masked], cmd, cmd_size_in_bytes);

	admin_queue->curr_cmd_id = (admin_queue->curr_cmd_id + 1) &
		queue_size_mask;

	admin_queue->sq.tail++;
	admin_queue->stats.submitted_cmd++;

	if (unlikely((admin_queue->sq.tail & queue_size_mask) == 0))
		admin_queue->sq.phase = !admin_queue->sq.phase;

	ENA_DB_SYNC(&admin_queue->sq.mem_handle);
	ENA_REG_WRITE32(admin_queue->bus, admin_queue->sq.tail,
			admin_queue->sq.db_addr);

	return comp_ctx;
}
static inline int ena_com_init_comp_ctxt(struct ena_com_admin_queue *queue)
{
	size_t size = queue->q_depth * sizeof(struct ena_comp_ctx);
	struct ena_comp_ctx *comp_ctx;
	u16 i;

	queue->comp_ctx = ENA_MEM_ALLOC(queue->q_dmadev, size);
	if (unlikely(!queue->comp_ctx)) {
		ena_trc_err("memory allocation failed\n");
		return ENA_COM_NO_MEM;
	}

	for (i = 0; i < queue->q_depth; i++) {
		comp_ctx = get_comp_ctxt(queue, i, false);
		if (comp_ctx)
			ENA_WAIT_EVENT_INIT(comp_ctx->wait_event);
	}

	return 0;
}
static struct ena_comp_ctx *ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
						     struct ena_admin_aq_entry *cmd,
						     size_t cmd_size_in_bytes,
						     struct ena_admin_acq_entry *comp,
						     size_t comp_size_in_bytes)
{
	unsigned long flags = 0;
	struct ena_comp_ctx *comp_ctx;

	ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
	if (unlikely(!admin_queue->running_state)) {
		ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
		return ERR_PTR(ENA_COM_NO_DEVICE);
	}
	comp_ctx = __ena_com_submit_admin_cmd(admin_queue, cmd,
					      cmd_size_in_bytes,
					      comp,
					      comp_size_in_bytes);
	if (IS_ERR(comp_ctx))
		admin_queue->running_state = false;
	ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);

	return comp_ctx;
}
static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
			      struct ena_com_create_io_ctx *ctx,
			      struct ena_com_io_sq *io_sq)
{
	size_t size;
	int dev_node = 0;

	memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr));

	io_sq->dma_addr_bits = (u8)ena_dev->dma_addr_bits;
	io_sq->desc_entry_size =
		(io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
		sizeof(struct ena_eth_io_tx_desc) :
		sizeof(struct ena_eth_io_rx_desc);

	size = io_sq->desc_entry_size * io_sq->q_depth;
	io_sq->bus = ena_dev->bus;

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
		ENA_MEM_ALLOC_COHERENT_NODE(ena_dev->dmadev,
					    size,
					    io_sq->desc_addr.virt_addr,
					    io_sq->desc_addr.phys_addr,
					    io_sq->desc_addr.mem_handle,
					    ctx->numa_node,
					    dev_node);
		if (!io_sq->desc_addr.virt_addr) {
			ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
					       size,
					       io_sq->desc_addr.virt_addr,
					       io_sq->desc_addr.phys_addr,
					       io_sq->desc_addr.mem_handle);
		}

		if (!io_sq->desc_addr.virt_addr) {
			ena_trc_err("memory allocation failed\n");
			return ENA_COM_NO_MEM;
		}
	}

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
		/* Allocate bounce buffers */
		io_sq->bounce_buf_ctrl.buffer_size = ena_dev->llq_info.desc_list_entry_size;
		io_sq->bounce_buf_ctrl.buffers_num = ENA_COM_BOUNCE_BUFFER_CNTRL_CNT;
		io_sq->bounce_buf_ctrl.next_to_use = 0;

		size = io_sq->bounce_buf_ctrl.buffer_size * io_sq->bounce_buf_ctrl.buffers_num;

		ENA_MEM_ALLOC_NODE(ena_dev->dmadev,
				   size,
				   io_sq->bounce_buf_ctrl.base_buffer,
				   ctx->numa_node,
				   dev_node);
		if (!io_sq->bounce_buf_ctrl.base_buffer)
			io_sq->bounce_buf_ctrl.base_buffer = ENA_MEM_ALLOC(ena_dev->dmadev, size);

		if (!io_sq->bounce_buf_ctrl.base_buffer) {
			ena_trc_err("bounce buffer memory allocation failed\n");
			return ENA_COM_NO_MEM;
		}

		memcpy(&io_sq->llq_info, &ena_dev->llq_info, sizeof(io_sq->llq_info));

		/* Initiate the first bounce buffer */
		io_sq->llq_buf_ctrl.curr_bounce_buf =
			ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
		memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
		       0x0, io_sq->llq_info.desc_list_entry_size);
		io_sq->llq_buf_ctrl.descs_left_in_line =
			io_sq->llq_info.descs_num_before_header;

		if (io_sq->llq_info.max_entries_in_tx_burst > 0)
			io_sq->entries_in_tx_burst_left =
				io_sq->llq_info.max_entries_in_tx_burst;
	}

	io_sq->tail = 0;
	io_sq->next_to_comp = 0;
	io_sq->phase = 1;

	return 0;
}
static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
			      struct ena_com_create_io_ctx *ctx,
			      struct ena_com_io_cq *io_cq)
{
	size_t size;
	int prev_node = 0;

	memset(&io_cq->cdesc_addr, 0x0, sizeof(io_cq->cdesc_addr));

	/* Use the basic completion descriptor for Rx */
	io_cq->cdesc_entry_size_in_bytes =
		(io_cq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
		sizeof(struct ena_eth_io_tx_cdesc) :
		sizeof(struct ena_eth_io_rx_cdesc_base);

	size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
	io_cq->bus = ena_dev->bus;

	ENA_MEM_ALLOC_COHERENT_NODE(ena_dev->dmadev,
				    size,
				    io_cq->cdesc_addr.virt_addr,
				    io_cq->cdesc_addr.phys_addr,
				    io_cq->cdesc_addr.mem_handle,
				    ctx->numa_node,
				    prev_node);
	if (!io_cq->cdesc_addr.virt_addr) {
		ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
				       size,
				       io_cq->cdesc_addr.virt_addr,
				       io_cq->cdesc_addr.phys_addr,
				       io_cq->cdesc_addr.mem_handle);
	}

	if (!io_cq->cdesc_addr.virt_addr) {
		ena_trc_err("memory allocation failed\n");
		return ENA_COM_NO_MEM;
	}

	io_cq->phase = 1;
	io_cq->head = 0;

	return 0;
}
static void ena_com_handle_single_admin_completion(struct ena_com_admin_queue *admin_queue,
						   struct ena_admin_acq_entry *cqe)
{
	struct ena_comp_ctx *comp_ctx;
	u16 cmd_id;

	cmd_id = cqe->acq_common_descriptor.command &
		ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;

	comp_ctx = get_comp_ctxt(admin_queue, cmd_id, false);
	if (unlikely(!comp_ctx)) {
		ena_trc_err("comp_ctx is NULL. Changing the admin queue running state\n");
		admin_queue->running_state = false;
		return;
	}

	comp_ctx->status = ENA_CMD_COMPLETED;
	comp_ctx->comp_status = cqe->acq_common_descriptor.status;

	if (comp_ctx->user_cqe)
		memcpy(comp_ctx->user_cqe, (void *)cqe, comp_ctx->comp_size);

	if (!admin_queue->polling)
		ENA_WAIT_EVENT_SIGNAL(comp_ctx->wait_event);
}
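/* Drain the admin CQ: consume entries whose phase bit matches the expected
 * phase and flip that phase each time the masked head wraps. E.g. with a
 * queue depth of 32, after entry 31 the head wraps to 0 and further
 * completions are matched against the inverted phase bit.
 */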
static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_queue)
{
	struct ena_admin_acq_entry *cqe = NULL;
	u16 comp_num = 0;
	u16 head_masked;
	u8 phase;

	head_masked = admin_queue->cq.head & (admin_queue->q_depth - 1);
	phase = admin_queue->cq.phase;

	cqe = &admin_queue->cq.entries[head_masked];

	/* Go over all the completions */
	while ((READ_ONCE8(cqe->acq_common_descriptor.flags) &
		ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
		/* Do not read the rest of the completion entry before the
		 * phase bit was validated
		 */
		dma_rmb();
		ena_com_handle_single_admin_completion(admin_queue, cqe);

		head_masked++;
		comp_num++;
		if (unlikely(head_masked == admin_queue->q_depth)) {
			head_masked = 0;
			phase = !phase;
		}
		cqe = &admin_queue->cq.entries[head_masked];
	}

	admin_queue->cq.head += comp_num;
	admin_queue->cq.phase = phase;
	admin_queue->sq.head += comp_num;
	admin_queue->stats.completed_cmd += comp_num;
}
static int ena_com_comp_status_to_errno(u8 comp_status)
{
	if (unlikely(comp_status != 0))
		ena_trc_err("admin command failed[%u]\n", comp_status);

	if (unlikely(comp_status > ENA_ADMIN_UNKNOWN_ERROR))
		return ENA_COM_INVAL;

	switch (comp_status) {
	case ENA_ADMIN_SUCCESS:
		return 0;
	case ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE:
		return ENA_COM_NO_MEM;
	case ENA_ADMIN_UNSUPPORTED_OPCODE:
		return ENA_COM_UNSUPPORTED;
	case ENA_ADMIN_BAD_OPCODE:
	case ENA_ADMIN_MALFORMED_REQUEST:
	case ENA_ADMIN_ILLEGAL_PARAMETER:
	case ENA_ADMIN_UNKNOWN_ERROR:
		return ENA_COM_INVAL;
	}

	return 0;
}
static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx,
						     struct ena_com_admin_queue *admin_queue)
{
	unsigned long flags = 0;
	ena_time_t timeout;
	int ret;

	timeout = ENA_GET_SYSTEM_TIMEOUT(admin_queue->completion_timeout);

	while (1) {
		ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
		ena_com_handle_admin_completion(admin_queue);
		ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);

		if (comp_ctx->status != ENA_CMD_SUBMITTED)
			break;

		if (ENA_TIME_EXPIRE(timeout)) {
			ena_trc_err("Wait for completion (polling) timeout\n");
			/* ENA didn't have any completion */
			ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
			admin_queue->stats.no_completion++;
			admin_queue->running_state = false;
			ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);

			ret = ENA_COM_TIMER_EXPIRED;
			goto err;
		}

		ENA_MSLEEP(ENA_POLL_MS);
	}

	if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
		ena_trc_err("Command was aborted\n");
		ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
		admin_queue->stats.aborted_cmd++;
		ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
		ret = ENA_COM_NO_DEVICE;
		goto err;
	}

	ENA_WARN(comp_ctx->status != ENA_CMD_COMPLETED,
		 "Invalid comp status %d\n", comp_ctx->status);

	ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
err:
	comp_ctxt_release(admin_queue, comp_ctx);
	return ret;
}
/* Set the LLQ configurations of the firmware.
 *
 * The driver provides only the enabled feature values to the FW, which in
 * turn checks if they are supported.
 */
static int ena_com_set_llq(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
	int ret;

	memset(&cmd, 0x0, sizeof(cmd));
	admin_queue = &ena_dev->admin_queue;

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.feat_common.feature_id = ENA_ADMIN_LLQ;

	cmd.u.llq.header_location_ctrl_enabled = llq_info->header_location_ctrl;
	cmd.u.llq.entry_size_ctrl_enabled = llq_info->desc_list_entry_size_ctrl;
	cmd.u.llq.desc_num_before_header_enabled = llq_info->descs_num_before_header;
	cmd.u.llq.descriptors_stride_ctrl_enabled = llq_info->desc_stride_ctrl;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));

	if (unlikely(ret))
		ena_trc_err("Failed to set LLQ configurations: %d\n", ret);

	return ret;
}
static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
				   struct ena_admin_feature_llq_desc *llq_features,
				   struct ena_llq_configurations *llq_default_cfg)
{
	struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
	u16 supported_feat;
	int rc;

	memset(llq_info, 0, sizeof(*llq_info));

	supported_feat = llq_features->header_location_ctrl_supported;

	if (likely(supported_feat & llq_default_cfg->llq_header_location)) {
		llq_info->header_location_ctrl = llq_default_cfg->llq_header_location;
	} else {
		ena_trc_err("Invalid header location control, supported: 0x%x\n",
			    supported_feat);
		return ENA_COM_INVAL;
	}

	if (likely(llq_info->header_location_ctrl == ENA_ADMIN_INLINE_HEADER)) {
		llq_info->inline_header = true;

		supported_feat = llq_features->descriptors_stride_ctrl_supported;
		if (likely(supported_feat & llq_default_cfg->llq_stride_ctrl)) {
			llq_info->desc_stride_ctrl = llq_default_cfg->llq_stride_ctrl;
		} else {
			if (supported_feat & ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY) {
				llq_info->desc_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
			} else if (supported_feat & ENA_ADMIN_SINGLE_DESC_PER_ENTRY) {
				llq_info->desc_stride_ctrl = ENA_ADMIN_SINGLE_DESC_PER_ENTRY;
			} else {
				ena_trc_err("Invalid desc_stride_ctrl, supported: 0x%x\n",
					    supported_feat);
				return ENA_COM_INVAL;
			}

			ena_trc_err("Default llq stride ctrl is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
				    llq_default_cfg->llq_stride_ctrl,
				    supported_feat,
				    llq_info->desc_stride_ctrl);
		}
	} else {
		llq_info->inline_header = false;
		llq_info->desc_stride_ctrl = 0;
	}

	supported_feat = llq_features->entry_size_ctrl_supported;
	if (likely(supported_feat & llq_default_cfg->llq_ring_entry_size)) {
		llq_info->desc_list_entry_size_ctrl = llq_default_cfg->llq_ring_entry_size;
		llq_info->desc_list_entry_size = llq_default_cfg->llq_ring_entry_size_value;
	} else {
		if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_128B) {
			llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
			llq_info->desc_list_entry_size = 128;
		} else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_192B) {
			llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_192B;
			llq_info->desc_list_entry_size = 192;
		} else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_256B) {
			llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_256B;
			llq_info->desc_list_entry_size = 256;
		} else {
			ena_trc_err("Invalid entry_size_ctrl, supported: 0x%x\n", supported_feat);
			return ENA_COM_INVAL;
		}

		ena_trc_err("Default llq ring entry size is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
			    llq_default_cfg->llq_ring_entry_size,
			    supported_feat,
			    llq_info->desc_list_entry_size);
	}

	if (unlikely(llq_info->desc_list_entry_size & 0x7)) {
		/* The desc list entry size should be a whole multiple of 8.
		 * This requirement comes from __iowrite64_copy().
		 */
		ena_trc_err("illegal entry size %d\n",
			    llq_info->desc_list_entry_size);
		return ENA_COM_INVAL;
	}

	if (llq_info->desc_stride_ctrl == ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY)
		llq_info->descs_per_entry = llq_info->desc_list_entry_size /
			sizeof(struct ena_eth_io_tx_desc);
	else
		llq_info->descs_per_entry = 1;

	supported_feat = llq_features->desc_num_before_header_supported;
	if (likely(supported_feat & llq_default_cfg->llq_num_decs_before_header)) {
		llq_info->descs_num_before_header = llq_default_cfg->llq_num_decs_before_header;
	} else {
		if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2) {
			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
		} else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1) {
			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1;
		} else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4) {
			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4;
		} else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8) {
			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8;
		} else {
			ena_trc_err("Invalid descs_num_before_header, supported: 0x%x\n",
				    supported_feat);
			return ENA_COM_INVAL;
		}

		ena_trc_err("Default llq num descs before header is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
			    llq_default_cfg->llq_num_decs_before_header,
			    supported_feat,
			    llq_info->descs_num_before_header);
	}

	llq_info->max_entries_in_tx_burst =
		(u16)(llq_features->max_tx_burst_size / llq_default_cfg->llq_ring_entry_size_value);

	rc = ena_com_set_llq(ena_dev);
	if (unlikely(rc))
		ena_trc_err("Cannot set LLQ configuration: %d\n", rc);

	return rc;
}
static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,
							struct ena_com_admin_queue *admin_queue)
{
	unsigned long flags = 0;
	int ret;

	ENA_WAIT_EVENT_WAIT(comp_ctx->wait_event,
			    admin_queue->completion_timeout);

	/* In case the command wasn't completed, find out the root cause.
	 * There are two kinds of errors:
	 * 1) No completion (timeout reached)
	 * 2) There is a completion but the driver didn't receive an MSI-X interrupt.
	 */
	if (unlikely(comp_ctx->status == ENA_CMD_SUBMITTED)) {
		ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
		ena_com_handle_admin_completion(admin_queue);
		admin_queue->stats.no_completion++;
		ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);

		if (comp_ctx->status == ENA_CMD_COMPLETED)
			ena_trc_err("The ena device has a completion but the driver didn't receive any MSI-X interrupt (cmd %d)\n",
				    comp_ctx->cmd_opcode);
		else
			ena_trc_err("The ena device didn't send any completion for admin cmd %d, status %d\n",
				    comp_ctx->cmd_opcode, comp_ctx->status);

		admin_queue->running_state = false;
		ret = ENA_COM_TIMER_EXPIRED;
		goto err;
	}

	ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
err:
	comp_ctxt_release(admin_queue, comp_ctx);
	return ret;
}
/* This method reads a hardware device register by posting writes and
 * waiting for a response.
 * On timeout the function returns ENA_MMIO_READ_TIMEOUT.
 */
static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
	volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp =
		mmio_read->read_resp;
	u32 mmio_read_reg, ret, i;
	unsigned long flags = 0;
	u32 timeout = mmio_read->reg_read_to;

	if (timeout == 0)
		timeout = ENA_REG_READ_TIMEOUT;

	/* If readless is disabled, perform regular read */
	if (!mmio_read->readless_supported)
		return ENA_REG_READ32(ena_dev->bus, ena_dev->reg_bar + offset);

	ENA_SPINLOCK_LOCK(mmio_read->lock, flags);
	mmio_read->seq_num++;
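	/* Seed req_id with a value that can never match this sequence number
	 * (seq_num + 0xDEAD), so the polling loop below succeeds only after
	 * the device has DMA-written a fresh response for this request.
	 */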
	read_resp->req_id = mmio_read->seq_num + 0xDEAD;
	mmio_read_reg = (offset << ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT) &
			ENA_REGS_MMIO_REG_READ_REG_OFF_MASK;
	mmio_read_reg |= mmio_read->seq_num &
			ENA_REGS_MMIO_REG_READ_REQ_ID_MASK;

	ENA_REG_WRITE32(ena_dev->bus, mmio_read_reg,
			ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);

	for (i = 0; i < timeout; i++) {
		if (READ_ONCE16(read_resp->req_id) == mmio_read->seq_num)
			break;
		ENA_UDELAY(1);
	}

	if (unlikely(i == timeout)) {
		ena_trc_err("reg read timed out. expected: req id[%hu] offset[%hu], actual: req id[%hu] offset[%hu]\n",
			    mmio_read->seq_num,
			    offset,
			    read_resp->req_id,
			    read_resp->reg_off);
		ret = ENA_MMIO_READ_TIMEOUT;
		goto err;
	}

	if (read_resp->reg_off != offset) {
		ena_trc_err("Read failure: wrong offset provided\n");
		ret = ENA_MMIO_READ_TIMEOUT;
	} else {
		ret = read_resp->reg_val;
	}
err:
	ENA_SPINLOCK_UNLOCK(mmio_read->lock, flags);
	return ret;
}
/* There are two ways to wait for completion.
 * Polling mode - wait until the completion is available.
 * Async mode - wait on wait queue until the completion is ready
 * (or the timeout expired).
 * It is expected that the IRQ called ena_com_handle_admin_completion
 * to mark the completions.
 */
static int ena_com_wait_and_process_admin_cq(struct ena_comp_ctx *comp_ctx,
					     struct ena_com_admin_queue *admin_queue)
{
	if (admin_queue->polling)
		return ena_com_wait_and_process_admin_cq_polling(comp_ctx,
								 admin_queue);

	return ena_com_wait_and_process_admin_cq_interrupts(comp_ctx,
							    admin_queue);
}
static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
				 struct ena_com_io_sq *io_sq)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_destroy_sq_cmd destroy_cmd;
	struct ena_admin_acq_destroy_sq_resp_desc destroy_resp;
	u8 direction;
	int ret;

	memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));

	if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
		direction = ENA_ADMIN_SQ_DIRECTION_TX;
	else
		direction = ENA_ADMIN_SQ_DIRECTION_RX;

	destroy_cmd.sq.sq_identity |= (direction <<
		ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT) &
		ENA_ADMIN_SQ_SQ_DIRECTION_MASK;

	destroy_cmd.sq.sq_idx = io_sq->idx;
	destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_SQ;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&destroy_cmd,
					    sizeof(destroy_cmd),
					    (struct ena_admin_acq_entry *)&destroy_resp,
					    sizeof(destroy_resp));

	if (unlikely(ret && (ret != ENA_COM_NO_DEVICE)))
		ena_trc_err("Failed to destroy IO SQ. error: %d\n", ret);

	return ret;
}
static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
				  struct ena_com_io_sq *io_sq,
				  struct ena_com_io_cq *io_cq)
{
	size_t size;

	if (io_cq->cdesc_addr.virt_addr) {
		size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;

		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
				      size,
				      io_cq->cdesc_addr.virt_addr,
				      io_cq->cdesc_addr.phys_addr,
				      io_cq->cdesc_addr.mem_handle);

		io_cq->cdesc_addr.virt_addr = NULL;
	}

	if (io_sq->desc_addr.virt_addr) {
		size = io_sq->desc_entry_size * io_sq->q_depth;

		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
				      size,
				      io_sq->desc_addr.virt_addr,
				      io_sq->desc_addr.phys_addr,
				      io_sq->desc_addr.mem_handle);

		io_sq->desc_addr.virt_addr = NULL;
	}

	if (io_sq->bounce_buf_ctrl.base_buffer) {
		size = io_sq->llq_info.desc_list_entry_size * ENA_COM_BOUNCE_BUFFER_CNTRL_CNT;
		ENA_MEM_FREE(ena_dev->dmadev, io_sq->bounce_buf_ctrl.base_buffer);
		io_sq->bounce_buf_ctrl.base_buffer = NULL;
	}
}
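/* Poll the device status register until the reset-in-progress bit reaches
 * the expected state or the timeout (given in 100 ms units) elapses.
 */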
static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
				u16 exp_state)
{
	u32 val, i;

	/* Convert timeout from resolution of 100ms to ENA_POLL_MS */
	timeout = (timeout * 100) / ENA_POLL_MS;

	for (i = 0; i < timeout; i++) {
		val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
		if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) {
			ena_trc_err("Reg read timeout occurred\n");
			return ENA_COM_TIMER_EXPIRED;
		}

		if ((val & ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK) ==
		    exp_state)
			return 0;

		ENA_MSLEEP(ENA_POLL_MS);
	}

	return ENA_COM_TIMER_EXPIRED;
}
static bool ena_com_check_supported_feature_id(struct ena_com_dev *ena_dev,
					       enum ena_admin_aq_feature_id feature_id)
{
	u32 feature_mask = 1 << feature_id;

	/* Device attributes are always supported */
	if ((feature_id != ENA_ADMIN_DEVICE_ATTRIBUTES) &&
	    !(ena_dev->supported_features & feature_mask))
		return false;

	return true;
}
static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
				  struct ena_admin_get_feat_resp *get_resp,
				  enum ena_admin_aq_feature_id feature_id,
				  dma_addr_t control_buf_dma_addr,
				  u32 control_buff_size,
				  u8 feature_ver)
{
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_get_feat_cmd get_cmd;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
		ena_trc_dbg("Feature %d isn't supported\n", feature_id);
		return ENA_COM_UNSUPPORTED;
	}

	memset(&get_cmd, 0x0, sizeof(get_cmd));
	admin_queue = &ena_dev->admin_queue;

	get_cmd.aq_common_descriptor.opcode = ENA_ADMIN_GET_FEATURE;

	if (control_buff_size)
		get_cmd.aq_common_descriptor.flags =
			ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	else
		get_cmd.aq_common_descriptor.flags = 0;

	ret = ena_com_mem_addr_set(ena_dev,
				   &get_cmd.control_buffer.address,
				   control_buf_dma_addr);
	if (unlikely(ret)) {
		ena_trc_err("memory address set failed\n");
		return ret;
	}

	get_cmd.control_buffer.length = control_buff_size;
	get_cmd.feat_common.feature_version = feature_ver;
	get_cmd.feat_common.feature_id = feature_id;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)
					    &get_cmd,
					    sizeof(get_cmd),
					    (struct ena_admin_acq_entry *)
					    get_resp,
					    sizeof(*get_resp));

	if (unlikely(ret))
		ena_trc_err("Failed to submit get_feature command %d error: %d\n",
			    feature_id, ret);

	return ret;
}
static int ena_com_get_feature(struct ena_com_dev *ena_dev,
			       struct ena_admin_get_feat_resp *get_resp,
			       enum ena_admin_aq_feature_id feature_id,
			       u8 feature_ver)
{
	return ena_com_get_feature_ex(ena_dev, get_resp, feature_id,
				      0, 0, feature_ver);
}
static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
			       sizeof(*rss->hash_key),
			       rss->hash_key,
			       rss->hash_key_dma_addr,
			       rss->hash_key_mem_handle);
	if (unlikely(!rss->hash_key))
		return ENA_COM_NO_MEM;

	return 0;
}
static void ena_com_hash_key_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	if (rss->hash_key)
		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
				      sizeof(*rss->hash_key),
				      rss->hash_key,
				      rss->hash_key_dma_addr,
				      rss->hash_key_mem_handle);
	rss->hash_key = NULL;
}
static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
			       sizeof(*rss->hash_ctrl),
			       rss->hash_ctrl,
			       rss->hash_ctrl_dma_addr,
			       rss->hash_ctrl_mem_handle);
	if (unlikely(!rss->hash_ctrl))
		return ENA_COM_NO_MEM;

	return 0;
}
static void ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	if (rss->hash_ctrl)
		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
				      sizeof(*rss->hash_ctrl),
				      rss->hash_ctrl,
				      rss->hash_ctrl_dma_addr,
				      rss->hash_ctrl_mem_handle);
	rss->hash_ctrl = NULL;
}
static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
					   u16 log_size)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	size_t tbl_size;
	int ret;

	ret = ena_com_get_feature(ena_dev, &get_resp,
				  ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG, 0);
	if (unlikely(ret))
		return ret;

	if ((get_resp.u.ind_table.min_size > log_size) ||
	    (get_resp.u.ind_table.max_size < log_size)) {
		ena_trc_err("indirect table size doesn't fit. requested size: %d, while min is: %d and max is: %d\n",
			    1 << log_size,
			    1 << get_resp.u.ind_table.min_size,
			    1 << get_resp.u.ind_table.max_size);
		return ENA_COM_INVAL;
	}

	tbl_size = (1ULL << log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
			       tbl_size,
			       rss->rss_ind_tbl,
			       rss->rss_ind_tbl_dma_addr,
			       rss->rss_ind_tbl_mem_handle);
	if (unlikely(!rss->rss_ind_tbl))
		goto mem_err1;

	tbl_size = (1ULL << log_size) * sizeof(u16);
	rss->host_rss_ind_tbl =
		ENA_MEM_ALLOC(ena_dev->dmadev, tbl_size);
	if (unlikely(!rss->host_rss_ind_tbl))
		goto mem_err2;

	rss->tbl_log_size = log_size;

	return 0;

mem_err2:
	tbl_size = (1ULL << log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
			      tbl_size,
			      rss->rss_ind_tbl,
			      rss->rss_ind_tbl_dma_addr,
			      rss->rss_ind_tbl_mem_handle);
	rss->rss_ind_tbl = NULL;
mem_err1:
	rss->tbl_log_size = 0;
	return ENA_COM_NO_MEM;
}
static void ena_com_indirect_table_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;
	size_t tbl_size = (1ULL << rss->tbl_log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	if (rss->rss_ind_tbl)
		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
				      tbl_size,
				      rss->rss_ind_tbl,
				      rss->rss_ind_tbl_dma_addr,
				      rss->rss_ind_tbl_mem_handle);
	rss->rss_ind_tbl = NULL;

	if (rss->host_rss_ind_tbl)
		ENA_MEM_FREE(ena_dev->dmadev, rss->host_rss_ind_tbl);
	rss->host_rss_ind_tbl = NULL;
}
static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
				struct ena_com_io_sq *io_sq, u16 cq_idx)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_create_sq_cmd create_cmd;
	struct ena_admin_acq_create_sq_resp_desc cmd_completion;
	u8 direction;
	int ret;

	memset(&create_cmd, 0x0, sizeof(create_cmd));

	create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_SQ;

	if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
		direction = ENA_ADMIN_SQ_DIRECTION_TX;
	else
		direction = ENA_ADMIN_SQ_DIRECTION_RX;

	create_cmd.sq_identity |= (direction <<
		ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT) &
		ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK;

	create_cmd.sq_caps_2 |= io_sq->mem_queue_type &
		ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK;

	create_cmd.sq_caps_2 |= (ENA_ADMIN_COMPLETION_POLICY_DESC <<
		ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT) &
		ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK;

	create_cmd.sq_caps_3 |=
		ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK;

	create_cmd.cq_idx = cq_idx;
	create_cmd.sq_depth = io_sq->q_depth;

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
		ret = ena_com_mem_addr_set(ena_dev,
					   &create_cmd.sq_ba,
					   io_sq->desc_addr.phys_addr);
		if (unlikely(ret)) {
			ena_trc_err("memory address set failed\n");
			return ret;
		}
	}

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&create_cmd,
					    sizeof(create_cmd),
					    (struct ena_admin_acq_entry *)&cmd_completion,
					    sizeof(cmd_completion));
	if (unlikely(ret)) {
		ena_trc_err("Failed to create IO SQ. error: %d\n", ret);
		return ret;
	}

	io_sq->idx = cmd_completion.sq_idx;

	io_sq->db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
		(uintptr_t)cmd_completion.sq_doorbell_offset);

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
		io_sq->header_addr = (u8 __iomem *)((uintptr_t)ena_dev->mem_bar
				+ cmd_completion.llq_headers_offset);

		io_sq->desc_addr.pbuf_dev_addr =
			(u8 __iomem *)((uintptr_t)ena_dev->mem_bar +
			cmd_completion.llq_descriptors_offset);
	}

	ena_trc_dbg("created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth);

	return ret;
}
static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_com_io_sq *io_sq;
	size_t i;
	u16 qid;

	for (i = 0; i < 1 << rss->tbl_log_size; i++) {
		qid = rss->host_rss_ind_tbl[i];
		if (qid >= ENA_TOTAL_NUM_QUEUES)
			return ENA_COM_INVAL;

		io_sq = &ena_dev->io_sq_queues[qid];

		if (io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX)
			return ENA_COM_INVAL;

		rss->rss_ind_tbl[i].cq_idx = io_sq->idx;
	}

	return 0;
}
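/* Rebuild the host-side indirection table from the device-side one by
 * inverting the qid -> device-index mapping of the IO SQs.
 */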
static int ena_com_ind_tbl_convert_from_device(struct ena_com_dev *ena_dev)
{
	u16 dev_idx_to_host_tbl[ENA_TOTAL_NUM_QUEUES];
	struct ena_rss *rss = &ena_dev->rss;
	u8 idx;
	u16 i;

	/* A designated initializer of { (u16)-1 } would only mark the first
	 * slot as unmapped, so initialize every entry explicitly.
	 */
	for (i = 0; i < ENA_TOTAL_NUM_QUEUES; i++)
		dev_idx_to_host_tbl[i] = (u16)-1;

	for (i = 0; i < ENA_TOTAL_NUM_QUEUES; i++)
		dev_idx_to_host_tbl[ena_dev->io_sq_queues[i].idx] = i;

	for (i = 0; i < 1 << rss->tbl_log_size; i++) {
		if (rss->rss_ind_tbl[i].cq_idx >= ENA_TOTAL_NUM_QUEUES)
			return ENA_COM_INVAL;
		idx = (u8)rss->rss_ind_tbl[i].cq_idx;

		if (dev_idx_to_host_tbl[idx] >= ENA_TOTAL_NUM_QUEUES)
			return ENA_COM_INVAL;

		rss->host_rss_ind_tbl[i] = dev_idx_to_host_tbl[idx];
	}

	return 0;
}
static int ena_com_init_interrupt_moderation_table(struct ena_com_dev *ena_dev)
{
	size_t size = sizeof(struct ena_intr_moder_entry) * ENA_INTR_MAX_NUM_OF_LEVELS;

	ena_dev->intr_moder_tbl = ENA_MEM_ALLOC(ena_dev->dmadev, size);
	if (!ena_dev->intr_moder_tbl)
		return ENA_COM_NO_MEM;

	ena_com_config_default_interrupt_moderation_table(ena_dev);

	return 0;
}
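/* Convert the interrupt moderation intervals from microseconds into device
 * units of intr_delay_resolution. E.g. assuming a 2 us resolution, a 64 us
 * table entry becomes 32 device units.
 */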
static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev,
						 u16 intr_delay_resolution)
{
	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
	unsigned int i;

	if (!intr_delay_resolution) {
		ena_trc_err("Illegal intr_delay_resolution provided. Going to use default 1 usec resolution\n");
		intr_delay_resolution = 1;
	}
	ena_dev->intr_delay_resolution = intr_delay_resolution;

	/* update Rx */
	for (i = 0; i < ENA_INTR_MAX_NUM_OF_LEVELS; i++)
		intr_moder_tbl[i].intr_moder_interval /= intr_delay_resolution;

	/* update Tx */
	ena_dev->intr_moder_tx_interval /= intr_delay_resolution;
}
/*****************************************************************************/
/*******************************      API       *****************************/
/*****************************************************************************/
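/* Submit a single admin command and block until its completion has been
 * processed, either by polling the CQ directly or by waiting for the
 * MSI-X driven completion event.
 */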
int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue,
				  struct ena_admin_aq_entry *cmd,
				  size_t cmd_size,
				  struct ena_admin_acq_entry *comp,
				  size_t comp_size)
{
	struct ena_comp_ctx *comp_ctx;
	int ret;

	comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size,
					    comp, comp_size);
	if (IS_ERR(comp_ctx)) {
		if (comp_ctx == ERR_PTR(ENA_COM_NO_DEVICE))
			ena_trc_dbg("Failed to submit command [%ld]\n", PTR_ERR(comp_ctx));
		else
			ena_trc_err("Failed to submit command [%ld]\n", PTR_ERR(comp_ctx));

		return PTR_ERR(comp_ctx);
	}

	ret = ena_com_wait_and_process_admin_cq(comp_ctx, admin_queue);
	if (unlikely(ret)) {
		if (admin_queue->running_state)
			ena_trc_err("Failed to process command. ret = %d\n", ret);
		else
			ena_trc_dbg("Failed to process command. ret = %d\n", ret);
	}
	return ret;
}
int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
			 struct ena_com_io_cq *io_cq)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_create_cq_cmd create_cmd;
	struct ena_admin_acq_create_cq_resp_desc cmd_completion;
	int ret;

	memset(&create_cmd, 0x0, sizeof(create_cmd));

	create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_CQ;

	create_cmd.cq_caps_2 |= (io_cq->cdesc_entry_size_in_bytes / 4) &
		ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;
	create_cmd.cq_caps_1 |=
		ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK;

	create_cmd.msix_vector = io_cq->msix_vector;
	create_cmd.cq_depth = io_cq->q_depth;

	ret = ena_com_mem_addr_set(ena_dev,
				   &create_cmd.cq_ba,
				   io_cq->cdesc_addr.phys_addr);
	if (unlikely(ret)) {
		ena_trc_err("memory address set failed\n");
		return ret;
	}

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&create_cmd,
					    sizeof(create_cmd),
					    (struct ena_admin_acq_entry *)&cmd_completion,
					    sizeof(cmd_completion));
	if (unlikely(ret)) {
		ena_trc_err("Failed to create IO CQ. error: %d\n", ret);
		return ret;
	}

	io_cq->idx = cmd_completion.cq_idx;

	io_cq->unmask_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
		cmd_completion.cq_interrupt_unmask_register_offset);

	if (cmd_completion.cq_head_db_register_offset)
		io_cq->cq_head_db_reg =
			(u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
			cmd_completion.cq_head_db_register_offset);

	if (cmd_completion.numa_node_register_offset)
		io_cq->numa_node_cfg_reg =
			(u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
			cmd_completion.numa_node_register_offset);

	ena_trc_dbg("created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth);

	return ret;
}
int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
			    struct ena_com_io_sq **io_sq,
			    struct ena_com_io_cq **io_cq)
{
	if (qid >= ENA_TOTAL_NUM_QUEUES) {
		ena_trc_err("Invalid queue number %d; max is %d\n",
			    qid, ENA_TOTAL_NUM_QUEUES);
		return ENA_COM_INVAL;
	}

	*io_sq = &ena_dev->io_sq_queues[qid];
	*io_cq = &ena_dev->io_cq_queues[qid];

	return 0;
}
void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_comp_ctx *comp_ctx;
	u16 i;

	if (!admin_queue->comp_ctx)
		return;

	for (i = 0; i < admin_queue->q_depth; i++) {
		comp_ctx = get_comp_ctxt(admin_queue, i, false);
		if (unlikely(!comp_ctx))
			break;

		comp_ctx->status = ENA_CMD_ABORTED;
		ENA_WAIT_EVENT_SIGNAL(comp_ctx->wait_event);
	}
}
void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	unsigned long flags = 0;

	ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
	while (ATOMIC32_READ(&admin_queue->outstanding_cmds) != 0) {
		ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
		ENA_MSLEEP(ENA_POLL_MS);
		ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
	}
	ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
}
int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
			  struct ena_com_io_cq *io_cq)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_destroy_cq_cmd destroy_cmd;
	struct ena_admin_acq_destroy_cq_resp_desc destroy_resp;
	int ret;

	memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));

	destroy_cmd.cq_idx = io_cq->idx;
	destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_CQ;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&destroy_cmd,
					    sizeof(destroy_cmd),
					    (struct ena_admin_acq_entry *)&destroy_resp,
					    sizeof(destroy_resp));

	if (unlikely(ret && (ret != ENA_COM_NO_DEVICE)))
		ena_trc_err("Failed to destroy IO CQ. error: %d\n", ret);

	return ret;
}
bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev)
{
	return ena_dev->admin_queue.running_state;
}

void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	unsigned long flags = 0;

	ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
	ena_dev->admin_queue.running_state = state;
	ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
}
void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev)
{
	u16 depth = ena_dev->aenq.q_depth;

	ENA_WARN(ena_dev->aenq.head != depth, "Invalid AENQ state\n");

	/* Init head_db to mark that all entries in the queue
	 * are initially available
	 */
	ENA_REG_WRITE32(ena_dev->bus, depth, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
}
int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
{
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	struct ena_admin_get_feat_resp get_resp;
	int ret;

	ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG, 0);
	if (ret) {
		ena_trc_info("Can't get aenq configuration\n");
		return ret;
	}

	if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) {
		ena_trc_warn("Trying to set unsupported aenq events. supported flag: 0x%x asked flag: 0x%x\n",
			     get_resp.u.aenq.supported_groups,
			     groups_flag);
		return ENA_COM_UNSUPPORTED;
	}

	memset(&cmd, 0x0, sizeof(cmd));
	admin_queue = &ena_dev->admin_queue;

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags = 0;
	cmd.feat_common.feature_id = ENA_ADMIN_AENQ_CONFIG;
	cmd.u.aenq.enabled_groups = groups_flag;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));

	if (unlikely(ret))
		ena_trc_err("Failed to config AENQ ret: %d\n", ret);

	return ret;
}
int ena_com_get_dma_width(struct ena_com_dev *ena_dev)
{
	u32 caps = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
	int width;

	if (unlikely(caps == ENA_MMIO_READ_TIMEOUT)) {
		ena_trc_err("Reg read timeout occurred\n");
		return ENA_COM_TIMER_EXPIRED;
	}

	width = (caps & ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK) >>
		ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT;

	ena_trc_dbg("ENA dma width: %d\n", width);

	if ((width < 32) || (width > ENA_MAX_PHYS_ADDR_SIZE_BITS)) {
		ena_trc_err("DMA width illegal value: %d\n", width);
		return ENA_COM_INVAL;
	}

	ena_dev->dma_addr_bits = width;

	return width;
}
int ena_com_validate_version(struct ena_com_dev *ena_dev)
{
	u32 ver;
	u32 ctrl_ver;
	u32 ctrl_ver_masked;

	/* Make sure the ENA version and the controller version are at least
	 * the versions the driver expects
	 */
	ver = ena_com_reg_bar_read32(ena_dev, ENA_REGS_VERSION_OFF);
	ctrl_ver = ena_com_reg_bar_read32(ena_dev,
					  ENA_REGS_CONTROLLER_VERSION_OFF);

	if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) ||
		     (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) {
		ena_trc_err("Reg read timeout occurred\n");
		return ENA_COM_TIMER_EXPIRED;
	}

	ena_trc_info("ena device version: %d.%d\n",
		     (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >>
		     ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
		     ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);

	ena_trc_info("ena controller version: %d.%d.%d implementation version %d\n",
		     (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK)
		     >> ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
		     (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK)
		     >> ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT,
		     (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK),
		     (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK) >>
		     ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT);

	ctrl_ver_masked =
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) |
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) |
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK);

	/* Validate the ctrl version without the implementation ID */
	if (ctrl_ver_masked < MIN_ENA_CTRL_VER) {
		ena_trc_err("ENA ctrl version is lower than the minimal ctrl version the driver supports\n");
		return -1;
	}

	return 0;
}
void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_com_admin_cq *cq = &admin_queue->cq;
	struct ena_com_admin_sq *sq = &admin_queue->sq;
	struct ena_com_aenq *aenq = &ena_dev->aenq;
	u16 size;

	/* Check comp_ctx before touching it: destroying the wait event of a
	 * NULL context would dereference a NULL pointer.
	 */
	if (admin_queue->comp_ctx) {
		ENA_WAIT_EVENT_DESTROY(admin_queue->comp_ctx->wait_event);
		ENA_MEM_FREE(ena_dev->dmadev, admin_queue->comp_ctx);
	}
	admin_queue->comp_ctx = NULL;

	size = ADMIN_SQ_SIZE(admin_queue->q_depth);
	if (sq->entries)
		ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, sq->entries,
				      sq->dma_addr, sq->mem_handle);
	sq->entries = NULL;

	size = ADMIN_CQ_SIZE(admin_queue->q_depth);
	if (cq->entries)
		ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, cq->entries,
				      cq->dma_addr, cq->mem_handle);
	cq->entries = NULL;

	size = ADMIN_AENQ_SIZE(aenq->q_depth);
	if (ena_dev->aenq.entries)
		ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, aenq->entries,
				      aenq->dma_addr, aenq->mem_handle);
	aenq->entries = NULL;
	ENA_SPINLOCK_DESTROY(admin_queue->q_lock);
}
void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
{
	u32 mask_value = 0;

	if (polling)
		mask_value = ENA_REGS_ADMIN_INTR_MASK;

	ENA_REG_WRITE32(ena_dev->bus, mask_value,
			ena_dev->reg_bar + ENA_REGS_INTR_MASK_OFF);
	ena_dev->admin_queue.polling = polling;
}
int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

	ENA_SPINLOCK_INIT(mmio_read->lock);
	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
			       sizeof(*mmio_read->read_resp),
			       mmio_read->read_resp,
			       mmio_read->read_resp_dma_addr,
			       mmio_read->read_resp_mem_handle);
	if (unlikely(!mmio_read->read_resp))
		goto err;

	ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);

	mmio_read->read_resp->req_id = 0x0;
	mmio_read->seq_num = 0x0;
	mmio_read->readless_supported = true;

	return 0;

err:
	ENA_SPINLOCK_DESTROY(mmio_read->lock);
	return ENA_COM_NO_MEM;
}
void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

	mmio_read->readless_supported = readless_supported;
}

void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

	ENA_REG_WRITE32(ena_dev->bus, 0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
	ENA_REG_WRITE32(ena_dev->bus, 0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);

	ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
			      sizeof(*mmio_read->read_resp),
			      mmio_read->read_resp,
			      mmio_read->read_resp_dma_addr,
			      mmio_read->read_resp_mem_handle);

	mmio_read->read_resp = NULL;
	ENA_SPINLOCK_DESTROY(mmio_read->lock);
}
void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
	u32 addr_low, addr_high;

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(mmio_read->read_resp_dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(mmio_read->read_resp_dma_addr);

	ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
	ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
}
int ena_com_admin_init(struct ena_com_dev *ena_dev,
		       struct ena_aenq_handlers *aenq_handlers)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	u32 aq_caps, acq_caps, dev_sts, addr_low, addr_high;
	int ret;

	dev_sts = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);

	if (unlikely(dev_sts == ENA_MMIO_READ_TIMEOUT)) {
		ena_trc_err("Reg read timeout occurred\n");
		return ENA_COM_TIMER_EXPIRED;
	}

	if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) {
		ena_trc_err("Device isn't ready, abort com init\n");
		return ENA_COM_NO_DEVICE;
	}

	admin_queue->q_depth = ENA_ADMIN_QUEUE_DEPTH;

	admin_queue->bus = ena_dev->bus;
	admin_queue->q_dmadev = ena_dev->dmadev;
	admin_queue->polling = false;
	admin_queue->curr_cmd_id = 0;

	ATOMIC32_SET(&admin_queue->outstanding_cmds, 0);

	ENA_SPINLOCK_INIT(admin_queue->q_lock);

	ret = ena_com_init_comp_ctxt(admin_queue);
	if (ret)
		goto error;

	ret = ena_com_admin_init_sq(admin_queue);
	if (ret)
		goto error;

	ret = ena_com_admin_init_cq(admin_queue);
	if (ret)
		goto error;

	admin_queue->sq.db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
		ENA_REGS_AQ_DB_OFF);

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->sq.dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->sq.dma_addr);

	ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_AQ_BASE_LO_OFF);
	ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_AQ_BASE_HI_OFF);

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->cq.dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->cq.dma_addr);

	ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_LO_OFF);
	ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_HI_OFF);
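	/* Pack the queue depth and the entry size (in bytes) of the admin
	 * SQ/CQ into the AQ/ACQ caps registers.
	 */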
	aq_caps = 0;
	aq_caps |= admin_queue->q_depth & ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK;
	aq_caps |= (sizeof(struct ena_admin_aq_entry) <<
		   ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT) &
		   ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK;

	acq_caps = 0;
	acq_caps |= admin_queue->q_depth & ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK;
	acq_caps |= (sizeof(struct ena_admin_acq_entry) <<
		    ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT) &
		    ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK;

	ENA_REG_WRITE32(ena_dev->bus, aq_caps, ena_dev->reg_bar + ENA_REGS_AQ_CAPS_OFF);
	ENA_REG_WRITE32(ena_dev->bus, acq_caps, ena_dev->reg_bar + ENA_REGS_ACQ_CAPS_OFF);
	ret = ena_com_admin_init_aenq(ena_dev, aenq_handlers);
	if (ret)
		goto error;

	admin_queue->running_state = true;

	return 0;
error:
	ena_com_admin_destroy(ena_dev);

	return ret;
}
int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
			    struct ena_com_create_io_ctx *ctx)
{
	struct ena_com_io_sq *io_sq;
	struct ena_com_io_cq *io_cq;
	int ret;

	if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) {
		ena_trc_err("Qid (%d) is bigger than max num of queues (%d)\n",
			    ctx->qid, ENA_TOTAL_NUM_QUEUES);
		return ENA_COM_INVAL;
	}

	io_sq = &ena_dev->io_sq_queues[ctx->qid];
	io_cq = &ena_dev->io_cq_queues[ctx->qid];

	memset(io_sq, 0x0, sizeof(*io_sq));
	memset(io_cq, 0x0, sizeof(*io_cq));

	/* Init CQ */
	io_cq->q_depth = ctx->queue_size;
	io_cq->direction = ctx->direction;
	io_cq->qid = ctx->qid;

	io_cq->msix_vector = ctx->msix_vector;

	io_sq->q_depth = ctx->queue_size;
	io_sq->direction = ctx->direction;
	io_sq->qid = ctx->qid;

	io_sq->mem_queue_type = ctx->mem_queue_type;

	if (ctx->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
		/* header length is limited to 8 bits */
		io_sq->tx_max_header_size =
			ENA_MIN32(ena_dev->tx_max_header_size, SZ_256);

	ret = ena_com_init_io_sq(ena_dev, ctx, io_sq);
	if (ret)
		goto error;
	ret = ena_com_init_io_cq(ena_dev, ctx, io_cq);
	if (ret)
		goto error;

	ret = ena_com_create_io_cq(ena_dev, io_cq);
	if (ret)
		goto error;

	ret = ena_com_create_io_sq(ena_dev, io_sq, io_cq->idx);
	if (ret)
		goto destroy_io_cq;

	return 0;

destroy_io_cq:
	ena_com_destroy_io_cq(ena_dev, io_cq);
error:
	ena_com_io_queue_free(ena_dev, io_sq, io_cq);
	return ret;
}
void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid)
{
	struct ena_com_io_sq *io_sq;
	struct ena_com_io_cq *io_cq;

	if (qid >= ENA_TOTAL_NUM_QUEUES) {
		ena_trc_err("Qid (%d) is bigger than max num of queues (%d)\n",
			    qid, ENA_TOTAL_NUM_QUEUES);
		return;
	}

	io_sq = &ena_dev->io_sq_queues[qid];
	io_cq = &ena_dev->io_cq_queues[qid];

	ena_com_destroy_io_sq(ena_dev, io_sq);
	ena_com_destroy_io_cq(ena_dev, io_cq);

	ena_com_io_queue_free(ena_dev, io_sq, io_cq);
}
int ena_com_get_link_params(struct ena_com_dev *ena_dev,
			    struct ena_admin_get_feat_resp *resp)
{
	return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG, 0);
}
int ena_com_extra_properties_strings_init(struct ena_com_dev *ena_dev)
{
	struct ena_admin_get_feat_resp resp;
	struct ena_extra_properties_strings *extra_properties_strings =
		&ena_dev->extra_properties_strings;
	u32 rc;

	extra_properties_strings->size = ENA_ADMIN_EXTRA_PROPERTIES_COUNT *
		ENA_ADMIN_EXTRA_PROPERTIES_STRING_LEN;

	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
			       extra_properties_strings->size,
			       extra_properties_strings->virt_addr,
			       extra_properties_strings->dma_addr,
			       extra_properties_strings->dma_handle);
	if (unlikely(!extra_properties_strings->virt_addr)) {
		ena_trc_err("Failed to allocate extra properties strings\n");
		return 0;
	}

	rc = ena_com_get_feature_ex(ena_dev, &resp,
				    ENA_ADMIN_EXTRA_PROPERTIES_STRINGS,
				    extra_properties_strings->dma_addr,
				    extra_properties_strings->size, 0);
	if (rc) {
		ena_trc_dbg("Failed to get extra properties strings\n");
		goto err;
	}

	return resp.u.extra_properties_strings.count;
err:
	ena_com_delete_extra_properties_strings(ena_dev);
	return 0;
}
void ena_com_delete_extra_properties_strings(struct ena_com_dev *ena_dev)
{
	struct ena_extra_properties_strings *extra_properties_strings =
		&ena_dev->extra_properties_strings;

	if (extra_properties_strings->virt_addr) {
		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
				      extra_properties_strings->size,
				      extra_properties_strings->virt_addr,
				      extra_properties_strings->dma_addr,
				      extra_properties_strings->dma_handle);
		extra_properties_strings->virt_addr = NULL;
	}
}
int ena_com_get_extra_properties_flags(struct ena_com_dev *ena_dev,
				       struct ena_admin_get_feat_resp *resp)
{
	return ena_com_get_feature(ena_dev, resp,
				   ENA_ADMIN_EXTRA_PROPERTIES_FLAGS, 0);
}
int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
			      struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
	struct ena_admin_get_feat_resp get_resp;
	int rc;

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_DEVICE_ATTRIBUTES, 0);
	if (rc)
		return rc;

	memcpy(&get_feat_ctx->dev_attr, &get_resp.u.dev_attr,
	       sizeof(get_resp.u.dev_attr));
	ena_dev->supported_features = get_resp.u.dev_attr.supported_features;

	if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
		rc = ena_com_get_feature(ena_dev, &get_resp,
					 ENA_ADMIN_MAX_QUEUES_EXT,
					 ENA_FEATURE_MAX_QUEUE_EXT_VER);
		if (rc)
			return rc;

		if (get_resp.u.max_queue_ext.version != ENA_FEATURE_MAX_QUEUE_EXT_VER)
			return ENA_COM_INVAL;

		memcpy(&get_feat_ctx->max_queue_ext, &get_resp.u.max_queue_ext,
		       sizeof(get_resp.u.max_queue_ext));
		ena_dev->tx_max_header_size =
			get_resp.u.max_queue_ext.max_queue_ext.max_tx_header_size;
	} else {
		rc = ena_com_get_feature(ena_dev, &get_resp,
					 ENA_ADMIN_MAX_QUEUES_NUM, 0);
		memcpy(&get_feat_ctx->max_queues, &get_resp.u.max_queue,
		       sizeof(get_resp.u.max_queue));
		ena_dev->tx_max_header_size =
			get_resp.u.max_queue.max_header_size;

		if (rc)
			return rc;
	}

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_AENQ_CONFIG, 0);
	if (rc)
		return rc;

	memcpy(&get_feat_ctx->aenq, &get_resp.u.aenq,
	       sizeof(get_resp.u.aenq));

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
	if (rc)
		return rc;

	memcpy(&get_feat_ctx->offload, &get_resp.u.offload,
	       sizeof(get_resp.u.offload));

	/* Driver hints isn't a mandatory admin command, so in case the
	 * command isn't supported set driver hints to 0.
	 */
	rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_HW_HINTS, 0);
	if (!rc)
		memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints,
		       sizeof(get_resp.u.hw_hints));
	else if (rc == ENA_COM_UNSUPPORTED)
		memset(&get_feat_ctx->hw_hints, 0x0, sizeof(get_feat_ctx->hw_hints));
	else
		return rc;

	rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_LLQ, 0);
	if (!rc)
		memcpy(&get_feat_ctx->llq, &get_resp.u.llq,
		       sizeof(get_resp.u.llq));
	else if (rc == ENA_COM_UNSUPPORTED)
		memset(&get_feat_ctx->llq, 0x0, sizeof(get_feat_ctx->llq));
	else
		return rc;

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG, 0);
	if (!rc)
		memcpy(&get_feat_ctx->ind_table, &get_resp.u.ind_table,
		       sizeof(get_resp.u.ind_table));
	else if (rc == ENA_COM_UNSUPPORTED)
		memset(&get_feat_ctx->ind_table, 0x0,
		       sizeof(get_feat_ctx->ind_table));
	else
		return rc;

	return 0;
}
void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev)
{
	ena_com_handle_admin_completion(&ena_dev->admin_queue);
}
/* ena_com_get_specific_aenq_cb:
 * returns the handler that is relevant to the specific event group
 */
static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *dev,
						     u16 group)
{
	struct ena_aenq_handlers *aenq_handlers = dev->aenq.aenq_handlers;

	if ((group < ENA_MAX_HANDLERS) && aenq_handlers->handlers[group])
		return aenq_handlers->handlers[group];

	return aenq_handlers->unimplemented_handler;
}
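/* Illustrative sketch (compiled out, not part of the driver): how a client
 * driver might wire up its AENQ callbacks. The handler names and the use of
 * ENA_ADMIN_LINK_CHANGE as a group id are assumptions for the example; the
 * callback signature follows the handler_cb(data, aenq_e) call below.
 */
#if 0
static void example_link_change_handler(void *data,
					struct ena_admin_aenq_entry *aenq_e)
{
	/* react to link up/down notifications */
}

static void example_unimplemented_handler(void *data,
					  struct ena_admin_aenq_entry *aenq_e)
{
	/* default for event groups without a registered handler */
}

static struct ena_aenq_handlers example_aenq_handlers = {
	.handlers = {
		[ENA_ADMIN_LINK_CHANGE] = example_link_change_handler,
	},
	.unimplemented_handler = example_unimplemented_handler,
};
#endif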
/* ena_com_aenq_intr_handler:
 * handles the aenq incoming events.
 * pop events from the queue and apply the specific handler
 */
void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
{
	struct ena_admin_aenq_entry *aenq_e;
	struct ena_admin_aenq_common_desc *aenq_common;
	struct ena_com_aenq *aenq = &dev->aenq;
	unsigned long long timestamp;
	ena_aenq_handler handler_cb;
	u16 masked_head, processed = 0;
	u8 phase;

	masked_head = aenq->head & (aenq->q_depth - 1);
	phase = aenq->phase;
	aenq_e = &aenq->entries[masked_head]; /* Get first entry */
	aenq_common = &aenq_e->aenq_common_desc;

	/* Go over all the events */
	while ((READ_ONCE8(aenq_common->flags) &
		ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) {
		/* Make sure the phase bit (ownership) is as expected before
		 * reading the rest of the descriptor.
		 */
		dma_rmb();

		timestamp = (unsigned long long)aenq_common->timestamp_low |
			((unsigned long long)aenq_common->timestamp_high << 32);
		ENA_TOUCH(timestamp); /* In case debug is disabled */
		ena_trc_dbg("AENQ! Group[%x] Syndrome[%x] timestamp: [%llus]\n",
			    aenq_common->group,
			    aenq_common->syndrom,
			    timestamp);

		/* Handle the specific event */
		handler_cb = ena_com_get_specific_aenq_cb(dev,
							  aenq_common->group);
		handler_cb(data, aenq_e); /* call the actual event handler */

		/* Get next event entry */
		masked_head++;
		processed++;

		if (unlikely(masked_head == aenq->q_depth)) {
			masked_head = 0;
			phase = !phase;
		}
		aenq_e = &aenq->entries[masked_head];
		aenq_common = &aenq_e->aenq_common_desc;
	}

	aenq->head += processed;
	aenq->phase = phase;

	/* Don't update aenq doorbell if there weren't any processed events */
	if (!processed)
		return;

	/* write the aenq doorbell after all AENQ descriptors were read */
	mb();
	ENA_REG_WRITE32_RELAXED(dev->bus, (u32)aenq->head,
				dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
}

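/* Note on the phase-bit protocol used above: with q_depth = 16, entries
 * 0..15 are consumed on the first lap while their phase bit equals the
 * initial phase (1); once masked_head wraps past the last entry, the
 * expected phase flips, so stale descriptors from the previous lap are
 * never mistaken for new events. The head value written to
 * ENA_REGS_AENQ_HEAD_DB_OFF is the unmasked running counter, not the
 * wrapped index.
 */
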
int ena_com_dev_reset(struct ena_com_dev *ena_dev,
		      enum ena_regs_reset_reason_types reset_reason)
{
	u32 stat, timeout, cap, reset_val;
	int rc;

	stat = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
	cap = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);

	if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) ||
		     (cap == ENA_MMIO_READ_TIMEOUT))) {
		ena_trc_err("Reg read32 timeout occurred\n");
		return ENA_COM_TIMER_EXPIRED;
	}

	if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) {
		ena_trc_err("Device isn't ready, can't reset device\n");
		return ENA_COM_INVAL;
	}

	timeout = (cap & ENA_REGS_CAPS_RESET_TIMEOUT_MASK) >>
		  ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT;
	if (timeout == 0) {
		ena_trc_err("Invalid timeout value\n");
		return ENA_COM_INVAL;
	}

	/* start reset */
	reset_val = ENA_REGS_DEV_CTL_DEV_RESET_MASK;
	reset_val |= (reset_reason << ENA_REGS_DEV_CTL_RESET_REASON_SHIFT) &
		     ENA_REGS_DEV_CTL_RESET_REASON_MASK;
	ENA_REG_WRITE32(ena_dev->bus, reset_val, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);

	/* Write again the MMIO read request address */
	ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);

	rc = wait_for_reset_state(ena_dev, timeout,
				  ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK);
	if (rc != 0) {
		ena_trc_err("Reset indication didn't turn on\n");
		return rc;
	}

	/* reset done */
	ENA_REG_WRITE32(ena_dev->bus, 0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
	rc = wait_for_reset_state(ena_dev, timeout, 0);
	if (rc != 0) {
		ena_trc_err("Reset indication didn't turn off\n");
		return rc;
	}

	timeout = (cap & ENA_REGS_CAPS_ADMIN_CMD_TO_MASK) >>
		  ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT;
	if (timeout)
		/* the resolution of the timeout register is 100ms */
		ena_dev->admin_queue.completion_timeout = timeout * 100000;
	else
		ena_dev->admin_queue.completion_timeout = ADMIN_CMD_TIMEOUT_US;

	return 0;
}

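/* Typical recovery flow (a sketch; the reason is whichever
 * ena_regs_reset_reason_types value triggered the recovery):
 *
 *	rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL);
 *	if (rc)
 *		return rc;
 *
 * After a successful reset the admin queue is expected to be destroyed
 * and re-initialized before issuing further admin commands.
 */
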
static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
			     struct ena_com_stats_ctx *ctx,
			     enum ena_admin_get_stats_type type)
{
	struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx->get_cmd;
	struct ena_admin_acq_get_stats_resp *get_resp = &ctx->get_resp;
	struct ena_com_admin_queue *admin_queue;
	int ret;

	admin_queue = &ena_dev->admin_queue;

	get_cmd->aq_common_descriptor.opcode = ENA_ADMIN_GET_STATS;
	get_cmd->aq_common_descriptor.flags = 0;
	get_cmd->type = type;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)get_cmd,
					    sizeof(*get_cmd),
					    (struct ena_admin_acq_entry *)get_resp,
					    sizeof(*get_resp));

	if (unlikely(ret))
		ena_trc_err("Failed to get stats. error: %d\n", ret);

	return ret;
}

int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
				struct ena_admin_basic_stats *stats)
{
	struct ena_com_stats_ctx ctx;
	int ret;

	memset(&ctx, 0x0, sizeof(ctx));
	ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_BASIC);
	if (likely(ret == 0))
		memcpy(stats, &ctx.get_resp.basic_stats,
		       sizeof(ctx.get_resp.basic_stats));

	return ret;
}

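/* Usage sketch, e.g. from an ethtool-style stats callback; counters are
 * split into 32-bit halves by the device:
 *
 *	struct ena_admin_basic_stats stats;
 *	u64 rx_bytes;
 *
 *	rc = ena_com_get_dev_basic_stats(ena_dev, &stats);
 *	if (likely(!rc))
 *		rx_bytes = ((u64)stats.rx_bytes_high << 32) |
 *			   stats.rx_bytes_low;
 */
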
int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu)
{
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
		ena_trc_dbg("Feature %d isn't supported\n", ENA_ADMIN_MTU);
		return ENA_COM_UNSUPPORTED;
	}

	memset(&cmd, 0x0, sizeof(cmd));
	admin_queue = &ena_dev->admin_queue;

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags = 0;
	cmd.feat_common.feature_id = ENA_ADMIN_MTU;
	cmd.u.mtu.mtu = mtu;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));

	if (unlikely(ret))
		ena_trc_err("Failed to set mtu %d. error: %d\n", mtu, ret);

	return ret;
}

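/* Usage sketch from a driver's MTU-change handler; new_mtu is assumed to
 * have been validated against the limits reported in dev_attr first:
 *
 *	rc = ena_com_set_dev_mtu(ena_dev, new_mtu);
 *	if (unlikely(rc))
 *		ena_trc_err("Failed to set new MTU %d\n", new_mtu);
 */
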
int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
				 struct ena_admin_feature_offload_desc *offload)
{
	int ret;
	struct ena_admin_get_feat_resp resp;

	ret = ena_com_get_feature(ena_dev, &resp,
				  ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
	if (unlikely(ret)) {
		ena_trc_err("Failed to get offload capabilities %d\n", ret);
		return ret;
	}

	memcpy(offload, &resp.u.offload, sizeof(resp.u.offload));

	return 0;
}

int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	struct ena_admin_get_feat_resp get_resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev,
						ENA_ADMIN_RSS_HASH_FUNCTION)) {
		ena_trc_dbg("Feature %d isn't supported\n",
			    ENA_ADMIN_RSS_HASH_FUNCTION);
		return ENA_COM_UNSUPPORTED;
	}

	/* Validate hash function is supported */
	ret = ena_com_get_feature(ena_dev, &get_resp,
				  ENA_ADMIN_RSS_HASH_FUNCTION, 0);
	if (unlikely(ret))
		return ret;

	if (!(get_resp.u.flow_hash_func.supported_func & (1 << rss->hash_func))) {
		ena_trc_err("Func hash %d isn't supported by device, abort\n",
			    rss->hash_func);
		return ENA_COM_UNSUPPORTED;
	}

	memset(&cmd, 0x0, sizeof(cmd));

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags =
		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_FUNCTION;
	cmd.u.flow_hash_func.init_val = rss->hash_init_val;
	cmd.u.flow_hash_func.selected_func = 1 << rss->hash_func;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.control_buffer.address,
				   rss->hash_key_dma_addr);
	if (unlikely(ret)) {
		ena_trc_err("memory address set failed\n");
		return ret;
	}

	cmd.control_buffer.length = sizeof(*rss->hash_key);

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));
	if (unlikely(ret)) {
		ena_trc_err("Failed to set hash function %d. error: %d\n",
			    rss->hash_func, ret);
		return ENA_COM_INVAL;
	}

	return 0;
}

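/* The hash key itself is not carried inline in the admin command above;
 * the device reads it from rss->hash_key_dma_addr through the indirect
 * control buffer, which is why an ena_com_mem_addr_set() failure aborts
 * the command before it is ever executed.
 */
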
int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
			       enum ena_admin_hash_functions func,
			       const u8 *key, u16 key_len, u32 init_val)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	struct ena_admin_feature_rss_flow_hash_control *hash_key =
		rss->hash_key;
	int rc;

	/* Make sure the key size is a multiple of DWORDs */
	if (unlikely(key_len & 0x3))
		return ENA_COM_INVAL;

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_HASH_FUNCTION,
				    rss->hash_key_dma_addr,
				    sizeof(*rss->hash_key), 0);
	if (unlikely(rc))
		return rc;

	if (!((1 << func) & get_resp.u.flow_hash_func.supported_func)) {
		ena_trc_err("Flow hash function %d isn't supported\n", func);
		return ENA_COM_UNSUPPORTED;
	}

	switch (func) {
	case ENA_ADMIN_TOEPLITZ:
		if (key_len > sizeof(hash_key->key)) {
			ena_trc_err("key len (%hu) is bigger than the max supported (%zu)\n",
				    key_len, sizeof(hash_key->key));
			return ENA_COM_INVAL;
		}

		memcpy(hash_key->key, key, key_len);
		rss->hash_init_val = init_val;
		hash_key->keys_num = key_len >> 2;
		break;
	case ENA_ADMIN_CRC32:
		rss->hash_init_val = init_val;
		break;
	default:
		ena_trc_err("Invalid hash function (%d)\n", func);
		return ENA_COM_INVAL;
	}

	rc = ena_com_set_hash_function(ena_dev);

	/* In case of failure, restore the old function */
	if (unlikely(rc))
		ena_com_get_hash_function(ena_dev, NULL, NULL);

	return rc;
}

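/* Sketch of programming a Toeplitz key; the key bytes below are a
 * made-up illustration, not a recommended key. Real callers typically
 * pass a random 40-byte key, i.e. 10 DWORDs:
 *
 *	static const u8 key[40] = { 0x6d, 0x5a, ... };
 *
 *	rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_TOEPLITZ,
 *					key, sizeof(key), 0xFFFFFFFF);
 */
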
int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
			      enum ena_admin_hash_functions *func,
			      u8 *key)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	struct ena_admin_feature_rss_flow_hash_control *hash_key =
		rss->hash_key;
	int rc;

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_HASH_FUNCTION,
				    rss->hash_key_dma_addr,
				    sizeof(*rss->hash_key), 0);
	if (unlikely(rc))
		return rc;

	rss->hash_func = get_resp.u.flow_hash_func.selected_func;
	if (func)
		*func = rss->hash_func;

	if (key)
		memcpy(key, hash_key->key, (size_t)(hash_key->keys_num) << 2);

	return 0;
}

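/* A NULL 'func' or 'key' is legal above and simply skips that output;
 * ena_com_fill_hash_function() relies on this to re-sync the cached
 * state after a failed set (see its call with NULL, NULL).
 */
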
int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev,
			  enum ena_admin_flow_hash_proto proto,
			  u16 *fields)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	int rc;

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_HASH_INPUT,
				    rss->hash_ctrl_dma_addr,
				    sizeof(*rss->hash_ctrl), 0);
	if (unlikely(rc))
		return rc;

	if (fields)
		*fields = rss->hash_ctrl->selected_fields[proto].fields;

	return 0;
}

int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev,
						ENA_ADMIN_RSS_HASH_INPUT)) {
		ena_trc_dbg("Feature %d isn't supported\n",
			    ENA_ADMIN_RSS_HASH_INPUT);
		return ENA_COM_UNSUPPORTED;
	}

	memset(&cmd, 0x0, sizeof(cmd));

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags =
		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_INPUT;
	cmd.u.flow_hash_input.enabled_input_sort =
		ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK |
		ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.control_buffer.address,
				   rss->hash_ctrl_dma_addr);
	if (unlikely(ret)) {
		ena_trc_err("memory address set failed\n");
		return ret;
	}
	cmd.control_buffer.length = sizeof(*hash_ctrl);

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));
	if (unlikely(ret))
		ena_trc_err("Failed to set hash input. error: %d\n", ret);

	return ret;
}

int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_feature_rss_hash_control *hash_ctrl =
		rss->hash_ctrl;
	u16 available_fields = 0;
	int rc, i;

	/* Get the supported hash input */
	rc = ena_com_get_hash_ctrl(ena_dev, 0, NULL);
	if (unlikely(rc))
		return rc;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP4].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP4].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP6].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP6].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP6].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_NOT_IP].fields =
		ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA;

	for (i = 0; i < ENA_ADMIN_RSS_PROTO_NUM; i++) {
		available_fields = hash_ctrl->selected_fields[i].fields &
				   hash_ctrl->supported_fields[i].fields;
		if (available_fields != hash_ctrl->selected_fields[i].fields) {
			ena_trc_err("hash control doesn't support all the desired configuration. proto %x supported %x selected %x\n",
				    i, hash_ctrl->supported_fields[i].fields,
				    hash_ctrl->selected_fields[i].fields);
			return ENA_COM_UNSUPPORTED;
		}
	}

	rc = ena_com_set_hash_ctrl(ena_dev);

	/* In case of failure, restore the old hash ctrl */
	if (unlikely(rc))
		ena_com_get_hash_ctrl(ena_dev, 0, NULL);

	return rc;
}

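/* Worked example of the supported/selected check above: if, for
 * ENA_ADMIN_RSS_TCP4, the device reports supported_fields of only
 * L3_SA | L3_DA, then available_fields (supported & selected) drops the
 * L4 bits, no longer matches the requested selection, and the function
 * bails out with ENA_COM_UNSUPPORTED rather than silently hashing on
 * fewer fields than the caller asked for.
 */
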
int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
			   enum ena_admin_flow_hash_proto proto,
			   u16 hash_fields)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
	u16 supported_fields;
	int rc;

	if (proto >= ENA_ADMIN_RSS_PROTO_NUM) {
		ena_trc_err("Invalid proto num (%u)\n", proto);
		return ENA_COM_INVAL;
	}

	/* Get the ctrl table */
	rc = ena_com_get_hash_ctrl(ena_dev, proto, NULL);
	if (unlikely(rc))
		return rc;

	/* Make sure all the fields are supported */
	supported_fields = hash_ctrl->supported_fields[proto].fields;
	if ((hash_fields & supported_fields) != hash_fields) {
		ena_trc_err("proto %d doesn't support the required fields %x. supports only: %x\n",
			    proto, hash_fields, supported_fields);
	}

	hash_ctrl->selected_fields[proto].fields = hash_fields;

	rc = ena_com_set_hash_ctrl(ena_dev);

	/* In case of failure, restore the old hash ctrl */
	if (unlikely(rc))
		ena_com_get_hash_ctrl(ena_dev, 0, NULL);

	return rc;
}

int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev,
				      u16 entry_idx, u16 entry_value)
{
	struct ena_rss *rss = &ena_dev->rss;

	if (unlikely(entry_idx >= (1 << rss->tbl_log_size)))
		return ENA_COM_INVAL;

	if (unlikely(entry_value > ENA_TOTAL_NUM_QUEUES))
		return ENA_COM_INVAL;

	rss->host_rss_ind_tbl[entry_idx] = entry_value;

	return 0;
}

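/* Sketch of a common round-robin initialization of the redirection
 * table; ENA_RX_RSS_TABLE_LOG_SIZE stands for whatever driver-side
 * constant was passed to ena_com_rss_init(), and num_queues is
 * hypothetical driver state:
 *
 *	for (i = 0; i < (1 << ENA_RX_RSS_TABLE_LOG_SIZE); i++) {
 *		rc = ena_com_indirect_table_fill_entry(ena_dev, i,
 *						       i % num_queues);
 *		if (unlikely(rc))
 *			return rc;
 *	}
 *
 * followed by a single ena_com_indirect_table_set() to push the table.
 */
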
int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev,
						ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG)) {
		ena_trc_dbg("Feature %d isn't supported\n",
			    ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
		return ENA_COM_UNSUPPORTED;
	}

	ret = ena_com_ind_tbl_convert_to_device(ena_dev);
	if (ret) {
		ena_trc_err("Failed to convert host indirection table to device table\n");
		return ret;
	}

	memset(&cmd, 0x0, sizeof(cmd));

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags =
		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	cmd.feat_common.feature_id = ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG;
	cmd.u.ind_table.size = rss->tbl_log_size;
	cmd.u.ind_table.inline_index = 0xFFFFFFFF;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.control_buffer.address,
				   rss->rss_ind_tbl_dma_addr);
	if (unlikely(ret)) {
		ena_trc_err("memory address set failed\n");
		return ret;
	}

	cmd.control_buffer.length = (1ULL << rss->tbl_log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));

	if (unlikely(ret))
		ena_trc_err("Failed to set indirect table. error: %d\n", ret);

	return ret;
}

int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	u32 tbl_size;
	int i, rc;

	tbl_size = (1ULL << rss->tbl_log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG,
				    rss->rss_ind_tbl_dma_addr,
				    tbl_size, 0);
	if (unlikely(rc))
		return rc;

	if (!ind_tbl)
		return 0;

	rc = ena_com_ind_tbl_convert_from_device(ena_dev);
	if (unlikely(rc))
		return rc;

	for (i = 0; i < (1 << rss->tbl_log_size); i++)
		ind_tbl[i] = rss->host_rss_ind_tbl[i];

	return 0;
}

int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size)
{
	int rc;

	memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));

	rc = ena_com_indirect_table_allocate(ena_dev, indr_tbl_log_size);
	if (unlikely(rc))
		goto err_indr_tbl;

	rc = ena_com_hash_key_allocate(ena_dev);
	if (unlikely(rc))
		goto err_hash_key;

	rc = ena_com_hash_ctrl_init(ena_dev);
	if (unlikely(rc))
		goto err_hash_ctrl;

	return 0;

err_hash_ctrl:
	ena_com_hash_key_destroy(ena_dev);
err_hash_key:
	ena_com_indirect_table_destroy(ena_dev);
err_indr_tbl:
	return rc;
}

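/* The goto-based unwind above keeps init and teardown symmetric; a
 * caller sketch pairing the two (ENA_RX_RSS_TABLE_LOG_SIZE again stands
 * for a driver-side constant):
 *
 *	rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE);
 *	if (unlikely(rc))
 *		return rc;
 *	...
 *	ena_com_rss_destroy(ena_dev);
 */
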
void ena_com_rss_destroy(struct ena_com_dev *ena_dev)
{
	ena_com_indirect_table_destroy(ena_dev);
	ena_com_hash_key_destroy(ena_dev);
	ena_com_hash_ctrl_destroy(ena_dev);

	memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
}

int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
			       SZ_4K,
			       host_attr->host_info,
			       host_attr->host_info_dma_addr,
			       host_attr->host_info_dma_handle);
	if (unlikely(!host_attr->host_info))
		return ENA_COM_NO_MEM;

	host_attr->host_info->ena_spec_version = ((ENA_COMMON_SPEC_VERSION_MAJOR <<
		ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) |
		(ENA_COMMON_SPEC_VERSION_MINOR));

	return 0;
}

int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
				u32 debug_area_size)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
			       debug_area_size,
			       host_attr->debug_area_virt_addr,
			       host_attr->debug_area_dma_addr,
			       host_attr->debug_area_dma_handle);
	if (unlikely(!host_attr->debug_area_virt_addr)) {
		host_attr->debug_area_size = 0;
		return ENA_COM_NO_MEM;
	}

	host_attr->debug_area_size = debug_area_size;

	return 0;
}

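/* The debug area and host info are plain coherent-DMA buffers; nothing
 * is sent to the device until ena_com_set_host_attributes() passes their
 * addresses down, so the allocation order relative to admin-queue
 * bring-up only matters for that later call.
 */
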
void ena_com_delete_host_info(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	if (host_attr->host_info) {
		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
				      SZ_4K,
				      host_attr->host_info,
				      host_attr->host_info_dma_addr,
				      host_attr->host_info_dma_handle);
		host_attr->host_info = NULL;
	}
}

void ena_com_delete_debug_area(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	if (host_attr->debug_area_virt_addr) {
		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
				      host_attr->debug_area_size,
				      host_attr->debug_area_virt_addr,
				      host_attr->debug_area_dma_addr,
				      host_attr->debug_area_dma_handle);
		host_attr->debug_area_virt_addr = NULL;
	}
}

int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	/* Host attribute config is called before ena_com_get_dev_attr_feat
	 * so ena_com can't check if the feature is supported.
	 */

	memset(&cmd, 0x0, sizeof(cmd));
	admin_queue = &ena_dev->admin_queue;

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.feat_common.feature_id = ENA_ADMIN_HOST_ATTR_CONFIG;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.u.host_attr.debug_ba,
				   host_attr->debug_area_dma_addr);
	if (unlikely(ret)) {
		ena_trc_err("memory address set failed\n");
		return ret;
	}

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.u.host_attr.os_info_ba,
				   host_attr->host_info_dma_addr);
	if (unlikely(ret)) {
		ena_trc_err("memory address set failed\n");
		return ret;
	}

	cmd.u.host_attr.debug_area_size = host_attr->debug_area_size;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));

	if (unlikely(ret))
		ena_trc_err("Failed to set host attributes: %d\n", ret);

	return ret;
}

/* Interrupt moderation */
bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev)
{
	return ena_com_check_supported_feature_id(ena_dev,
						  ENA_ADMIN_INTERRUPT_MODERATION);
}

int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
						      u32 tx_coalesce_usecs)
{
	if (!ena_dev->intr_delay_resolution) {
		ena_trc_err("Illegal interrupt delay granularity value\n");
		return ENA_COM_FAULT;
	}

	ena_dev->intr_moder_tx_interval = tx_coalesce_usecs /
		ena_dev->intr_delay_resolution;

	return 0;
}

int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
						      u32 rx_coalesce_usecs)
{
	if (!ena_dev->intr_delay_resolution) {
		ena_trc_err("Illegal interrupt delay granularity value\n");
		return ENA_COM_FAULT;
	}

	/* We use the LOWEST entry of the moderation table for storing the
	 * nonadaptive interrupt coalescing values
	 */
	ena_dev->intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval =
		rx_coalesce_usecs / ena_dev->intr_delay_resolution;

	return 0;
}

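/* Worked conversion example for the two setters above: with an
 * intr_delay_resolution of 2 (device units of 2 usec), a request for
 * rx_coalesce_usecs = 200 is stored as 200 / 2 = 100 device units.
 * ena_com_get_nonadaptive_moderation_interval_rx() (further below)
 * returns the stored device-unit value, not microseconds.
 */
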
void ena_com_destroy_interrupt_moderation(struct ena_com_dev *ena_dev)
{
	if (ena_dev->intr_moder_tbl)
		ENA_MEM_FREE(ena_dev->dmadev, ena_dev->intr_moder_tbl);
	ena_dev->intr_moder_tbl = NULL;
}

int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
{
	struct ena_admin_get_feat_resp get_resp;
	u16 delay_resolution;
	int rc;

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_INTERRUPT_MODERATION, 0);

	if (rc) {
		if (rc == ENA_COM_UNSUPPORTED) {
			ena_trc_dbg("Feature %d isn't supported\n",
				    ENA_ADMIN_INTERRUPT_MODERATION);
			rc = 0;
		} else {
			ena_trc_err("Failed to get interrupt moderation admin cmd. rc: %d\n",
				    rc);
		}

		/* no moderation supported, disable adaptive support */
		ena_com_disable_adaptive_moderation(ena_dev);
		return rc;
	}

	rc = ena_com_init_interrupt_moderation_table(ena_dev);
	if (rc)
		goto err;

	/* if moderation is supported by the device, set adaptive moderation */
	delay_resolution = get_resp.u.intr_moderation.intr_delay_resolution;
	ena_com_update_intr_delay_resolution(ena_dev, delay_resolution);
	ena_com_enable_adaptive_moderation(ena_dev);

	return 0;
err:
	ena_com_destroy_interrupt_moderation(ena_dev);
	return rc;
}

void ena_com_config_default_interrupt_moderation_table(struct ena_com_dev *ena_dev)
{
	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;

	if (!intr_moder_tbl)
		return;

	intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval =
		ENA_INTR_LOWEST_USECS;
	intr_moder_tbl[ENA_INTR_MODER_LOWEST].pkts_per_interval =
		ENA_INTR_LOWEST_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_LOWEST].bytes_per_interval =
		ENA_INTR_LOWEST_BYTES;

	intr_moder_tbl[ENA_INTR_MODER_LOW].intr_moder_interval =
		ENA_INTR_LOW_USECS;
	intr_moder_tbl[ENA_INTR_MODER_LOW].pkts_per_interval =
		ENA_INTR_LOW_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_LOW].bytes_per_interval =
		ENA_INTR_LOW_BYTES;

	intr_moder_tbl[ENA_INTR_MODER_MID].intr_moder_interval =
		ENA_INTR_MID_USECS;
	intr_moder_tbl[ENA_INTR_MODER_MID].pkts_per_interval =
		ENA_INTR_MID_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_MID].bytes_per_interval =
		ENA_INTR_MID_BYTES;

	intr_moder_tbl[ENA_INTR_MODER_HIGH].intr_moder_interval =
		ENA_INTR_HIGH_USECS;
	intr_moder_tbl[ENA_INTR_MODER_HIGH].pkts_per_interval =
		ENA_INTR_HIGH_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_HIGH].bytes_per_interval =
		ENA_INTR_HIGH_BYTES;

	intr_moder_tbl[ENA_INTR_MODER_HIGHEST].intr_moder_interval =
		ENA_INTR_HIGHEST_USECS;
	intr_moder_tbl[ENA_INTR_MODER_HIGHEST].pkts_per_interval =
		ENA_INTR_HIGHEST_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_HIGHEST].bytes_per_interval =
		ENA_INTR_HIGHEST_BYTES;
}

unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev)
{
	return ena_dev->intr_moder_tx_interval;
}

unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev)
{
	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;

	return intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval;
}

void ena_com_init_intr_moderation_entry(struct ena_com_dev *ena_dev,
					enum ena_intr_moder_level level,
					struct ena_intr_moder_entry *entry)
{
	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;

	if (level >= ENA_INTR_MAX_NUM_OF_LEVELS)
		return;

	intr_moder_tbl[level].intr_moder_interval = entry->intr_moder_interval;
	if (ena_dev->intr_delay_resolution)
		intr_moder_tbl[level].intr_moder_interval /=
			ena_dev->intr_delay_resolution;
	intr_moder_tbl[level].pkts_per_interval = entry->pkts_per_interval;

	/* use hardcoded value until ethtool supports bytecount parameter */
	if (entry->bytes_per_interval != ENA_INTR_BYTE_COUNT_NOT_SUPPORTED)
		intr_moder_tbl[level].bytes_per_interval = entry->bytes_per_interval;
}

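/* Note the asymmetry with ena_com_get_intr_moderation_entry() below:
 * intervals are stored in device resolution units (divide on set) and
 * converted back to microseconds on get (multiply), so a set/get round
 * trip can lose up to intr_delay_resolution - 1 microseconds to integer
 * truncation.
 */
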
void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev,
				       enum ena_intr_moder_level level,
				       struct ena_intr_moder_entry *entry)
{
	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;

	if (level >= ENA_INTR_MAX_NUM_OF_LEVELS)
		return;

	entry->intr_moder_interval = intr_moder_tbl[level].intr_moder_interval;
	if (ena_dev->intr_delay_resolution)
		entry->intr_moder_interval *= ena_dev->intr_delay_resolution;
	entry->pkts_per_interval =
		intr_moder_tbl[level].pkts_per_interval;
	entry->bytes_per_interval = intr_moder_tbl[level].bytes_per_interval;
}

int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
			    struct ena_admin_feature_llq_desc *llq_features,
			    struct ena_llq_configurations *llq_default_cfg)
{
	int size;
	int rc;

	if (!llq_features->max_llq_num) {
		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
		return 0;
	}

	rc = ena_com_config_llq_info(ena_dev, llq_features, llq_default_cfg);
	if (rc)
		return rc;

	/* Validate the descriptor is not too big */
	size = ena_dev->tx_max_header_size;
	size += ena_dev->llq_info.descs_num_before_header *
		sizeof(struct ena_eth_io_tx_desc);

	if (unlikely(ena_dev->llq_info.desc_list_entry_size < size)) {
		ena_trc_err("The size of the LLQ entry is smaller than needed\n");
		return ENA_COM_INVAL;
	}

	ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV;

	return 0;
}

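/* Worked example of the size check above, assuming the 16-byte TX
 * descriptor layout: with tx_max_header_size = 96 and
 * descs_num_before_header = 2, the pushed part of a packet needs
 * 96 + 2 * sizeof(struct ena_eth_io_tx_desc) = 96 + 32 = 128 bytes, so a
 * desc_list_entry_size of 128 is the smallest LLQ entry that fits.
 */
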