/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2015-2019 Amazon.com, Inc. or its affiliates.

/*****************************************************************************/
/*****************************************************************************/
/* Timeout in microseconds */
#define ADMIN_CMD_TIMEOUT_US (3000000)

#define ENA_ASYNC_QUEUE_DEPTH 16
#define ENA_ADMIN_QUEUE_DEPTH 32

#define ENA_CTRL_MAJOR 0
#define ENA_CTRL_MINOR 0
#define ENA_CTRL_SUB_MINOR 1

#define MIN_ENA_CTRL_VER \
        (((ENA_CTRL_MAJOR) << \
        (ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT)) | \
        ((ENA_CTRL_MINOR) << \
        (ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT)) | \

#define ENA_DMA_ADDR_TO_UINT32_LOW(x) ((u32)((u64)(x)))
#define ENA_DMA_ADDR_TO_UINT32_HIGH(x) ((u32)(((u64)(x)) >> 32))

#define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF

#define ENA_COM_BOUNCE_BUFFER_CNTRL_CNT 4

#define ENA_REGS_ADMIN_INTR_MASK 1

/*****************************************************************************/
/*****************************************************************************/
/*****************************************************************************/

        /* Abort - canceled by the driver */

        ena_wait_event_t wait_event;
        struct ena_admin_acq_entry *user_cqe;

        enum ena_cmd_status status;
        /* status from the device */

struct ena_com_stats_ctx {
        struct ena_admin_aq_get_stats_cmd get_cmd;
        struct ena_admin_acq_get_stats_resp get_resp;
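
/* Write a DMA address into an ena_common_mem_addr, validating that the
 * address fits within the DMA address width the device supports.
 */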
static int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
                                struct ena_common_mem_addr *ena_addr,

        if ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr) {
                ena_trc_err("dma address has more bits than the device supports\n");

        ena_addr->mem_addr_low = lower_32_bits(addr);
        ena_addr->mem_addr_high = upper_32_bits(addr);
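
/* Allocate the admin submission queue ring in DMA-coherent memory. */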
static int ena_com_admin_init_sq(struct ena_com_admin_queue *queue)

        struct ena_com_admin_sq *sq = &queue->sq;
        u16 size = ADMIN_SQ_SIZE(queue->q_depth);

        ENA_MEM_ALLOC_COHERENT(queue->q_dmadev, size, sq->entries, sq->dma_addr,

                ena_trc_err("memory allocation failed\n");
                return ENA_COM_NO_MEM;

static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue)

        struct ena_com_admin_cq *cq = &queue->cq;
        u16 size = ADMIN_CQ_SIZE(queue->q_depth);

        ENA_MEM_ALLOC_COHERENT(queue->q_dmadev, size, cq->entries, cq->dma_addr,

                ena_trc_err("memory allocation failed\n");
                return ENA_COM_NO_MEM;
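
/* Allocate the AENQ ring, program its base address and capability
 * registers, and register the caller-supplied AENQ handlers.
 */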
static int ena_com_admin_init_aenq(struct ena_com_dev *dev,
                                   struct ena_aenq_handlers *aenq_handlers)

        struct ena_com_aenq *aenq = &dev->aenq;
        u32 addr_low, addr_high, aenq_caps;

        dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
        size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH);
        ENA_MEM_ALLOC_COHERENT(dev->dmadev, size,

        if (!aenq->entries) {
                ena_trc_err("memory allocation failed\n");
                return ENA_COM_NO_MEM;

        aenq->head = aenq->q_depth;

        addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(aenq->dma_addr);
        addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(aenq->dma_addr);

        ENA_REG_WRITE32(dev->bus, addr_low, dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF);
        ENA_REG_WRITE32(dev->bus, addr_high, dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF);

        aenq_caps |= dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
        aenq_caps |= (sizeof(struct ena_admin_aenq_entry) <<
                      ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
                      ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
        ENA_REG_WRITE32(dev->bus, aenq_caps, dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);

        if (unlikely(!aenq_handlers)) {
                ena_trc_err("aenq handlers pointer is NULL\n");
                return ENA_COM_INVAL;

        aenq->aenq_handlers = aenq_handlers;

static void comp_ctxt_release(struct ena_com_admin_queue *queue,
                              struct ena_comp_ctx *comp_ctx)

        comp_ctx->occupied = false;
        ATOMIC32_DEC(&queue->outstanding_cmds);
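
/* Look up the completion context for @command_id; when @capture is true,
 * mark it occupied and count it as an outstanding command.
 */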
static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *queue,
                                          u16 command_id, bool capture)

        if (unlikely(command_id >= queue->q_depth)) {
                ena_trc_err("command id is larger than the queue size. cmd_id: %u queue size %d\n",
                            command_id, queue->q_depth);

        if (unlikely(!queue->comp_ctx)) {
                ena_trc_err("Completion context is NULL\n");

        if (unlikely(queue->comp_ctx[command_id].occupied && capture)) {
                ena_trc_err("Completion context is occupied\n");

        ATOMIC32_INC(&queue->outstanding_cmds);
        queue->comp_ctx[command_id].occupied = true;

        return &queue->comp_ctx[command_id];
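
/* Build and queue a single admin command: reserve a completion context,
 * copy the descriptor into the SQ at the masked tail, flip the phase bit
 * on wrap-around and ring the SQ doorbell. Called with the admin queue
 * lock held.
 */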
static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
                                                       struct ena_admin_aq_entry *cmd,
                                                       size_t cmd_size_in_bytes,
                                                       struct ena_admin_acq_entry *comp,
                                                       size_t comp_size_in_bytes)

        struct ena_comp_ctx *comp_ctx;
        u16 tail_masked, cmd_id;

        queue_size_mask = admin_queue->q_depth - 1;

        tail_masked = admin_queue->sq.tail & queue_size_mask;

        /* In case of queue FULL */
        cnt = (u16)ATOMIC32_READ(&admin_queue->outstanding_cmds);
        if (cnt >= admin_queue->q_depth) {
                ena_trc_dbg("admin queue is full.\n");
                admin_queue->stats.out_of_space++;
                return ERR_PTR(ENA_COM_NO_SPACE);

        cmd_id = admin_queue->curr_cmd_id;

        cmd->aq_common_descriptor.flags |= admin_queue->sq.phase &
                ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK;

        cmd->aq_common_descriptor.command_id |= cmd_id &
                ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;

        comp_ctx = get_comp_ctxt(admin_queue, cmd_id, true);
        if (unlikely(!comp_ctx))
                return ERR_PTR(ENA_COM_INVAL);

        comp_ctx->status = ENA_CMD_SUBMITTED;
        comp_ctx->comp_size = (u32)comp_size_in_bytes;
        comp_ctx->user_cqe = comp;
        comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode;

        ENA_WAIT_EVENT_CLEAR(comp_ctx->wait_event);

        memcpy(&admin_queue->sq.entries[tail_masked], cmd, cmd_size_in_bytes);

        admin_queue->curr_cmd_id = (admin_queue->curr_cmd_id + 1) &

        admin_queue->sq.tail++;
        admin_queue->stats.submitted_cmd++;

        if (unlikely((admin_queue->sq.tail & queue_size_mask) == 0))
                admin_queue->sq.phase = !admin_queue->sq.phase;

        ENA_DB_SYNC(&admin_queue->sq.mem_handle);
        ENA_REG_WRITE32(admin_queue->bus, admin_queue->sq.tail,
                        admin_queue->sq.db_addr);

static int ena_com_init_comp_ctxt(struct ena_com_admin_queue *queue)

        size_t size = queue->q_depth * sizeof(struct ena_comp_ctx);
        struct ena_comp_ctx *comp_ctx;

        queue->comp_ctx = ENA_MEM_ALLOC(queue->q_dmadev, size);
        if (unlikely(!queue->comp_ctx)) {
                ena_trc_err("memory allocation failed\n");
                return ENA_COM_NO_MEM;

        for (i = 0; i < queue->q_depth; i++) {
                comp_ctx = get_comp_ctxt(queue, i, false);

                ENA_WAIT_EVENT_INIT(comp_ctx->wait_event);

static struct ena_comp_ctx *ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
                                                     struct ena_admin_aq_entry *cmd,
                                                     size_t cmd_size_in_bytes,
                                                     struct ena_admin_acq_entry *comp,
                                                     size_t comp_size_in_bytes)

        unsigned long flags = 0;
        struct ena_comp_ctx *comp_ctx;

        ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
        if (unlikely(!admin_queue->running_state)) {
                ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
                return ERR_PTR(ENA_COM_NO_DEVICE);

        comp_ctx = __ena_com_submit_admin_cmd(admin_queue, cmd,

        if (IS_ERR(comp_ctx))
                admin_queue->running_state = false;
        ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);

static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
                              struct ena_com_create_io_ctx *ctx,
                              struct ena_com_io_sq *io_sq)

        memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr));

        io_sq->dma_addr_bits = (u8)ena_dev->dma_addr_bits;
        io_sq->desc_entry_size =
                (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
                sizeof(struct ena_eth_io_tx_desc) :
                sizeof(struct ena_eth_io_rx_desc);

        size = io_sq->desc_entry_size * io_sq->q_depth;
        io_sq->bus = ena_dev->bus;

        if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
                ENA_MEM_ALLOC_COHERENT_NODE(ena_dev->dmadev,
                                            io_sq->desc_addr.virt_addr,
                                            io_sq->desc_addr.phys_addr,
                                            io_sq->desc_addr.mem_handle,

                if (!io_sq->desc_addr.virt_addr) {
                        ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
                                               io_sq->desc_addr.virt_addr,
                                               io_sq->desc_addr.phys_addr,
                                               io_sq->desc_addr.mem_handle);

                if (!io_sq->desc_addr.virt_addr) {
                        ena_trc_err("memory allocation failed\n");
                        return ENA_COM_NO_MEM;

        if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
                /* Allocate bounce buffers */
                io_sq->bounce_buf_ctrl.buffer_size =
                        ena_dev->llq_info.desc_list_entry_size;
                io_sq->bounce_buf_ctrl.buffers_num =
                        ENA_COM_BOUNCE_BUFFER_CNTRL_CNT;
                io_sq->bounce_buf_ctrl.next_to_use = 0;

                size = io_sq->bounce_buf_ctrl.buffer_size *
                        io_sq->bounce_buf_ctrl.buffers_num;

                ENA_MEM_ALLOC_NODE(ena_dev->dmadev,
                                   io_sq->bounce_buf_ctrl.base_buffer,

                if (!io_sq->bounce_buf_ctrl.base_buffer)
                        io_sq->bounce_buf_ctrl.base_buffer = ENA_MEM_ALLOC(ena_dev->dmadev, size);

                if (!io_sq->bounce_buf_ctrl.base_buffer) {
                        ena_trc_err("bounce buffer memory allocation failed\n");
                        return ENA_COM_NO_MEM;

                memcpy(&io_sq->llq_info, &ena_dev->llq_info,
                       sizeof(io_sq->llq_info));
                /* Initialize the first bounce buffer */
                io_sq->llq_buf_ctrl.curr_bounce_buf =
                        ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
                memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
                       0x0, io_sq->llq_info.desc_list_entry_size);
                io_sq->llq_buf_ctrl.descs_left_in_line =
                        io_sq->llq_info.descs_num_before_header;

                if (io_sq->llq_info.max_entries_in_tx_burst > 0)
                        io_sq->entries_in_tx_burst_left =
                                io_sq->llq_info.max_entries_in_tx_burst;

        io_sq->next_to_comp = 0;

static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
                              struct ena_com_create_io_ctx *ctx,
                              struct ena_com_io_cq *io_cq)

        memset(&io_cq->cdesc_addr, 0x0, sizeof(io_cq->cdesc_addr));

        /* Use the basic completion descriptor for Rx */
        io_cq->cdesc_entry_size_in_bytes =
                (io_cq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
                sizeof(struct ena_eth_io_tx_cdesc) :
                sizeof(struct ena_eth_io_rx_cdesc_base);

        size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
        io_cq->bus = ena_dev->bus;

        ENA_MEM_ALLOC_COHERENT_NODE(ena_dev->dmadev,
                                    io_cq->cdesc_addr.virt_addr,
                                    io_cq->cdesc_addr.phys_addr,
                                    io_cq->cdesc_addr.mem_handle,

        if (!io_cq->cdesc_addr.virt_addr) {
                ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
                                       io_cq->cdesc_addr.virt_addr,
                                       io_cq->cdesc_addr.phys_addr,
                                       io_cq->cdesc_addr.mem_handle);

        if (!io_cq->cdesc_addr.virt_addr) {
                ena_trc_err("memory allocation failed\n");
                return ENA_COM_NO_MEM;

static void ena_com_handle_single_admin_completion(struct ena_com_admin_queue *admin_queue,
                                                   struct ena_admin_acq_entry *cqe)

        struct ena_comp_ctx *comp_ctx;

        cmd_id = cqe->acq_common_descriptor.command &
                ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;

        comp_ctx = get_comp_ctxt(admin_queue, cmd_id, false);
        if (unlikely(!comp_ctx)) {
                ena_trc_err("comp_ctx is NULL. Changing the admin queue running state\n");
                admin_queue->running_state = false;

        comp_ctx->status = ENA_CMD_COMPLETED;
        comp_ctx->comp_status = cqe->acq_common_descriptor.status;

        if (comp_ctx->user_cqe)
                memcpy(comp_ctx->user_cqe, (void *)cqe, comp_ctx->comp_size);

        if (!admin_queue->polling)
                ENA_WAIT_EVENT_SIGNAL(comp_ctx->wait_event);
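
/* Walk the admin completion queue from the current head, consuming every
 * entry whose phase bit matches the expected phase, then advance the head
 * and phase accordingly.
 */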
static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_queue)

        struct ena_admin_acq_entry *cqe = NULL;

        head_masked = admin_queue->cq.head & (admin_queue->q_depth - 1);
        phase = admin_queue->cq.phase;

        cqe = &admin_queue->cq.entries[head_masked];

        /* Go over all the completions */
        while ((READ_ONCE8(cqe->acq_common_descriptor.flags) &
                ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
                /* Do not read the rest of the completion entry until the
                 * phase bit has been validated
                ena_com_handle_single_admin_completion(admin_queue, cqe);

                if (unlikely(head_masked == admin_queue->q_depth)) {

                cqe = &admin_queue->cq.entries[head_masked];

        admin_queue->cq.head += comp_num;
        admin_queue->cq.phase = phase;
        admin_queue->sq.head += comp_num;
        admin_queue->stats.completed_cmd += comp_num;
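
/* Map a device completion status code onto an ENA_COM_* error value. */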
static int ena_com_comp_status_to_errno(u8 comp_status)

        if (unlikely(comp_status != 0))
                ena_trc_err("admin command failed[%u]\n", comp_status);

        switch (comp_status) {
        case ENA_ADMIN_SUCCESS:
        case ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE:
                return ENA_COM_NO_MEM;
        case ENA_ADMIN_UNSUPPORTED_OPCODE:
                return ENA_COM_UNSUPPORTED;
        case ENA_ADMIN_BAD_OPCODE:
        case ENA_ADMIN_MALFORMED_REQUEST:
        case ENA_ADMIN_ILLEGAL_PARAMETER:
        case ENA_ADMIN_UNKNOWN_ERROR:
                return ENA_COM_INVAL;

        return ENA_COM_INVAL;

static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx,
                                                     struct ena_com_admin_queue *admin_queue)

        unsigned long flags = 0;

        timeout = ENA_GET_SYSTEM_TIMEOUT(admin_queue->completion_timeout);

                ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
                ena_com_handle_admin_completion(admin_queue);
                ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);

                if (comp_ctx->status != ENA_CMD_SUBMITTED)

                if (ENA_TIME_EXPIRE(timeout)) {
                        ena_trc_err("Wait for completion (polling) timeout\n");
                        /* ENA didn't have any completion */
                        ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
                        admin_queue->stats.no_completion++;
                        admin_queue->running_state = false;
                        ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);

                        ret = ENA_COM_TIMER_EXPIRED;

                ENA_MSLEEP(ENA_POLL_MS);

        if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
                ena_trc_err("Command was aborted\n");
                ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
                admin_queue->stats.aborted_cmd++;
                ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
                ret = ENA_COM_NO_DEVICE;

        ENA_WARN(comp_ctx->status != ENA_CMD_COMPLETED,
                 "Invalid comp status %d\n", comp_ctx->status);

        ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);

        comp_ctxt_release(admin_queue, comp_ctx);
 * Set the LLQ configuration in the firmware.
 *
 * The driver provides only the enabled feature values to the device,
 * which in turn checks if they are supported.
static int ena_com_set_llq(struct ena_com_dev *ena_dev)

        struct ena_com_admin_queue *admin_queue;
        struct ena_admin_set_feat_cmd cmd;
        struct ena_admin_set_feat_resp resp;
        struct ena_com_llq_info *llq_info = &ena_dev->llq_info;

        memset(&cmd, 0x0, sizeof(cmd));
        admin_queue = &ena_dev->admin_queue;

        cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
        cmd.feat_common.feature_id = ENA_ADMIN_LLQ;

        cmd.u.llq.header_location_ctrl_enabled = llq_info->header_location_ctrl;
        cmd.u.llq.entry_size_ctrl_enabled = llq_info->desc_list_entry_size_ctrl;
        cmd.u.llq.desc_num_before_header_enabled = llq_info->descs_num_before_header;
        cmd.u.llq.descriptors_stride_ctrl_enabled = llq_info->desc_stride_ctrl;

        ret = ena_com_execute_admin_command(admin_queue,
                                            (struct ena_admin_aq_entry *)&cmd,
                                            (struct ena_admin_acq_entry *)&resp,

                ena_trc_err("Failed to set LLQ configurations: %d\n", ret);
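
/* Negotiate the LLQ configuration: for each field, use the driver's default
 * when the device supports it, otherwise fall back to the best supported
 * option and log the substitution, then push the result to the device via
 * ena_com_set_llq().
 */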
static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
                                   struct ena_admin_feature_llq_desc *llq_features,
                                   struct ena_llq_configurations *llq_default_cfg)

        struct ena_com_llq_info *llq_info = &ena_dev->llq_info;

        memset(llq_info, 0, sizeof(*llq_info));

        supported_feat = llq_features->header_location_ctrl_supported;

        if (likely(supported_feat & llq_default_cfg->llq_header_location)) {
                llq_info->header_location_ctrl =
                        llq_default_cfg->llq_header_location;

                ena_trc_err("Invalid header location control, supported: 0x%x\n",

        if (likely(llq_info->header_location_ctrl == ENA_ADMIN_INLINE_HEADER)) {
                supported_feat = llq_features->descriptors_stride_ctrl_supported;
                if (likely(supported_feat & llq_default_cfg->llq_stride_ctrl)) {
                        llq_info->desc_stride_ctrl = llq_default_cfg->llq_stride_ctrl;

                        if (supported_feat & ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY) {
                                llq_info->desc_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
                        } else if (supported_feat & ENA_ADMIN_SINGLE_DESC_PER_ENTRY) {
                                llq_info->desc_stride_ctrl = ENA_ADMIN_SINGLE_DESC_PER_ENTRY;

                                ena_trc_err("Invalid desc_stride_ctrl, supported: 0x%x\n",

                        ena_trc_err("Default llq stride ctrl is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
                                    llq_default_cfg->llq_stride_ctrl,
                                    llq_info->desc_stride_ctrl);

                llq_info->desc_stride_ctrl = 0;

        supported_feat = llq_features->entry_size_ctrl_supported;
        if (likely(supported_feat & llq_default_cfg->llq_ring_entry_size)) {
                llq_info->desc_list_entry_size_ctrl = llq_default_cfg->llq_ring_entry_size;
                llq_info->desc_list_entry_size = llq_default_cfg->llq_ring_entry_size_value;

                if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_128B) {
                        llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
                        llq_info->desc_list_entry_size = 128;
                } else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_192B) {
                        llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_192B;
                        llq_info->desc_list_entry_size = 192;
                } else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_256B) {
                        llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_256B;
                        llq_info->desc_list_entry_size = 256;

                        ena_trc_err("Invalid entry_size_ctrl, supported: 0x%x\n", supported_feat);

                ena_trc_err("Default llq ring entry size is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
                            llq_default_cfg->llq_ring_entry_size,
                            llq_info->desc_list_entry_size);

        if (unlikely(llq_info->desc_list_entry_size & 0x7)) {
                /* The desc list entry size should be a whole multiple of 8.
                 * This requirement comes from __iowrite64_copy()
                ena_trc_err("illegal entry size %d\n",
                            llq_info->desc_list_entry_size);

        if (llq_info->desc_stride_ctrl == ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY)
                llq_info->descs_per_entry = llq_info->desc_list_entry_size /
                        sizeof(struct ena_eth_io_tx_desc);

                llq_info->descs_per_entry = 1;

        supported_feat = llq_features->desc_num_before_header_supported;
        if (likely(supported_feat & llq_default_cfg->llq_num_decs_before_header)) {
                llq_info->descs_num_before_header = llq_default_cfg->llq_num_decs_before_header;

                if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2) {
                        llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
                } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1) {
                        llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1;
                } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4) {
                        llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4;
                } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8) {
                        llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8;

                        ena_trc_err("Invalid descs_num_before_header, supported: 0x%x\n",

                ena_trc_err("Default llq num descs before header is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
                            llq_default_cfg->llq_num_decs_before_header,
                            llq_info->descs_num_before_header);

        llq_info->max_entries_in_tx_burst =
                (u16)(llq_features->max_tx_burst_size / llq_default_cfg->llq_ring_entry_size_value);

        rc = ena_com_set_llq(ena_dev);

                ena_trc_err("Cannot set LLQ configuration: %d\n", rc);

static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,
                                                        struct ena_com_admin_queue *admin_queue)

        unsigned long flags = 0;

        ENA_WAIT_EVENT_WAIT(comp_ctx->wait_event,
                            admin_queue->completion_timeout);
        /* In case the command wasn't completed, find out the root cause.
         * There might be 2 kinds of errors:
         * 1) No completion (timeout reached)
         * 2) There is a completion but the driver didn't receive the MSI-X interrupt.
        if (unlikely(comp_ctx->status == ENA_CMD_SUBMITTED)) {
                ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
                ena_com_handle_admin_completion(admin_queue);
                admin_queue->stats.no_completion++;
                ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);

                if (comp_ctx->status == ENA_CMD_COMPLETED) {
                        ena_trc_err("The ena device sent a completion but the driver didn't receive a MSI-X interrupt (cmd %d), autopolling mode is %s\n",
                                    comp_ctx->cmd_opcode, admin_queue->auto_polling ? "ON" : "OFF");
                        /* Check if fallback to polling is enabled */
                        if (admin_queue->auto_polling)
                                admin_queue->polling = true;

                        ena_trc_err("The ena device didn't send a completion for the admin cmd %d status %d\n",
                                    comp_ctx->cmd_opcode, comp_ctx->status);
                /* Check if the driver shifted to polling mode.
                 * This happens when there is a completion without an interrupt
                 * and autopolling mode is enabled. In that case, continue normal execution.
                if (!admin_queue->polling) {
                        admin_queue->running_state = false;
                        ret = ENA_COM_TIMER_EXPIRED;

        ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);

        comp_ctxt_release(admin_queue, comp_ctx);
/* This method reads a hardware device register by posting a write and
 * waiting for the response.
 * On timeout the function returns ENA_MMIO_READ_TIMEOUT
static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)

        struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
        volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp =
                mmio_read->read_resp;
        u32 mmio_read_reg, ret, i;
        unsigned long flags = 0;
        u32 timeout = mmio_read->reg_read_to;

                timeout = ENA_REG_READ_TIMEOUT;

        /* If readless is disabled, perform regular read */
        if (!mmio_read->readless_supported)
                return ENA_REG_READ32(ena_dev->bus, ena_dev->reg_bar + offset);

        ENA_SPINLOCK_LOCK(mmio_read->lock, flags);
        mmio_read->seq_num++;

        read_resp->req_id = mmio_read->seq_num + 0xDEAD;
        mmio_read_reg = (offset << ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT) &
                ENA_REGS_MMIO_REG_READ_REG_OFF_MASK;
        mmio_read_reg |= mmio_read->seq_num &
                ENA_REGS_MMIO_REG_READ_REQ_ID_MASK;

        ENA_REG_WRITE32(ena_dev->bus, mmio_read_reg,
                        ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);

        for (i = 0; i < timeout; i++) {
                if (READ_ONCE16(read_resp->req_id) == mmio_read->seq_num)

        if (unlikely(i == timeout)) {
                ena_trc_err("reg read timed out. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n",
                ret = ENA_MMIO_READ_TIMEOUT;

        if (read_resp->reg_off != offset) {
                ena_trc_err("Read failure: wrong offset provided\n");
                ret = ENA_MMIO_READ_TIMEOUT;

                ret = read_resp->reg_val;

        ENA_SPINLOCK_UNLOCK(mmio_read->lock, flags);
/* There are two ways to wait for a completion.
 * Polling mode - wait until the completion is available.
 * Async mode - wait on a wait queue until the completion is ready
 * (or the timeout expires).
 * In async mode it is expected that the IRQ handler calls
 * ena_com_handle_admin_completion to mark the completions.
static int ena_com_wait_and_process_admin_cq(struct ena_comp_ctx *comp_ctx,
                                             struct ena_com_admin_queue *admin_queue)

        if (admin_queue->polling)
                return ena_com_wait_and_process_admin_cq_polling(comp_ctx,

        return ena_com_wait_and_process_admin_cq_interrupts(comp_ctx,

static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
                                 struct ena_com_io_sq *io_sq)

        struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
        struct ena_admin_aq_destroy_sq_cmd destroy_cmd;
        struct ena_admin_acq_destroy_sq_resp_desc destroy_resp;

        memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));

        if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
                direction = ENA_ADMIN_SQ_DIRECTION_TX;

                direction = ENA_ADMIN_SQ_DIRECTION_RX;

        destroy_cmd.sq.sq_identity |= (direction <<
                ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT) &
                ENA_ADMIN_SQ_SQ_DIRECTION_MASK;

        destroy_cmd.sq.sq_idx = io_sq->idx;
        destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_SQ;

        ret = ena_com_execute_admin_command(admin_queue,
                                            (struct ena_admin_aq_entry *)&destroy_cmd,
                                            (struct ena_admin_acq_entry *)&destroy_resp,
                                            sizeof(destroy_resp));

        if (unlikely(ret && (ret != ENA_COM_NO_DEVICE)))
                ena_trc_err("failed to destroy io sq error: %d\n", ret);

static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
                                  struct ena_com_io_sq *io_sq,
                                  struct ena_com_io_cq *io_cq)

        if (io_cq->cdesc_addr.virt_addr) {
                size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;

                ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
                                      io_cq->cdesc_addr.virt_addr,
                                      io_cq->cdesc_addr.phys_addr,
                                      io_cq->cdesc_addr.mem_handle);

                io_cq->cdesc_addr.virt_addr = NULL;

        if (io_sq->desc_addr.virt_addr) {
                size = io_sq->desc_entry_size * io_sq->q_depth;

                ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
                                      io_sq->desc_addr.virt_addr,
                                      io_sq->desc_addr.phys_addr,
                                      io_sq->desc_addr.mem_handle);

                io_sq->desc_addr.virt_addr = NULL;

        if (io_sq->bounce_buf_ctrl.base_buffer) {
                ENA_MEM_FREE(ena_dev->dmadev,
                             io_sq->bounce_buf_ctrl.base_buffer,
                             (io_sq->llq_info.desc_list_entry_size * ENA_COM_BOUNCE_BUFFER_CNTRL_CNT));
                io_sq->bounce_buf_ctrl.base_buffer = NULL;
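
/* Poll the device status register until the reset-in-progress bit reaches
 * the expected state or the timeout (given in 100ms units) expires.
 */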
static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,

        /* Convert timeout from resolution of 100ms to ENA_POLL_MS */
        timeout = (timeout * 100) / ENA_POLL_MS;

        for (i = 0; i < timeout; i++) {
                val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);

                if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) {
                        ena_trc_err("Reg read timeout occurred\n");
                        return ENA_COM_TIMER_EXPIRED;

                if ((val & ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK) ==

                ENA_MSLEEP(ENA_POLL_MS);

        return ENA_COM_TIMER_EXPIRED;

static bool ena_com_check_supported_feature_id(struct ena_com_dev *ena_dev,
                                               enum ena_admin_aq_feature_id feature_id)

        u32 feature_mask = 1 << feature_id;
        /* Device attributes are always supported */
        if ((feature_id != ENA_ADMIN_DEVICE_ATTRIBUTES) &&
            !(ena_dev->supported_features & feature_mask))

static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
                                  struct ena_admin_get_feat_resp *get_resp,
                                  enum ena_admin_aq_feature_id feature_id,
                                  dma_addr_t control_buf_dma_addr,
                                  u32 control_buff_size,

        struct ena_com_admin_queue *admin_queue;
        struct ena_admin_get_feat_cmd get_cmd;

        if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
                ena_trc_dbg("Feature %d isn't supported\n", feature_id);
                return ENA_COM_UNSUPPORTED;

        memset(&get_cmd, 0x0, sizeof(get_cmd));
        admin_queue = &ena_dev->admin_queue;

        get_cmd.aq_common_descriptor.opcode = ENA_ADMIN_GET_FEATURE;

        if (control_buff_size)
                get_cmd.aq_common_descriptor.flags =
                        ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;

                get_cmd.aq_common_descriptor.flags = 0;

        ret = ena_com_mem_addr_set(ena_dev,
                                   &get_cmd.control_buffer.address,
                                   control_buf_dma_addr);

                ena_trc_err("memory address set failed\n");

        get_cmd.control_buffer.length = control_buff_size;
        get_cmd.feat_common.feature_version = feature_ver;
        get_cmd.feat_common.feature_id = feature_id;

        ret = ena_com_execute_admin_command(admin_queue,
                                            (struct ena_admin_aq_entry *)
                                            (struct ena_admin_acq_entry *)

                ena_trc_err("Failed to submit get_feature command %d error: %d\n",

static int ena_com_get_feature(struct ena_com_dev *ena_dev,
                               struct ena_admin_get_feat_resp *get_resp,
                               enum ena_admin_aq_feature_id feature_id,

        return ena_com_get_feature_ex(ena_dev,

static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)

        struct ena_rss *rss = &ena_dev->rss;

        ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
                               sizeof(*rss->hash_key),
                               rss->hash_key_dma_addr,
                               rss->hash_key_mem_handle);

        if (unlikely(!rss->hash_key))
                return ENA_COM_NO_MEM;

static void ena_com_hash_key_destroy(struct ena_com_dev *ena_dev)

        struct ena_rss *rss = &ena_dev->rss;

        ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
                              sizeof(*rss->hash_key),
                              rss->hash_key_dma_addr,
                              rss->hash_key_mem_handle);
        rss->hash_key = NULL;

static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev)

        struct ena_rss *rss = &ena_dev->rss;

        ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
                               sizeof(*rss->hash_ctrl),
                               rss->hash_ctrl_dma_addr,
                               rss->hash_ctrl_mem_handle);

        if (unlikely(!rss->hash_ctrl))
                return ENA_COM_NO_MEM;

static void ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev)

        struct ena_rss *rss = &ena_dev->rss;

        ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
                              sizeof(*rss->hash_ctrl),
                              rss->hash_ctrl_dma_addr,
                              rss->hash_ctrl_mem_handle);
        rss->hash_ctrl = NULL;

static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,

        struct ena_rss *rss = &ena_dev->rss;
        struct ena_admin_get_feat_resp get_resp;

        ret = ena_com_get_feature(ena_dev, &get_resp,
                                  ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG, 0);

        if ((get_resp.u.ind_table.min_size > log_size) ||
            (get_resp.u.ind_table.max_size < log_size)) {
                ena_trc_err("indirect table size doesn't fit. requested size: %d while min is:%d and max %d\n",
                            1 << get_resp.u.ind_table.min_size,
                            1 << get_resp.u.ind_table.max_size);
                return ENA_COM_INVAL;

        tbl_size = (1ULL << log_size) *
                sizeof(struct ena_admin_rss_ind_table_entry);

        ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
                               rss->rss_ind_tbl_dma_addr,
                               rss->rss_ind_tbl_mem_handle);
        if (unlikely(!rss->rss_ind_tbl))

        tbl_size = (1ULL << log_size) * sizeof(u16);
        rss->host_rss_ind_tbl =
                ENA_MEM_ALLOC(ena_dev->dmadev, tbl_size);
        if (unlikely(!rss->host_rss_ind_tbl))

        rss->tbl_log_size = log_size;

        tbl_size = (1ULL << log_size) *
                sizeof(struct ena_admin_rss_ind_table_entry);

        ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
                              rss->rss_ind_tbl_dma_addr,
                              rss->rss_ind_tbl_mem_handle);
        rss->rss_ind_tbl = NULL;

        rss->tbl_log_size = 0;
        return ENA_COM_NO_MEM;

static void ena_com_indirect_table_destroy(struct ena_com_dev *ena_dev)

        struct ena_rss *rss = &ena_dev->rss;
        size_t tbl_size = (1ULL << rss->tbl_log_size) *
                sizeof(struct ena_admin_rss_ind_table_entry);

        if (rss->rss_ind_tbl)
                ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
                                      rss->rss_ind_tbl_dma_addr,
                                      rss->rss_ind_tbl_mem_handle);
        rss->rss_ind_tbl = NULL;

        if (rss->host_rss_ind_tbl)
                ENA_MEM_FREE(ena_dev->dmadev,
                             rss->host_rss_ind_tbl,
                             ((1ULL << rss->tbl_log_size) * sizeof(u16)));
        rss->host_rss_ind_tbl = NULL;

static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
                                struct ena_com_io_sq *io_sq, u16 cq_idx)

        struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
        struct ena_admin_aq_create_sq_cmd create_cmd;
        struct ena_admin_acq_create_sq_resp_desc cmd_completion;

        memset(&create_cmd, 0x0, sizeof(create_cmd));

        create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_SQ;

        if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
                direction = ENA_ADMIN_SQ_DIRECTION_TX;

                direction = ENA_ADMIN_SQ_DIRECTION_RX;

        create_cmd.sq_identity |= (direction <<
                ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT) &
                ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK;

        create_cmd.sq_caps_2 |= io_sq->mem_queue_type &
                ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK;

        create_cmd.sq_caps_2 |= (ENA_ADMIN_COMPLETION_POLICY_DESC <<
                ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT) &
                ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK;

        create_cmd.sq_caps_3 |=
                ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK;

        create_cmd.cq_idx = cq_idx;
        create_cmd.sq_depth = io_sq->q_depth;

        if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
                ret = ena_com_mem_addr_set(ena_dev,
                                           io_sq->desc_addr.phys_addr);
                if (unlikely(ret)) {
                        ena_trc_err("memory address set failed\n");

        ret = ena_com_execute_admin_command(admin_queue,
                                            (struct ena_admin_aq_entry *)&create_cmd,
                                            (struct ena_admin_acq_entry *)&cmd_completion,
                                            sizeof(cmd_completion));
        if (unlikely(ret)) {
                ena_trc_err("Failed to create IO SQ. error: %d\n", ret);

        io_sq->idx = cmd_completion.sq_idx;

        io_sq->db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
                (uintptr_t)cmd_completion.sq_doorbell_offset);

        if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
                io_sq->header_addr = (u8 __iomem *)((uintptr_t)ena_dev->mem_bar
                        + cmd_completion.llq_headers_offset);

                io_sq->desc_addr.pbuf_dev_addr =
                        (u8 __iomem *)((uintptr_t)ena_dev->mem_bar +
                        cmd_completion.llq_descriptors_offset);

        ena_trc_dbg("created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth);

static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev)

        struct ena_rss *rss = &ena_dev->rss;
        struct ena_com_io_sq *io_sq;

        for (i = 0; i < 1 << rss->tbl_log_size; i++) {
                qid = rss->host_rss_ind_tbl[i];
                if (qid >= ENA_TOTAL_NUM_QUEUES)
                        return ENA_COM_INVAL;

                io_sq = &ena_dev->io_sq_queues[qid];

                if (io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX)
                        return ENA_COM_INVAL;

                rss->rss_ind_tbl[i].cq_idx = io_sq->idx;
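
/* Translate the device-side indirection table (CQ indices) back into host
 * queue ids, using a reverse lookup table built from io_sq_queues.
 */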
static int ena_com_ind_tbl_convert_from_device(struct ena_com_dev *ena_dev)

        u16 dev_idx_to_host_tbl[ENA_TOTAL_NUM_QUEUES] = { (u16)-1 };
        struct ena_rss *rss = &ena_dev->rss;

        for (i = 0; i < ENA_TOTAL_NUM_QUEUES; i++)
                dev_idx_to_host_tbl[ena_dev->io_sq_queues[i].idx] = i;

        for (i = 0; i < 1 << rss->tbl_log_size; i++) {
                if (rss->rss_ind_tbl[i].cq_idx >= ENA_TOTAL_NUM_QUEUES)
                        return ENA_COM_INVAL;
                idx = (u8)rss->rss_ind_tbl[i].cq_idx;

                if (dev_idx_to_host_tbl[idx] >= ENA_TOTAL_NUM_QUEUES)
                        return ENA_COM_INVAL;
                rss->host_rss_ind_tbl[i] = dev_idx_to_host_tbl[idx];

static int ena_com_init_interrupt_moderation_table(struct ena_com_dev *ena_dev)

        size = sizeof(struct ena_intr_moder_entry) * ENA_INTR_MAX_NUM_OF_LEVELS;

        ena_dev->intr_moder_tbl = ENA_MEM_ALLOC(ena_dev->dmadev, size);
        if (!ena_dev->intr_moder_tbl)
                return ENA_COM_NO_MEM;

        ena_com_config_default_interrupt_moderation_table(ena_dev);
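
/* Rescale the stored interrupt moderation intervals by the device's delay
 * resolution; a zero resolution from the device is treated as 1 usec.
 */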
static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev,
                                                 u16 intr_delay_resolution)

        struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;

        if (!intr_delay_resolution) {
                ena_trc_err("Illegal intr_delay_resolution provided. Going to use default 1 usec resolution\n");
                intr_delay_resolution = 1;

        ena_dev->intr_delay_resolution = intr_delay_resolution;

        for (i = 0; i < ENA_INTR_MAX_NUM_OF_LEVELS; i++)
                intr_moder_tbl[i].intr_moder_interval /= intr_delay_resolution;

        ena_dev->intr_moder_tx_interval /= intr_delay_resolution;

/*****************************************************************************/
/******************************* API ******************************/
/*****************************************************************************/
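
/* Submit an admin command and block until it completes, either by polling
 * or by waiting on the completion event, and return the translated status.
 */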
int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue,
                                  struct ena_admin_aq_entry *cmd,
                                  struct ena_admin_acq_entry *comp,

        struct ena_comp_ctx *comp_ctx;

        comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size,
        if (IS_ERR(comp_ctx)) {
                if (comp_ctx == ERR_PTR(ENA_COM_NO_DEVICE))
                        ena_trc_dbg("Failed to submit command [%ld]\n",

                        ena_trc_err("Failed to submit command [%ld]\n",

                return PTR_ERR(comp_ctx);

        ret = ena_com_wait_and_process_admin_cq(comp_ctx, admin_queue);
        if (unlikely(ret)) {
                if (admin_queue->running_state)
                        ena_trc_err("Failed to process command. ret = %d\n",

                        ena_trc_dbg("Failed to process command. ret = %d\n",

int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
                         struct ena_com_io_cq *io_cq)

        struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
        struct ena_admin_aq_create_cq_cmd create_cmd;
        struct ena_admin_acq_create_cq_resp_desc cmd_completion;

        memset(&create_cmd, 0x0, sizeof(create_cmd));

        create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_CQ;

        create_cmd.cq_caps_2 |= (io_cq->cdesc_entry_size_in_bytes / 4) &
                ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;
        create_cmd.cq_caps_1 |=
                ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK;

        create_cmd.msix_vector = io_cq->msix_vector;
        create_cmd.cq_depth = io_cq->q_depth;

        ret = ena_com_mem_addr_set(ena_dev,
                                   io_cq->cdesc_addr.phys_addr);
        if (unlikely(ret)) {
                ena_trc_err("memory address set failed\n");

        ret = ena_com_execute_admin_command(admin_queue,
                                            (struct ena_admin_aq_entry *)&create_cmd,
                                            (struct ena_admin_acq_entry *)&cmd_completion,
                                            sizeof(cmd_completion));
        if (unlikely(ret)) {
                ena_trc_err("Failed to create IO CQ. error: %d\n", ret);

        io_cq->idx = cmd_completion.cq_idx;

        io_cq->unmask_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
                cmd_completion.cq_interrupt_unmask_register_offset);

        if (cmd_completion.cq_head_db_register_offset)
                io_cq->cq_head_db_reg =
                        (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
                        cmd_completion.cq_head_db_register_offset);

        if (cmd_completion.numa_node_register_offset)
                io_cq->numa_node_cfg_reg =
                        (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
                        cmd_completion.numa_node_register_offset);

        ena_trc_dbg("created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth);

int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
                            struct ena_com_io_sq **io_sq,
                            struct ena_com_io_cq **io_cq)

        if (qid >= ENA_TOTAL_NUM_QUEUES) {
                ena_trc_err("Invalid queue number %d but the max is %d\n",
                            qid, ENA_TOTAL_NUM_QUEUES);
                return ENA_COM_INVAL;

        *io_sq = &ena_dev->io_sq_queues[qid];
        *io_cq = &ena_dev->io_cq_queues[qid];

void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev)

        struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
        struct ena_comp_ctx *comp_ctx;

        if (!admin_queue->comp_ctx)

        for (i = 0; i < admin_queue->q_depth; i++) {
                comp_ctx = get_comp_ctxt(admin_queue, i, false);
                if (unlikely(!comp_ctx))

                comp_ctx->status = ENA_CMD_ABORTED;

                ENA_WAIT_EVENT_SIGNAL(comp_ctx->wait_event);

void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)

        struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
        unsigned long flags = 0;

        ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
        while (ATOMIC32_READ(&admin_queue->outstanding_cmds) != 0) {
                ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
                ENA_MSLEEP(ENA_POLL_MS);
                ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);

        ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);

int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
                          struct ena_com_io_cq *io_cq)

        struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
        struct ena_admin_aq_destroy_cq_cmd destroy_cmd;
        struct ena_admin_acq_destroy_cq_resp_desc destroy_resp;

        memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));

        destroy_cmd.cq_idx = io_cq->idx;
        destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_CQ;

        ret = ena_com_execute_admin_command(admin_queue,
                                            (struct ena_admin_aq_entry *)&destroy_cmd,
                                            sizeof(destroy_cmd),
                                            (struct ena_admin_acq_entry *)&destroy_resp,
                                            sizeof(destroy_resp));

        if (unlikely(ret && (ret != ENA_COM_NO_DEVICE)))
                ena_trc_err("Failed to destroy IO CQ. error: %d\n", ret);

bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev)

        return ena_dev->admin_queue.running_state;

void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state)

        struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
        unsigned long flags = 0;

        ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
        ena_dev->admin_queue.running_state = state;
        ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);

void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev)

        u16 depth = ena_dev->aenq.q_depth;

        ENA_WARN(ena_dev->aenq.head != depth, "Invalid AENQ state\n");

        /* Init head_db to mark that all entries in the queue
         * are initially available
         */
        ENA_REG_WRITE32(ena_dev->bus, depth, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);

int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)

        struct ena_com_admin_queue *admin_queue;
        struct ena_admin_set_feat_cmd cmd;
        struct ena_admin_set_feat_resp resp;
        struct ena_admin_get_feat_resp get_resp;

        ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG, 0);

                ena_trc_info("Can't get aenq configuration\n");

        if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) {
                ena_trc_warn("Trying to set unsupported aenq events. supported flag: 0x%x asked flag: 0x%x\n",
                             get_resp.u.aenq.supported_groups,
                return ENA_COM_UNSUPPORTED;

        memset(&cmd, 0x0, sizeof(cmd));
        admin_queue = &ena_dev->admin_queue;

        cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
        cmd.aq_common_descriptor.flags = 0;
        cmd.feat_common.feature_id = ENA_ADMIN_AENQ_CONFIG;
        cmd.u.aenq.enabled_groups = groups_flag;

        ret = ena_com_execute_admin_command(admin_queue,
                                            (struct ena_admin_aq_entry *)&cmd,
                                            (struct ena_admin_acq_entry *)&resp,

                ena_trc_err("Failed to config AENQ ret: %d\n", ret);

int ena_com_get_dma_width(struct ena_com_dev *ena_dev)

        u32 caps = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);

        if (unlikely(caps == ENA_MMIO_READ_TIMEOUT)) {
                ena_trc_err("Reg read timeout occurred\n");
                return ENA_COM_TIMER_EXPIRED;

        width = (caps & ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK) >>
                ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT;

        ena_trc_dbg("ENA dma width: %d\n", width);

        if ((width < 32) || (width > ENA_MAX_PHYS_ADDR_SIZE_BITS)) {
                ena_trc_err("DMA width illegal value: %d\n", width);
                return ENA_COM_INVAL;

        ena_dev->dma_addr_bits = width;

int ena_com_validate_version(struct ena_com_dev *ena_dev)

        u32 ctrl_ver_masked;
        /* Make sure the ENA version and the controller version are at least
         * the values the driver expects
        ver = ena_com_reg_bar_read32(ena_dev, ENA_REGS_VERSION_OFF);
        ctrl_ver = ena_com_reg_bar_read32(ena_dev,
                                          ENA_REGS_CONTROLLER_VERSION_OFF);

        if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) ||
                     (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) {
                ena_trc_err("Reg read timeout occurred\n");
                return ENA_COM_TIMER_EXPIRED;

        ena_trc_info("ena device version: %d.%d\n",
                     (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >>
                     ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
                     ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);

        ena_trc_info("ena controller version: %d.%d.%d implementation version %d\n",
                     (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK)
                     >> ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
                     (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK)
                     >> ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT,
                     (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK),
                     (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK) >>
                     ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT);

                (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) |
                (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) |
                (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK);

        /* Validate the ctrl version without the implementation ID */
        if (ctrl_ver_masked < MIN_ENA_CTRL_VER) {
                ena_trc_err("ENA ctrl version is lower than the minimal ctrl version the driver supports\n");

void ena_com_admin_destroy(struct ena_com_dev *ena_dev)

        struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
        struct ena_com_admin_cq *cq = &admin_queue->cq;
        struct ena_com_admin_sq *sq = &admin_queue->sq;
        struct ena_com_aenq *aenq = &ena_dev->aenq;
        if (admin_queue->comp_ctx) {
                ENA_WAIT_EVENT_DESTROY(admin_queue->comp_ctx->wait_event);
                ENA_MEM_FREE(ena_dev->dmadev,
                             admin_queue->comp_ctx,
                             (admin_queue->q_depth * sizeof(struct ena_comp_ctx)));
        }
        admin_queue->comp_ctx = NULL;
        size = ADMIN_SQ_SIZE(admin_queue->q_depth);

        ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, sq->entries,
                              sq->dma_addr, sq->mem_handle);

        size = ADMIN_CQ_SIZE(admin_queue->q_depth);

        ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, cq->entries,
                              cq->dma_addr, cq->mem_handle);

        size = ADMIN_AENQ_SIZE(aenq->q_depth);
        if (ena_dev->aenq.entries)
                ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, aenq->entries,
                                      aenq->dma_addr, aenq->mem_handle);
        aenq->entries = NULL;
        ENA_SPINLOCK_DESTROY(admin_queue->q_lock);

void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)

                mask_value = ENA_REGS_ADMIN_INTR_MASK;

        ENA_REG_WRITE32(ena_dev->bus, mask_value,
                        ena_dev->reg_bar + ENA_REGS_INTR_MASK_OFF);
        ena_dev->admin_queue.polling = polling;

bool ena_com_get_admin_polling_mode(struct ena_com_dev *ena_dev)

        return ena_dev->admin_queue.polling;

void ena_com_set_admin_auto_polling_mode(struct ena_com_dev *ena_dev,

        ena_dev->admin_queue.auto_polling = polling;

int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)

        struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

        ENA_SPINLOCK_INIT(mmio_read->lock);
        ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
                               sizeof(*mmio_read->read_resp),
                               mmio_read->read_resp,
                               mmio_read->read_resp_dma_addr,
                               mmio_read->read_resp_mem_handle);
        if (unlikely(!mmio_read->read_resp))

        ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);

        mmio_read->read_resp->req_id = 0x0;
        mmio_read->seq_num = 0x0;
        mmio_read->readless_supported = true;

        ENA_SPINLOCK_DESTROY(mmio_read->lock);
        return ENA_COM_NO_MEM;

void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported)

        struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

        mmio_read->readless_supported = readless_supported;

void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev)

        struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

        ENA_REG_WRITE32(ena_dev->bus, 0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
        ENA_REG_WRITE32(ena_dev->bus, 0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);

        ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
                              sizeof(*mmio_read->read_resp),
                              mmio_read->read_resp,
                              mmio_read->read_resp_dma_addr,
                              mmio_read->read_resp_mem_handle);

        mmio_read->read_resp = NULL;
        ENA_SPINLOCK_DESTROY(mmio_read->lock);

void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev)

        struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
        u32 addr_low, addr_high;

        addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(mmio_read->read_resp_dma_addr);
        addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(mmio_read->read_resp_dma_addr);

        ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
        ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
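
/* Bring up the admin queue: verify the device is ready, allocate the SQ/CQ
 * rings and completion contexts, program the base address and capability
 * registers, and initialize the AENQ with the supplied handlers.
 */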
int ena_com_admin_init(struct ena_com_dev *ena_dev,
                       struct ena_aenq_handlers *aenq_handlers)

        struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
        u32 aq_caps, acq_caps, dev_sts, addr_low, addr_high;

        dev_sts = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);

        if (unlikely(dev_sts == ENA_MMIO_READ_TIMEOUT)) {
                ena_trc_err("Reg read timeout occurred\n");
                return ENA_COM_TIMER_EXPIRED;

        if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) {
                ena_trc_err("Device isn't ready, abort com init\n");
                return ENA_COM_NO_DEVICE;

        admin_queue->q_depth = ENA_ADMIN_QUEUE_DEPTH;

        admin_queue->bus = ena_dev->bus;
        admin_queue->q_dmadev = ena_dev->dmadev;
        admin_queue->polling = false;
        admin_queue->curr_cmd_id = 0;

        ATOMIC32_SET(&admin_queue->outstanding_cmds, 0);

        ENA_SPINLOCK_INIT(admin_queue->q_lock);

        ret = ena_com_init_comp_ctxt(admin_queue);

        ret = ena_com_admin_init_sq(admin_queue);

        ret = ena_com_admin_init_cq(admin_queue);

        admin_queue->sq.db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
                ENA_REGS_AQ_DB_OFF);

        addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->sq.dma_addr);
        addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->sq.dma_addr);

        ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_AQ_BASE_LO_OFF);
        ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_AQ_BASE_HI_OFF);

        addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->cq.dma_addr);
        addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->cq.dma_addr);

        ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_LO_OFF);
        ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_HI_OFF);

        aq_caps |= admin_queue->q_depth & ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK;
        aq_caps |= (sizeof(struct ena_admin_aq_entry) <<
                    ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT) &
                    ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK;

        acq_caps |= admin_queue->q_depth & ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK;
        acq_caps |= (sizeof(struct ena_admin_acq_entry) <<
                     ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT) &
                     ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK;

        ENA_REG_WRITE32(ena_dev->bus, aq_caps, ena_dev->reg_bar + ENA_REGS_AQ_CAPS_OFF);
        ENA_REG_WRITE32(ena_dev->bus, acq_caps, ena_dev->reg_bar + ENA_REGS_ACQ_CAPS_OFF);
        ret = ena_com_admin_init_aenq(ena_dev, aenq_handlers);

        admin_queue->running_state = true;

        ena_com_admin_destroy(ena_dev);

int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
                            struct ena_com_create_io_ctx *ctx)

        struct ena_com_io_sq *io_sq;
        struct ena_com_io_cq *io_cq;

        if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) {
                ena_trc_err("Qid (%d) is bigger than max num of queues (%d)\n",
                            ctx->qid, ENA_TOTAL_NUM_QUEUES);
                return ENA_COM_INVAL;

        io_sq = &ena_dev->io_sq_queues[ctx->qid];
        io_cq = &ena_dev->io_cq_queues[ctx->qid];

        memset(io_sq, 0x0, sizeof(*io_sq));
        memset(io_cq, 0x0, sizeof(*io_cq));

        io_cq->q_depth = ctx->queue_size;
        io_cq->direction = ctx->direction;
        io_cq->qid = ctx->qid;

        io_cq->msix_vector = ctx->msix_vector;

        io_sq->q_depth = ctx->queue_size;
        io_sq->direction = ctx->direction;
        io_sq->qid = ctx->qid;

        io_sq->mem_queue_type = ctx->mem_queue_type;

        if (ctx->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
                /* header length is limited to 8 bits */
                io_sq->tx_max_header_size =
                        ENA_MIN32(ena_dev->tx_max_header_size, SZ_256);

        ret = ena_com_init_io_sq(ena_dev, ctx, io_sq);

        ret = ena_com_init_io_cq(ena_dev, ctx, io_cq);

        ret = ena_com_create_io_cq(ena_dev, io_cq);

        ret = ena_com_create_io_sq(ena_dev, io_sq, io_cq->idx);

        ena_com_destroy_io_cq(ena_dev, io_cq);

        ena_com_io_queue_free(ena_dev, io_sq, io_cq);

void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid)

        struct ena_com_io_sq *io_sq;
        struct ena_com_io_cq *io_cq;

        if (qid >= ENA_TOTAL_NUM_QUEUES) {
                ena_trc_err("Qid (%d) is bigger than max num of queues (%d)\n",
                            qid, ENA_TOTAL_NUM_QUEUES);

        io_sq = &ena_dev->io_sq_queues[qid];
        io_cq = &ena_dev->io_cq_queues[qid];

        ena_com_destroy_io_sq(ena_dev, io_sq);
        ena_com_destroy_io_cq(ena_dev, io_cq);

        ena_com_io_queue_free(ena_dev, io_sq, io_cq);

int ena_com_get_link_params(struct ena_com_dev *ena_dev,
                            struct ena_admin_get_feat_resp *resp)

        return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG, 0);

int ena_com_extra_properties_strings_init(struct ena_com_dev *ena_dev)

        struct ena_admin_get_feat_resp resp;
        struct ena_extra_properties_strings *extra_properties_strings =
                &ena_dev->extra_properties_strings;

        extra_properties_strings->size = ENA_ADMIN_EXTRA_PROPERTIES_COUNT *
                ENA_ADMIN_EXTRA_PROPERTIES_STRING_LEN;

        ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
                               extra_properties_strings->size,
                               extra_properties_strings->virt_addr,
                               extra_properties_strings->dma_addr,
                               extra_properties_strings->dma_handle);
        if (unlikely(!extra_properties_strings->virt_addr)) {
                ena_trc_err("Failed to allocate extra properties strings\n");

        rc = ena_com_get_feature_ex(ena_dev, &resp,
                                    ENA_ADMIN_EXTRA_PROPERTIES_STRINGS,
                                    extra_properties_strings->dma_addr,
                                    extra_properties_strings->size, 0);

                ena_trc_dbg("Failed to get extra properties strings\n");

        return resp.u.extra_properties_strings.count;

        ena_com_delete_extra_properties_strings(ena_dev);

void ena_com_delete_extra_properties_strings(struct ena_com_dev *ena_dev)

        struct ena_extra_properties_strings *extra_properties_strings =
                &ena_dev->extra_properties_strings;

        if (extra_properties_strings->virt_addr) {
                ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
                                      extra_properties_strings->size,
                                      extra_properties_strings->virt_addr,
                                      extra_properties_strings->dma_addr,
                                      extra_properties_strings->dma_handle);
                extra_properties_strings->virt_addr = NULL;

int ena_com_get_extra_properties_flags(struct ena_com_dev *ena_dev,
                                       struct ena_admin_get_feat_resp *resp)

        return ena_com_get_feature(ena_dev, resp,
                                   ENA_ADMIN_EXTRA_PROPERTIES_FLAGS, 0);

int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
                              struct ena_com_dev_get_features_ctx *get_feat_ctx)

        struct ena_admin_get_feat_resp get_resp;

        rc = ena_com_get_feature(ena_dev, &get_resp,
                                 ENA_ADMIN_DEVICE_ATTRIBUTES, 0);

        memcpy(&get_feat_ctx->dev_attr, &get_resp.u.dev_attr,
               sizeof(get_resp.u.dev_attr));
        ena_dev->supported_features = get_resp.u.dev_attr.supported_features;

        if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
                rc = ena_com_get_feature(ena_dev, &get_resp,
                                         ENA_ADMIN_MAX_QUEUES_EXT,
                                         ENA_FEATURE_MAX_QUEUE_EXT_VER);

                if (get_resp.u.max_queue_ext.version != ENA_FEATURE_MAX_QUEUE_EXT_VER)

                memcpy(&get_feat_ctx->max_queue_ext, &get_resp.u.max_queue_ext,
                       sizeof(get_resp.u.max_queue_ext));
                ena_dev->tx_max_header_size =
                        get_resp.u.max_queue_ext.max_queue_ext.max_tx_header_size;

                rc = ena_com_get_feature(ena_dev, &get_resp,
                                         ENA_ADMIN_MAX_QUEUES_NUM, 0);
                memcpy(&get_feat_ctx->max_queues, &get_resp.u.max_queue,
                       sizeof(get_resp.u.max_queue));
                ena_dev->tx_max_header_size =
                        get_resp.u.max_queue.max_header_size;

        rc = ena_com_get_feature(ena_dev, &get_resp,
                                 ENA_ADMIN_AENQ_CONFIG, 0);

        memcpy(&get_feat_ctx->aenq, &get_resp.u.aenq,
               sizeof(get_resp.u.aenq));

        rc = ena_com_get_feature(ena_dev, &get_resp,
                                 ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);

        memcpy(&get_feat_ctx->offload, &get_resp.u.offload,
               sizeof(get_resp.u.offload));
        /* Driver hints isn't a mandatory admin command, so in case the
         * command isn't supported set driver hints to 0
        rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_HW_HINTS, 0);

                memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints,
                       sizeof(get_resp.u.hw_hints));
        else if (rc == ENA_COM_UNSUPPORTED)
                memset(&get_feat_ctx->hw_hints, 0x0, sizeof(get_feat_ctx->hw_hints));

        rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_LLQ, 0);

                memcpy(&get_feat_ctx->llq, &get_resp.u.llq,
                       sizeof(get_resp.u.llq));
        else if (rc == ENA_COM_UNSUPPORTED)
                memset(&get_feat_ctx->llq, 0x0, sizeof(get_feat_ctx->llq));

        rc = ena_com_get_feature(ena_dev, &get_resp,
                                 ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG, 0);

                memcpy(&get_feat_ctx->ind_table, &get_resp.u.ind_table,
                       sizeof(get_resp.u.ind_table));
        else if (rc == ENA_COM_UNSUPPORTED)
                memset(&get_feat_ctx->ind_table, 0x0,
                       sizeof(get_feat_ctx->ind_table));

void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev)

        ena_com_handle_admin_completion(&ena_dev->admin_queue);
/* ena_com_get_specific_aenq_cb:
 * Return the handler that is relevant to the specific event group.
static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *dev,

        struct ena_aenq_handlers *aenq_handlers = dev->aenq.aenq_handlers;

        if ((group < ENA_MAX_HANDLERS) && aenq_handlers->handlers[group])
                return aenq_handlers->handlers[group];

        return aenq_handlers->unimplemented_handler;
/* ena_com_aenq_intr_handler:
 * handles the aenq incoming events.
 * pop events from the queue and apply the specific handler
 */
void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
{
	struct ena_admin_aenq_entry *aenq_e;
	struct ena_admin_aenq_common_desc *aenq_common;
	struct ena_com_aenq *aenq = &dev->aenq;
	u64 timestamp;
	ena_aenq_handler handler_cb;
	u16 masked_head, processed = 0;
	u8 phase;

	masked_head = aenq->head & (aenq->q_depth - 1);
	phase = aenq->phase;
	aenq_e = &aenq->entries[masked_head]; /* Get first entry */
	aenq_common = &aenq_e->aenq_common_desc;

	/* Go over all the events */
	while ((READ_ONCE8(aenq_common->flags) &
		ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) {
		/* Make sure the phase bit (ownership) is as expected before
		 * reading the rest of the descriptor.
		 */
		dma_rmb();

		timestamp = (u64)aenq_common->timestamp_low |
			((u64)aenq_common->timestamp_high << 32);
		ENA_TOUCH(timestamp); /* In case debug is disabled */
		ena_trc_dbg("AENQ! Group[%x] Syndrome[%x] timestamp: [%"PRIu64"]\n",
			    aenq_common->group,
			    aenq_common->syndrom,
			    timestamp);

		/* Handle specific event */
		handler_cb = ena_com_get_specific_aenq_cb(dev,
							  aenq_common->group);
		handler_cb(data, aenq_e); /* call the actual event handler */

		/* Get next event entry */
		masked_head++;
		processed++;

		if (unlikely(masked_head == aenq->q_depth)) {
			masked_head = 0;
			phase = !phase;
		}
		aenq_e = &aenq->entries[masked_head];
		aenq_common = &aenq_e->aenq_common_desc;
	}

	aenq->head += processed;
	aenq->phase = phase;

	/* Don't update aenq doorbell if there weren't any processed events */
	if (!processed)
		return;

	/* write the aenq doorbell after all AENQ descriptors were read */
	mb();
	ENA_REG_WRITE32_RELAXED(dev->bus, (u32)aenq->head,
				dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
#ifndef MMIOWB_NOT_DEFINED
	mmiowb();
#endif
}

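/*
 * Sketch (illustrative, not part of ena_com) of how a caller wires the
 * AENQ dispatch above: group indices below ENA_MAX_HANDLERS with a
 * non-NULL entry get their own callback, everything else falls through
 * to unimplemented_handler. The "my_*" callback names are hypothetical.
 */
#if 0
static void my_link_change_cb(void *data, struct ena_admin_aenq_entry *e) { }
static void my_unimplemented_cb(void *data, struct ena_admin_aenq_entry *e) { }

static struct ena_aenq_handlers my_aenq_handlers = {
	.handlers = {
		[ENA_ADMIN_LINK_CHANGE] = my_link_change_cb,
	},
	.unimplemented_handler = my_unimplemented_cb,
};
#endif
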
int ena_com_dev_reset(struct ena_com_dev *ena_dev,
		      enum ena_regs_reset_reason_types reset_reason)
{
	u32 stat, timeout, cap, reset_val;
	int rc;

	stat = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
	cap = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);

	if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) ||
		     (cap == ENA_MMIO_READ_TIMEOUT))) {
		ena_trc_err("Reg read32 timeout occurred\n");
		return ENA_COM_TIMER_EXPIRED;
	}

	if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) {
		ena_trc_err("Device isn't ready, can't reset device\n");
		return ENA_COM_INVAL;
	}

	timeout = (cap & ENA_REGS_CAPS_RESET_TIMEOUT_MASK) >>
		  ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT;
	if (timeout == 0) {
		ena_trc_err("Invalid timeout value\n");
		return ENA_COM_INVAL;
	}

	/* start reset */
	reset_val = ENA_REGS_DEV_CTL_DEV_RESET_MASK;
	reset_val |= (reset_reason << ENA_REGS_DEV_CTL_RESET_REASON_SHIFT) &
		     ENA_REGS_DEV_CTL_RESET_REASON_MASK;
	ENA_REG_WRITE32(ena_dev->bus, reset_val, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);

	/* Write again the MMIO read request address */
	ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);

	rc = wait_for_reset_state(ena_dev, timeout,
				  ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK);
	if (rc != 0) {
		ena_trc_err("Reset indication didn't turn on\n");
		return rc;
	}

	/* reset done */
	ENA_REG_WRITE32(ena_dev->bus, 0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
	rc = wait_for_reset_state(ena_dev, timeout, 0);
	if (rc != 0) {
		ena_trc_err("Reset indication didn't turn off\n");
		return rc;
	}

	timeout = (cap & ENA_REGS_CAPS_ADMIN_CMD_TO_MASK) >>
		  ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT;
	if (timeout)
		/* the resolution of timeout reg is 100ms */
		ena_dev->admin_queue.completion_timeout = timeout * 100000;
	else
		ena_dev->admin_queue.completion_timeout = ADMIN_CMD_TIMEOUT_US;

	return 0;
}

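/*
 * Usage sketch (illustrative only): a recovery path triggering the reset
 * above. ENA_REGS_RESET_OS_TRIGGER is one example reason code from
 * enum ena_regs_reset_reason_types; callers pick the value matching the
 * failure they detected.
 */
#if 0
static int my_try_recover(struct ena_com_dev *ena_dev)
{
	int rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_OS_TRIGGER);

	if (rc)
		return rc;

	/* After a successful reset the admin command timeout has been
	 * re-read from the caps register; the caller must now fully
	 * re-initialize the device.
	 */
	return 0;
}
#endif
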
static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
			     struct ena_com_stats_ctx *ctx,
			     enum ena_admin_get_stats_type type)
{
	struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx->get_cmd;
	struct ena_admin_acq_get_stats_resp *get_resp = &ctx->get_resp;
	struct ena_com_admin_queue *admin_queue;
	int ret;

	admin_queue = &ena_dev->admin_queue;

	get_cmd->aq_common_descriptor.opcode = ENA_ADMIN_GET_STATS;
	get_cmd->aq_common_descriptor.flags = 0;
	get_cmd->type = type;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)get_cmd,
					    sizeof(*get_cmd),
					    (struct ena_admin_acq_entry *)get_resp,
					    sizeof(*get_resp));
	if (unlikely(ret))
		ena_trc_err("Failed to get stats. error: %d\n", ret);

	return ret;
}

int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
				struct ena_admin_basic_stats *stats)
{
	struct ena_com_stats_ctx ctx;
	int ret;

	memset(&ctx, 0x0, sizeof(ctx));
	ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_BASIC);
	if (likely(ret == 0))
		memcpy(stats, &ctx.get_resp.basic_stats,
		       sizeof(ctx.get_resp.basic_stats));

	return ret;
}

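/*
 * Sketch (not part of ena_com): pulling the device-wide counters into a
 * local struct. The rx_pkts_low/tx_pkts_low field names are assumed from
 * struct ena_admin_basic_stats.
 */
#if 0
static void my_dump_basic_stats(struct ena_com_dev *ena_dev)
{
	struct ena_admin_basic_stats stats;

	if (ena_com_get_dev_basic_stats(ena_dev, &stats))
		return;

	ena_trc_dbg("rx packets: %u, tx packets: %u\n",
		    stats.rx_pkts_low, stats.tx_pkts_low);
}
#endif
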
int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu)
{
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
		ena_trc_dbg("Feature %d isn't supported\n", ENA_ADMIN_MTU);
		return ENA_COM_UNSUPPORTED;
	}

	memset(&cmd, 0x0, sizeof(cmd));
	admin_queue = &ena_dev->admin_queue;

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags = 0;
	cmd.feat_common.feature_id = ENA_ADMIN_MTU;
	cmd.u.mtu.mtu = mtu;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));
	if (unlikely(ret))
		ena_trc_err("Failed to set mtu %d. error: %d\n", mtu, ret);

	return ret;
}

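/*
 * Usage sketch (illustrative): applying a new MTU while tolerating
 * devices that do not expose the feature.
 */
#if 0
static int my_change_mtu(struct ena_com_dev *ena_dev, int new_mtu)
{
	int rc = ena_com_set_dev_mtu(ena_dev, new_mtu);

	if (rc == ENA_COM_UNSUPPORTED)
		return 0; /* device keeps its current MTU */

	return rc;
}
#endif
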
int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
				 struct ena_admin_feature_offload_desc *offload)
{
	int ret;
	struct ena_admin_get_feat_resp resp;

	ret = ena_com_get_feature(ena_dev, &resp,
				  ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
	if (unlikely(ret)) {
		ena_trc_err("Failed to get offload capabilities %d\n", ret);
		return ret;
	}

	memcpy(offload, &resp.u.offload, sizeof(resp.u.offload));

	return 0;
}

int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	struct ena_admin_get_feat_resp get_resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev,
						ENA_ADMIN_RSS_HASH_FUNCTION)) {
		ena_trc_dbg("Feature %d isn't supported\n",
			    ENA_ADMIN_RSS_HASH_FUNCTION);
		return ENA_COM_UNSUPPORTED;
	}

	/* Validate hash function is supported */
	ret = ena_com_get_feature(ena_dev, &get_resp,
				  ENA_ADMIN_RSS_HASH_FUNCTION, 0);
	if (unlikely(ret))
		return ret;

	if (!(get_resp.u.flow_hash_func.supported_func & BIT(rss->hash_func))) {
		ena_trc_err("Func hash %d isn't supported by device, abort\n",
			    rss->hash_func);
		return ENA_COM_UNSUPPORTED;
	}

	memset(&cmd, 0x0, sizeof(cmd));

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags =
		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_FUNCTION;
	cmd.u.flow_hash_func.init_val = rss->hash_init_val;
	cmd.u.flow_hash_func.selected_func = 1 << rss->hash_func;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.control_buffer.address,
				   rss->hash_key_dma_addr);
	if (unlikely(ret)) {
		ena_trc_err("memory address set failed\n");
		return ret;
	}

	cmd.control_buffer.length = sizeof(*rss->hash_key);

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));
	if (unlikely(ret)) {
		ena_trc_err("Failed to set hash function %d. error: %d\n",
			    rss->hash_func, ret);
		return ENA_COM_INVAL;
	}

	return 0;
}

int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
			       enum ena_admin_hash_functions func,
			       const u8 *key, u16 key_len, u32 init_val)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	struct ena_admin_feature_rss_flow_hash_control *hash_key =
		rss->hash_key;
	int rc;

	/* Make sure the key size is a multiple of DWORDs */
	if (unlikely(key_len & 0x3))
		return ENA_COM_INVAL;

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_HASH_FUNCTION,
				    rss->hash_key_dma_addr,
				    sizeof(*rss->hash_key), 0);
	if (unlikely(rc))
		return rc;

	if (!((1 << func) & get_resp.u.flow_hash_func.supported_func)) {
		ena_trc_err("Flow hash function %d isn't supported\n", func);
		return ENA_COM_UNSUPPORTED;
	}

	switch (func) {
	case ENA_ADMIN_TOEPLITZ:
		if (key_len > sizeof(hash_key->key)) {
			ena_trc_err("key len (%hu) is bigger than the max supported (%zu)\n",
				    key_len, sizeof(hash_key->key));
			return ENA_COM_INVAL;
		}

		memcpy(hash_key->key, key, key_len);
		rss->hash_init_val = init_val;
		hash_key->keys_num = key_len >> 2;
		break;
	case ENA_ADMIN_CRC32:
		rss->hash_init_val = init_val;
		break;
	default:
		ena_trc_err("Invalid hash function (%d)\n", func);
		return ENA_COM_INVAL;
	}

	rss->hash_func = func;
	rc = ena_com_set_hash_function(ena_dev);

	/* Restore the old function */
	if (unlikely(rc))
		ena_com_get_hash_function(ena_dev, NULL, NULL);

	return rc;
}

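/*
 * Sketch (illustrative): programming a Toeplitz key. The key length must
 * be a multiple of 4 bytes; "my_key" is a hypothetical 40-byte RSS key
 * the caller would fill with real key material.
 */
#if 0
static int my_set_toeplitz(struct ena_com_dev *ena_dev)
{
	static const u8 my_key[40] = { /* caller-chosen key bytes */ };

	return ena_com_fill_hash_function(ena_dev, ENA_ADMIN_TOEPLITZ,
					  my_key, sizeof(my_key), 0x0);
}
#endif
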
int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
			      enum ena_admin_hash_functions *func,
			      u8 *key)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	struct ena_admin_feature_rss_flow_hash_control *hash_key =
		rss->hash_key;
	int rc;

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_HASH_FUNCTION,
				    rss->hash_key_dma_addr,
				    sizeof(*rss->hash_key), 0);
	if (unlikely(rc))
		return rc;

	rss->hash_func = get_resp.u.flow_hash_func.selected_func;
	if (func)
		*func = rss->hash_func;
	if (key)
		memcpy(key, hash_key->key, (size_t)(hash_key->keys_num) << 2);

	return 0;
}

int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev,
			  enum ena_admin_flow_hash_proto proto,
			  u16 *fields)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	int rc;

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_HASH_INPUT,
				    rss->hash_ctrl_dma_addr,
				    sizeof(*rss->hash_ctrl), 0);
	if (unlikely(rc))
		return rc;

	if (fields)
		*fields = rss->hash_ctrl->selected_fields[proto].fields;

	return 0;
}

int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev,
						ENA_ADMIN_RSS_HASH_INPUT)) {
		ena_trc_dbg("Feature %d isn't supported\n",
			    ENA_ADMIN_RSS_HASH_INPUT);
		return ENA_COM_UNSUPPORTED;
	}

	memset(&cmd, 0x0, sizeof(cmd));

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags =
		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_INPUT;
	cmd.u.flow_hash_input.enabled_input_sort =
		ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK |
		ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.control_buffer.address,
				   rss->hash_ctrl_dma_addr);
	if (unlikely(ret)) {
		ena_trc_err("memory address set failed\n");
		return ret;
	}

	cmd.control_buffer.length = sizeof(*hash_ctrl);

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));
	if (unlikely(ret))
		ena_trc_err("Failed to set hash input. error: %d\n", ret);

	return ret;
}

int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_feature_rss_hash_control *hash_ctrl =
		rss->hash_ctrl;
	u16 available_fields = 0;
	int rc, i;

	/* Get the supported hash input */
	rc = ena_com_get_hash_ctrl(ena_dev, 0, NULL);
	if (unlikely(rc))
		return rc;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP4].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP4].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP6].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP6].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP6].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_NOT_IP].fields =
		ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA;

	for (i = 0; i < ENA_ADMIN_RSS_PROTO_NUM; i++) {
		available_fields = hash_ctrl->selected_fields[i].fields &
				   hash_ctrl->supported_fields[i].fields;
		if (available_fields != hash_ctrl->selected_fields[i].fields) {
			ena_trc_err("hash control doesn't support all the desired configuration. proto %x supported %x selected %x\n",
				    i, hash_ctrl->supported_fields[i].fields,
				    hash_ctrl->selected_fields[i].fields);
			return ENA_COM_UNSUPPORTED;
		}
	}

	rc = ena_com_set_hash_ctrl(ena_dev);

	/* In case of failure, restore the old hash ctrl */
	if (unlikely(rc))
		ena_com_get_hash_ctrl(ena_dev, 0, NULL);

	return rc;
}

int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
			   enum ena_admin_flow_hash_proto proto,
			   u16 hash_fields)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
	u16 supported_fields;
	int rc;

	if (proto >= ENA_ADMIN_RSS_PROTO_NUM) {
		ena_trc_err("Invalid proto num (%u)\n", proto);
		return ENA_COM_INVAL;
	}

	/* Get the ctrl table */
	rc = ena_com_get_hash_ctrl(ena_dev, proto, NULL);
	if (unlikely(rc))
		return rc;

	/* Make sure all the fields are supported */
	supported_fields = hash_ctrl->supported_fields[proto].fields;
	if ((hash_fields & supported_fields) != hash_fields) {
		ena_trc_err("proto %d doesn't support the required fields %x. supports only: %x\n",
			    proto, hash_fields, supported_fields);
	}

	hash_ctrl->selected_fields[proto].fields = hash_fields;

	rc = ena_com_set_hash_ctrl(ena_dev);

	/* In case of failure, restore the old hash ctrl */
	if (unlikely(rc))
		ena_com_get_hash_ctrl(ena_dev, 0, NULL);

	return rc;
}

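/*
 * Sketch (illustrative): restricting TCP/IPv4 hashing to the L4 ports
 * only. ena_com_fill_hash_ctrl() re-reads the control table, checks the
 * request against supported_fields, and pushes the new selection.
 */
#if 0
static int my_hash_tcp4_ports_only(struct ena_com_dev *ena_dev)
{
	return ena_com_fill_hash_ctrl(ena_dev, ENA_ADMIN_RSS_TCP4,
				      ENA_ADMIN_RSS_L4_SP | ENA_ADMIN_RSS_L4_DP);
}
#endif
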
int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev,
				      u16 entry_idx, u16 entry_value)
{
	struct ena_rss *rss = &ena_dev->rss;

	if (unlikely(entry_idx >= (1 << rss->tbl_log_size)))
		return ENA_COM_INVAL;

	if (unlikely(entry_value > ENA_TOTAL_NUM_QUEUES))
		return ENA_COM_INVAL;

	rss->host_rss_ind_tbl[entry_idx] = entry_value;

	return 0;
}

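/*
 * Sketch (illustrative): spreading a table of 2^tbl_log_size entries
 * round-robin across "num_queues" RX queues and then committing it to
 * the device. Real drivers may map the queue index through their own
 * queue-ID scheme before storing it.
 */
#if 0
static int my_fill_ind_table(struct ena_com_dev *ena_dev,
			     u16 tbl_log_size, u16 num_queues)
{
	u16 i;
	int rc;

	for (i = 0; i < (1 << tbl_log_size); i++) {
		rc = ena_com_indirect_table_fill_entry(ena_dev, i,
						       i % num_queues);
		if (rc)
			return rc;
	}

	return ena_com_indirect_table_set(ena_dev);
}
#endif
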
int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev,
						ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG)) {
		ena_trc_dbg("Feature %d isn't supported\n",
			    ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
		return ENA_COM_UNSUPPORTED;
	}

	ret = ena_com_ind_tbl_convert_to_device(ena_dev);
	if (ret) {
		ena_trc_err("Failed to convert host indirection table to device table\n");
		return ret;
	}

	memset(&cmd, 0x0, sizeof(cmd));

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags =
		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	cmd.feat_common.feature_id = ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG;
	cmd.u.ind_table.size = rss->tbl_log_size;
	cmd.u.ind_table.inline_index = 0xFFFFFFFF;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.control_buffer.address,
				   rss->rss_ind_tbl_dma_addr);
	if (unlikely(ret)) {
		ena_trc_err("memory address set failed\n");
		return ret;
	}

	cmd.control_buffer.length = (1ULL << rss->tbl_log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));
	if (unlikely(ret))
		ena_trc_err("Failed to set indirect table. error: %d\n", ret);

	return ret;
}

int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	u32 tbl_size;
	int i, rc;

	tbl_size = (1ULL << rss->tbl_log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG,
				    rss->rss_ind_tbl_dma_addr,
				    tbl_size, 0);
	if (unlikely(rc))
		return rc;

	if (!ind_tbl)
		return 0;

	rc = ena_com_ind_tbl_convert_from_device(ena_dev);
	if (unlikely(rc))
		return rc;

	for (i = 0; i < (1 << rss->tbl_log_size); i++)
		ind_tbl[i] = rss->host_rss_ind_tbl[i];

	return 0;
}

int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size)
{
	int rc;

	memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));

	rc = ena_com_indirect_table_allocate(ena_dev, indr_tbl_log_size);
	if (unlikely(rc))
		goto err_indr_tbl;

	rc = ena_com_hash_key_allocate(ena_dev);
	if (unlikely(rc))
		goto err_hash_key;

	rc = ena_com_hash_ctrl_init(ena_dev);
	if (unlikely(rc))
		goto err_hash_ctrl;

	return 0;

err_hash_ctrl:
	ena_com_hash_key_destroy(ena_dev);
err_hash_key:
	ena_com_indirect_table_destroy(ena_dev);
err_indr_tbl:
	return rc;
}

void ena_com_rss_destroy(struct ena_com_dev *ena_dev)
{
	ena_com_indirect_table_destroy(ena_dev);
	ena_com_hash_key_destroy(ena_dev);
	ena_com_hash_ctrl_destroy(ena_dev);

	memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
}

int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
			       SZ_4K,
			       host_attr->host_info,
			       host_attr->host_info_dma_addr,
			       host_attr->host_info_dma_handle);
	if (unlikely(!host_attr->host_info))
		return ENA_COM_NO_MEM;

	host_attr->host_info->ena_spec_version = ((ENA_COMMON_SPEC_VERSION_MAJOR <<
		ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) |
		(ENA_COMMON_SPEC_VERSION_MINOR));

	return 0;
}

int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
				u32 debug_area_size)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
			       debug_area_size,
			       host_attr->debug_area_virt_addr,
			       host_attr->debug_area_dma_addr,
			       host_attr->debug_area_dma_handle);
	if (unlikely(!host_attr->debug_area_virt_addr)) {
		host_attr->debug_area_size = 0;
		return ENA_COM_NO_MEM;
	}

	host_attr->debug_area_size = debug_area_size;

	return 0;
}

void ena_com_delete_host_info(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	if (host_attr->host_info) {
		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
				      SZ_4K,
				      host_attr->host_info,
				      host_attr->host_info_dma_addr,
				      host_attr->host_info_dma_handle);
		host_attr->host_info = NULL;
	}
}

void ena_com_delete_debug_area(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	if (host_attr->debug_area_virt_addr) {
		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
				      host_attr->debug_area_size,
				      host_attr->debug_area_virt_addr,
				      host_attr->debug_area_dma_addr,
				      host_attr->debug_area_dma_handle);
		host_attr->debug_area_virt_addr = NULL;
	}
}

int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	/* Host attribute config is called before ena_com_get_dev_attr_feat
	 * so ena_com can't check if the feature is supported.
	 */

	memset(&cmd, 0x0, sizeof(cmd));
	admin_queue = &ena_dev->admin_queue;

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.feat_common.feature_id = ENA_ADMIN_HOST_ATTR_CONFIG;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.u.host_attr.debug_ba,
				   host_attr->debug_area_dma_addr);
	if (unlikely(ret)) {
		ena_trc_err("memory address set failed\n");
		return ret;
	}

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.u.host_attr.os_info_ba,
				   host_attr->host_info_dma_addr);
	if (unlikely(ret)) {
		ena_trc_err("memory address set failed\n");
		return ret;
	}

	cmd.u.host_attr.debug_area_size = host_attr->debug_area_size;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));
	if (unlikely(ret))
		ena_trc_err("Failed to set host attributes: %d\n", ret);

	return ret;
}

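/*
 * Sketch (illustrative): the usual host-info sequence - allocate the
 * coherent buffers, let the caller fill in OS details, then push both
 * base addresses to the device in one SET_FEATURE command.
 */
#if 0
static int my_config_host_info(struct ena_com_dev *ena_dev)
{
	int rc;

	rc = ena_com_allocate_host_info(ena_dev);
	if (rc)
		return rc;

	/* caller fills ena_dev->host_attr.host_info (os_type, driver
	 * version, etc.) here before publishing it to the device */

	rc = ena_com_set_host_attributes(ena_dev);
	if (rc)
		ena_com_delete_host_info(ena_dev);

	return rc;
}
#endif
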
/* Interrupt moderation */
bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev)
{
	return ena_com_check_supported_feature_id(ena_dev,
						  ENA_ADMIN_INTERRUPT_MODERATION);
}

int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
						      u32 tx_coalesce_usecs)
{
	if (!ena_dev->intr_delay_resolution) {
		ena_trc_err("Illegal interrupt delay granularity value\n");
		return ENA_COM_FAULT;
	}

	ena_dev->intr_moder_tx_interval = tx_coalesce_usecs /
		ena_dev->intr_delay_resolution;

	return 0;
}

int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
						      u32 rx_coalesce_usecs)
{
	if (!ena_dev->intr_delay_resolution) {
		ena_trc_err("Illegal interrupt delay granularity value\n");
		return ENA_COM_FAULT;
	}

	/* We use LOWEST entry of moderation table for storing
	 * nonadaptive interrupt coalescing values
	 */
	ena_dev->intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval =
		rx_coalesce_usecs / ena_dev->intr_delay_resolution;

	return 0;
}

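/*
 * Worked example (illustrative): with intr_delay_resolution == 4 us, a
 * request for 64 us of RX coalescing stores 64 / 4 = 16 device units in
 * the LOWEST table entry; the device scales it back by the same
 * resolution when applying it.
 */
#if 0
static int my_set_rx_coalescing(struct ena_com_dev *ena_dev)
{
	return ena_com_update_nonadaptive_moderation_interval_rx(ena_dev, 64);
}
#endif
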
void ena_com_destroy_interrupt_moderation(struct ena_com_dev *ena_dev)
{
	if (ena_dev->intr_moder_tbl)
		ENA_MEM_FREE(ena_dev->dmadev,
			     ena_dev->intr_moder_tbl,
			     (sizeof(struct ena_intr_moder_entry) * ENA_INTR_MAX_NUM_OF_LEVELS));
	ena_dev->intr_moder_tbl = NULL;
}

int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
{
	struct ena_admin_get_feat_resp get_resp;
	u16 delay_resolution;
	int rc;

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_INTERRUPT_MODERATION, 0);
	if (rc) {
		if (rc == ENA_COM_UNSUPPORTED) {
			ena_trc_dbg("Feature %d isn't supported\n",
				    ENA_ADMIN_INTERRUPT_MODERATION);
			rc = 0;
		} else {
			ena_trc_err("Failed to get interrupt moderation admin cmd. rc: %d\n",
				    rc);
		}
		/* no moderation supported, disable adaptive support */
		ena_com_disable_adaptive_moderation(ena_dev);
		return rc;
	}

	rc = ena_com_init_interrupt_moderation_table(ena_dev);
	if (rc)
		goto err;

	/* if moderation is supported by device we set adaptive moderation */
	delay_resolution = get_resp.u.intr_moderation.intr_delay_resolution;
	ena_com_update_intr_delay_resolution(ena_dev, delay_resolution);

	/* Disable adaptive moderation by default - can be enabled later */
	ena_com_disable_adaptive_moderation(ena_dev);

	return 0;
err:
	ena_com_destroy_interrupt_moderation(ena_dev);
	return rc;
}

void ena_com_config_default_interrupt_moderation_table(struct ena_com_dev *ena_dev)
{
	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;

	if (!intr_moder_tbl)
		return;

	intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval =
		ENA_INTR_LOWEST_USECS;
	intr_moder_tbl[ENA_INTR_MODER_LOWEST].pkts_per_interval =
		ENA_INTR_LOWEST_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_LOWEST].bytes_per_interval =
		ENA_INTR_LOWEST_BYTES;

	intr_moder_tbl[ENA_INTR_MODER_LOW].intr_moder_interval =
		ENA_INTR_LOW_USECS;
	intr_moder_tbl[ENA_INTR_MODER_LOW].pkts_per_interval =
		ENA_INTR_LOW_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_LOW].bytes_per_interval =
		ENA_INTR_LOW_BYTES;

	intr_moder_tbl[ENA_INTR_MODER_MID].intr_moder_interval =
		ENA_INTR_MID_USECS;
	intr_moder_tbl[ENA_INTR_MODER_MID].pkts_per_interval =
		ENA_INTR_MID_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_MID].bytes_per_interval =
		ENA_INTR_MID_BYTES;

	intr_moder_tbl[ENA_INTR_MODER_HIGH].intr_moder_interval =
		ENA_INTR_HIGH_USECS;
	intr_moder_tbl[ENA_INTR_MODER_HIGH].pkts_per_interval =
		ENA_INTR_HIGH_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_HIGH].bytes_per_interval =
		ENA_INTR_HIGH_BYTES;

	intr_moder_tbl[ENA_INTR_MODER_HIGHEST].intr_moder_interval =
		ENA_INTR_HIGHEST_USECS;
	intr_moder_tbl[ENA_INTR_MODER_HIGHEST].pkts_per_interval =
		ENA_INTR_HIGHEST_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_HIGHEST].bytes_per_interval =
		ENA_INTR_HIGHEST_BYTES;
}

unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev)
{
	return ena_dev->intr_moder_tx_interval;
}

unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev)
{
	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;

	return intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval;
}

void ena_com_init_intr_moderation_entry(struct ena_com_dev *ena_dev,
					enum ena_intr_moder_level level,
					struct ena_intr_moder_entry *entry)
{
	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;

	if (level >= ENA_INTR_MAX_NUM_OF_LEVELS)
		return;

	intr_moder_tbl[level].intr_moder_interval = entry->intr_moder_interval;
	if (ena_dev->intr_delay_resolution)
		intr_moder_tbl[level].intr_moder_interval /=
			ena_dev->intr_delay_resolution;
	intr_moder_tbl[level].pkts_per_interval = entry->pkts_per_interval;

	/* use hardcoded value until ethtool supports bytecount parameter */
	if (entry->bytes_per_interval != ENA_INTR_BYTE_COUNT_NOT_SUPPORTED)
		intr_moder_tbl[level].bytes_per_interval = entry->bytes_per_interval;
}

void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev,
				       enum ena_intr_moder_level level,
				       struct ena_intr_moder_entry *entry)
{
	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;

	if (level >= ENA_INTR_MAX_NUM_OF_LEVELS)
		return;

	entry->intr_moder_interval = intr_moder_tbl[level].intr_moder_interval;
	if (ena_dev->intr_delay_resolution)
		entry->intr_moder_interval *= ena_dev->intr_delay_resolution;
	entry->pkts_per_interval =
		intr_moder_tbl[level].pkts_per_interval;
	entry->bytes_per_interval = intr_moder_tbl[level].bytes_per_interval;
}

int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
			    struct ena_admin_feature_llq_desc *llq_features,
			    struct ena_llq_configurations *llq_default_cfg)
{
	struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
	int rc;

	if (!llq_features->max_llq_num) {
		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
		return 0;
	}

	rc = ena_com_config_llq_info(ena_dev, llq_features, llq_default_cfg);
	if (rc)
		return rc;

	ena_dev->tx_max_header_size = llq_info->desc_list_entry_size -
		(llq_info->descs_num_before_header * sizeof(struct ena_eth_io_tx_desc));

	if (ena_dev->tx_max_header_size == 0) {
		ena_trc_err("the size of the LLQ entry is smaller than needed\n");
		return ENA_COM_INVAL;
	}

	ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV;

	return 0;
}

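/*
 * Worked example (illustrative): with a 128-byte LLQ descriptor list
 * entry and two descriptors placed before the header, the usable header
 * space is 128 - 2 * sizeof(struct ena_eth_io_tx_desc) = 128 - 2 * 16 =
 * 96 bytes; a result of zero means the configured entry is too small and
 * the function above rejects the configuration.
 */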