[dpdk.git] / ena_com.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright (c) 2015-2019 Amazon.com, Inc. or its affiliates.
3  * All rights reserved.
4  */
5
6 #include "ena_com.h"
7
8 /*****************************************************************************/
9 /*****************************************************************************/
10
11 /* Timeout in micro-sec */
12 #define ADMIN_CMD_TIMEOUT_US (3000000)
13
14 #define ENA_ASYNC_QUEUE_DEPTH 16
15 #define ENA_ADMIN_QUEUE_DEPTH 32
16
17
18 #define ENA_CTRL_MAJOR          0
19 #define ENA_CTRL_MINOR          0
20 #define ENA_CTRL_SUB_MINOR      1
21
22 #define MIN_ENA_CTRL_VER \
23         (((ENA_CTRL_MAJOR) << \
24         (ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT)) | \
25         ((ENA_CTRL_MINOR) << \
26         (ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT)) | \
27         (ENA_CTRL_SUB_MINOR))
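/* Editorial note (illustrative, not part of the original source): because
 * ENA_CTRL_MAJOR and ENA_CTRL_MINOR are both 0 above, MIN_ENA_CTRL_VER
 * reduces to ENA_CTRL_SUB_MINOR, i.e. 0x00000001; the register shifts only
 * come into play once a non-zero major/minor version is required.
 */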
28
29 #define ENA_DMA_ADDR_TO_UINT32_LOW(x)   ((u32)((u64)(x)))
30 #define ENA_DMA_ADDR_TO_UINT32_HIGH(x)  ((u32)(((u64)(x)) >> 32))
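/* Editorial example (not part of the original source): for a DMA address of
 * 0x123456780ULL, ENA_DMA_ADDR_TO_UINT32_LOW() yields 0x23456780 and
 * ENA_DMA_ADDR_TO_UINT32_HIGH() yields 0x00000001.
 */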
31
32 #define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF
33
34 #define ENA_COM_BOUNCE_BUFFER_CNTRL_CNT 4
35
36 #define ENA_REGS_ADMIN_INTR_MASK 1
37
38 #define ENA_POLL_MS     5
39
40 /*****************************************************************************/
41 /*****************************************************************************/
42 /*****************************************************************************/
43
44 enum ena_cmd_status {
45         ENA_CMD_SUBMITTED,
46         ENA_CMD_COMPLETED,
47         /* Abort - canceled by the driver */
48         ENA_CMD_ABORTED,
49 };
50
51 struct ena_comp_ctx {
52         ena_wait_event_t wait_event;
53         struct ena_admin_acq_entry *user_cqe;
54         u32 comp_size;
55         enum ena_cmd_status status;
56         /* status from the device */
57         u8 comp_status;
58         u8 cmd_opcode;
59         bool occupied;
60 };
61
62 struct ena_com_stats_ctx {
63         struct ena_admin_aq_get_stats_cmd get_cmd;
64         struct ena_admin_acq_get_stats_resp get_resp;
65 };
66
67 static inline int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
68                                        struct ena_common_mem_addr *ena_addr,
69                                        dma_addr_t addr)
70 {
71         if ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr) {
72                 ena_trc_err("DMA address has more bits than the device supports\n");
73                 return ENA_COM_INVAL;
74         }
75
76         ena_addr->mem_addr_low = lower_32_bits(addr);
77         ena_addr->mem_addr_high = (u16)upper_32_bits(addr);
78
79         return 0;
80 }
81
82 static int ena_com_admin_init_sq(struct ena_com_admin_queue *queue)
83 {
84         struct ena_com_admin_sq *sq = &queue->sq;
85         u16 size = ADMIN_SQ_SIZE(queue->q_depth);
86
87         ENA_MEM_ALLOC_COHERENT(queue->q_dmadev, size, sq->entries, sq->dma_addr,
88                                sq->mem_handle);
89
90         if (!sq->entries) {
91                 ena_trc_err("memory allocation failed\n");
92                 return ENA_COM_NO_MEM;
93         }
94
95         sq->head = 0;
96         sq->tail = 0;
97         sq->phase = 1;
98
99         sq->db_addr = NULL;
100
101         return 0;
102 }
103
104 static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue)
105 {
106         struct ena_com_admin_cq *cq = &queue->cq;
107         u16 size = ADMIN_CQ_SIZE(queue->q_depth);
108
109         ENA_MEM_ALLOC_COHERENT(queue->q_dmadev, size, cq->entries, cq->dma_addr,
110                                cq->mem_handle);
111
112         if (!cq->entries)  {
113                 ena_trc_err("memory allocation failed\n");
114                 return ENA_COM_NO_MEM;
115         }
116
117         cq->head = 0;
118         cq->phase = 1;
119
120         return 0;
121 }
122
123 static int ena_com_admin_init_aenq(struct ena_com_dev *dev,
124                                    struct ena_aenq_handlers *aenq_handlers)
125 {
126         struct ena_com_aenq *aenq = &dev->aenq;
127         u32 addr_low, addr_high, aenq_caps;
128         u16 size;
129
130         dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
131         size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH);
132         ENA_MEM_ALLOC_COHERENT(dev->dmadev, size,
133                         aenq->entries,
134                         aenq->dma_addr,
135                         aenq->mem_handle);
136
137         if (!aenq->entries) {
138                 ena_trc_err("memory allocation failed\n");
139                 return ENA_COM_NO_MEM;
140         }
141
142         aenq->head = aenq->q_depth;
143         aenq->phase = 1;
144
145         addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(aenq->dma_addr);
146         addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(aenq->dma_addr);
147
148         ENA_REG_WRITE32(dev->bus, addr_low, dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF);
149         ENA_REG_WRITE32(dev->bus, addr_high, dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF);
150
151         aenq_caps = 0;
152         aenq_caps |= dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
153         aenq_caps |= (sizeof(struct ena_admin_aenq_entry) <<
154                 ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
155                 ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
156         ENA_REG_WRITE32(dev->bus, aenq_caps, dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);
157
158         if (unlikely(!aenq_handlers)) {
159                 ena_trc_err("aenq handlers pointer is NULL\n");
160                 return ENA_COM_INVAL;
161         }
162
163         aenq->aenq_handlers = aenq_handlers;
164
165         return 0;
166 }
167
168 static inline void comp_ctxt_release(struct ena_com_admin_queue *queue,
169                                      struct ena_comp_ctx *comp_ctx)
170 {
171         comp_ctx->occupied = false;
172         ATOMIC32_DEC(&queue->outstanding_cmds);
173 }
174
175 static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *queue,
176                                           u16 command_id, bool capture)
177 {
178         if (unlikely(command_id >= queue->q_depth)) {
179                 ena_trc_err("command id is larger than the queue size. cmd_id: %u queue size %d\n",
180                             command_id, queue->q_depth);
181                 return NULL;
182         }
183
184         if (unlikely(queue->comp_ctx[command_id].occupied && capture)) {
185                 ena_trc_err("Completion context is occupied\n");
186                 return NULL;
187         }
188
189         if (capture) {
190                 ATOMIC32_INC(&queue->outstanding_cmds);
191                 queue->comp_ctx[command_id].occupied = true;
192         }
193
194         return &queue->comp_ctx[command_id];
195 }
196
197 static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
198                                                        struct ena_admin_aq_entry *cmd,
199                                                        size_t cmd_size_in_bytes,
200                                                        struct ena_admin_acq_entry *comp,
201                                                        size_t comp_size_in_bytes)
202 {
203         struct ena_comp_ctx *comp_ctx;
204         u16 tail_masked, cmd_id;
205         u16 queue_size_mask;
206         u16 cnt;
207
208         queue_size_mask = admin_queue->q_depth - 1;
209
210         tail_masked = admin_queue->sq.tail & queue_size_mask;
211
212         /* In case of queue FULL */
213         cnt = (u16)ATOMIC32_READ(&admin_queue->outstanding_cmds);
214         if (cnt >= admin_queue->q_depth) {
215                 ena_trc_dbg("admin queue is full.\n");
216                 admin_queue->stats.out_of_space++;
217                 return ERR_PTR(ENA_COM_NO_SPACE);
218         }
219
220         cmd_id = admin_queue->curr_cmd_id;
221
222         cmd->aq_common_descriptor.flags |= admin_queue->sq.phase &
223                 ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK;
224
225         cmd->aq_common_descriptor.command_id |= cmd_id &
226                 ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;
227
228         comp_ctx = get_comp_ctxt(admin_queue, cmd_id, true);
229         if (unlikely(!comp_ctx))
230                 return ERR_PTR(ENA_COM_INVAL);
231
232         comp_ctx->status = ENA_CMD_SUBMITTED;
233         comp_ctx->comp_size = (u32)comp_size_in_bytes;
234         comp_ctx->user_cqe = comp;
235         comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode;
236
237         ENA_WAIT_EVENT_CLEAR(comp_ctx->wait_event);
238
239         memcpy(&admin_queue->sq.entries[tail_masked], cmd, cmd_size_in_bytes);
240
241         admin_queue->curr_cmd_id = (admin_queue->curr_cmd_id + 1) &
242                 queue_size_mask;
243
244         admin_queue->sq.tail++;
245         admin_queue->stats.submitted_cmd++;
246
247         if (unlikely((admin_queue->sq.tail & queue_size_mask) == 0))
248                 admin_queue->sq.phase = !admin_queue->sq.phase;
249
250         ENA_DB_SYNC(&admin_queue->sq.mem_handle);
251         ENA_REG_WRITE32(admin_queue->bus, admin_queue->sq.tail,
252                         admin_queue->sq.db_addr);
253
254         return comp_ctx;
255 }
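/* Editorial note (illustrative, not part of the original source): the
 * doorbell above is written with the free-running (unmasked) tail counter,
 * while the entry itself is placed at tail & (q_depth - 1); e.g. with
 * q_depth = 32, the 33rd submitted command lands in slot 0 and the doorbell
 * value written is 33.
 */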
256
257 static inline int ena_com_init_comp_ctxt(struct ena_com_admin_queue *queue)
258 {
259         size_t size = queue->q_depth * sizeof(struct ena_comp_ctx);
260         struct ena_comp_ctx *comp_ctx;
261         u16 i;
262
263         queue->comp_ctx = ENA_MEM_ALLOC(queue->q_dmadev, size);
264         if (unlikely(!queue->comp_ctx)) {
265                 ena_trc_err("memory allocation failed\n");
266                 return ENA_COM_NO_MEM;
267         }
268
269         for (i = 0; i < queue->q_depth; i++) {
270                 comp_ctx = get_comp_ctxt(queue, i, false);
271                 if (comp_ctx)
272                         ENA_WAIT_EVENT_INIT(comp_ctx->wait_event);
273         }
274
275         return 0;
276 }
277
278 static struct ena_comp_ctx *ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
279                                                      struct ena_admin_aq_entry *cmd,
280                                                      size_t cmd_size_in_bytes,
281                                                      struct ena_admin_acq_entry *comp,
282                                                      size_t comp_size_in_bytes)
283 {
284         unsigned long flags = 0;
285         struct ena_comp_ctx *comp_ctx;
286
287         ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
288         if (unlikely(!admin_queue->running_state)) {
289                 ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
290                 return ERR_PTR(ENA_COM_NO_DEVICE);
291         }
292         comp_ctx = __ena_com_submit_admin_cmd(admin_queue, cmd,
293                                               cmd_size_in_bytes,
294                                               comp,
295                                               comp_size_in_bytes);
296         if (IS_ERR(comp_ctx))
297                 admin_queue->running_state = false;
298         ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
299
300         return comp_ctx;
301 }
302
303 static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
304                               struct ena_com_create_io_ctx *ctx,
305                               struct ena_com_io_sq *io_sq)
306 {
307         size_t size;
308         int dev_node = 0;
309
310         memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr));
311
312         io_sq->dma_addr_bits = (u8)ena_dev->dma_addr_bits;
313         io_sq->desc_entry_size =
314                 (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
315                 sizeof(struct ena_eth_io_tx_desc) :
316                 sizeof(struct ena_eth_io_rx_desc);
317
318         size = io_sq->desc_entry_size * io_sq->q_depth;
319         io_sq->bus = ena_dev->bus;
320
321         if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
322                 ENA_MEM_ALLOC_COHERENT_NODE(ena_dev->dmadev,
323                                             size,
324                                             io_sq->desc_addr.virt_addr,
325                                             io_sq->desc_addr.phys_addr,
326                                             io_sq->desc_addr.mem_handle,
327                                             ctx->numa_node,
328                                             dev_node);
329                 if (!io_sq->desc_addr.virt_addr) {
330                         ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
331                                                size,
332                                                io_sq->desc_addr.virt_addr,
333                                                io_sq->desc_addr.phys_addr,
334                                                io_sq->desc_addr.mem_handle);
335                 }
336
337                 if (!io_sq->desc_addr.virt_addr) {
338                         ena_trc_err("memory allocation failed\n");
339                         return ENA_COM_NO_MEM;
340                 }
341         }
342
343         if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
344                 /* Allocate bounce buffers */
345                 io_sq->bounce_buf_ctrl.buffer_size = ena_dev->llq_info.desc_list_entry_size;
346                 io_sq->bounce_buf_ctrl.buffers_num = ENA_COM_BOUNCE_BUFFER_CNTRL_CNT;
347                 io_sq->bounce_buf_ctrl.next_to_use = 0;
348
349                 size = io_sq->bounce_buf_ctrl.buffer_size * io_sq->bounce_buf_ctrl.buffers_num;
350
351                 ENA_MEM_ALLOC_NODE(ena_dev->dmadev,
352                                    size,
353                                    io_sq->bounce_buf_ctrl.base_buffer,
354                                    ctx->numa_node,
355                                    dev_node);
356                 if (!io_sq->bounce_buf_ctrl.base_buffer)
357                         io_sq->bounce_buf_ctrl.base_buffer = ENA_MEM_ALLOC(ena_dev->dmadev, size);
358
359                 if (!io_sq->bounce_buf_ctrl.base_buffer) {
360                         ena_trc_err("bounce buffer memory allocation failed\n");
361                         return ENA_COM_NO_MEM;
362                 }
363
364                 memcpy(&io_sq->llq_info, &ena_dev->llq_info, sizeof(io_sq->llq_info));
365
366                 /* Initialize the first bounce buffer */
367                 io_sq->llq_buf_ctrl.curr_bounce_buf =
368                         ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
369                 memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
370                        0x0, io_sq->llq_info.desc_list_entry_size);
371                 io_sq->llq_buf_ctrl.descs_left_in_line =
372                         io_sq->llq_info.descs_num_before_header;
373
374                 if (io_sq->llq_info.max_entries_in_tx_burst > 0)
375                         io_sq->entries_in_tx_burst_left =
376                                 io_sq->llq_info.max_entries_in_tx_burst;
377         }
378
379         io_sq->tail = 0;
380         io_sq->next_to_comp = 0;
381         io_sq->phase = 1;
382
383         return 0;
384 }
385
386 static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
387                               struct ena_com_create_io_ctx *ctx,
388                               struct ena_com_io_cq *io_cq)
389 {
390         size_t size;
391         int prev_node = 0;
392
393         memset(&io_cq->cdesc_addr, 0x0, sizeof(io_cq->cdesc_addr));
394
395         /* Use the basic completion descriptor for Rx */
396         io_cq->cdesc_entry_size_in_bytes =
397                 (io_cq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
398                 sizeof(struct ena_eth_io_tx_cdesc) :
399                 sizeof(struct ena_eth_io_rx_cdesc_base);
400
401         size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
402         io_cq->bus = ena_dev->bus;
403
404         ENA_MEM_ALLOC_COHERENT_NODE(ena_dev->dmadev,
405                         size,
406                         io_cq->cdesc_addr.virt_addr,
407                         io_cq->cdesc_addr.phys_addr,
408                         io_cq->cdesc_addr.mem_handle,
409                         ctx->numa_node,
410                         prev_node);
411         if (!io_cq->cdesc_addr.virt_addr) {
412                 ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
413                                        size,
414                                        io_cq->cdesc_addr.virt_addr,
415                                        io_cq->cdesc_addr.phys_addr,
416                                        io_cq->cdesc_addr.mem_handle);
417         }
418
419         if (!io_cq->cdesc_addr.virt_addr) {
420                 ena_trc_err("memory allocation failed\n");
421                 return ENA_COM_NO_MEM;
422         }
423
424         io_cq->phase = 1;
425         io_cq->head = 0;
426
427         return 0;
428 }
429
430 static void ena_com_handle_single_admin_completion(struct ena_com_admin_queue *admin_queue,
431                                                    struct ena_admin_acq_entry *cqe)
432 {
433         struct ena_comp_ctx *comp_ctx;
434         u16 cmd_id;
435
436         cmd_id = cqe->acq_common_descriptor.command &
437                 ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;
438
439         comp_ctx = get_comp_ctxt(admin_queue, cmd_id, false);
440         if (unlikely(!comp_ctx)) {
441                 ena_trc_err("comp_ctx is NULL. Changing the admin queue running state\n");
442                 admin_queue->running_state = false;
443                 return;
444         }
445
446         comp_ctx->status = ENA_CMD_COMPLETED;
447         comp_ctx->comp_status = cqe->acq_common_descriptor.status;
448
449         if (comp_ctx->user_cqe)
450                 memcpy(comp_ctx->user_cqe, (void *)cqe, comp_ctx->comp_size);
451
452         if (!admin_queue->polling)
453                 ENA_WAIT_EVENT_SIGNAL(comp_ctx->wait_event);
454 }
455
456 static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_queue)
457 {
458         struct ena_admin_acq_entry *cqe = NULL;
459         u16 comp_num = 0;
460         u16 head_masked;
461         u8 phase;
462
463         head_masked = admin_queue->cq.head & (admin_queue->q_depth - 1);
464         phase = admin_queue->cq.phase;
465
466         cqe = &admin_queue->cq.entries[head_masked];
467
468         /* Go over all the completions */
469         while ((READ_ONCE8(cqe->acq_common_descriptor.flags) &
470                         ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
471                 /* Do not read the rest of the completion entry before the
472                  * phase bit has been validated
473                  */
474                 dma_rmb();
475                 ena_com_handle_single_admin_completion(admin_queue, cqe);
476
477                 head_masked++;
478                 comp_num++;
479                 if (unlikely(head_masked == admin_queue->q_depth)) {
480                         head_masked = 0;
481                         phase = !phase;
482                 }
483
484                 cqe = &admin_queue->cq.entries[head_masked];
485         }
486
487         admin_queue->cq.head += comp_num;
488         admin_queue->cq.phase = phase;
489         admin_queue->sq.head += comp_num;
490         admin_queue->stats.completed_cmd += comp_num;
491 }
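/* Editorial note (illustrative, not part of the original source): the phase
 * bit is how the driver tells valid completions from stale queue memory.
 * For example, with a queue of depth 8 the device writes its first pass of
 * entries 0..7 with phase 1, the second pass with phase 0, and so on; the
 * loop in ena_com_handle_admin_completion() stops as soon as an entry's
 * phase bit no longer matches the expected value, meaning that entry has not
 * been written yet in the current pass.
 */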
492
493 static int ena_com_comp_status_to_errno(u8 comp_status)
494 {
495         if (unlikely(comp_status != 0))
496                 ena_trc_err("admin command failed [%u]\n", comp_status);
497
498         if (unlikely(comp_status > ENA_ADMIN_UNKNOWN_ERROR))
499                 return ENA_COM_INVAL;
500
501         switch (comp_status) {
502         case ENA_ADMIN_SUCCESS:
503                 return 0;
504         case ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE:
505                 return ENA_COM_NO_MEM;
506         case ENA_ADMIN_UNSUPPORTED_OPCODE:
507                 return ENA_COM_UNSUPPORTED;
508         case ENA_ADMIN_BAD_OPCODE:
509         case ENA_ADMIN_MALFORMED_REQUEST:
510         case ENA_ADMIN_ILLEGAL_PARAMETER:
511         case ENA_ADMIN_UNKNOWN_ERROR:
512                 return ENA_COM_INVAL;
513         }
514
515         return 0;
516 }
517
518 static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx,
519                                                      struct ena_com_admin_queue *admin_queue)
520 {
521         unsigned long flags = 0;
522         uint64_t timeout;
523         int ret;
524
525         timeout = ENA_GET_SYSTEM_TIMEOUT(admin_queue->completion_timeout);
526
527         while (1) {
528                 ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
529                 ena_com_handle_admin_completion(admin_queue);
530                 ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
531
532                 if (comp_ctx->status != ENA_CMD_SUBMITTED)
533                         break;
534
535                 if (ENA_TIME_EXPIRE(timeout)) {
536                         ena_trc_err("Wait for completion (polling) timeout\n");
537                         /* ENA didn't have any completion */
538                         ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
539                         admin_queue->stats.no_completion++;
540                         admin_queue->running_state = false;
541                         ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
542
543                         ret = ENA_COM_TIMER_EXPIRED;
544                         goto err;
545                 }
546
547                 ENA_MSLEEP(ENA_POLL_MS);
548         }
549
550         if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
551                 ena_trc_err("Command was aborted\n");
552                 ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
553                 admin_queue->stats.aborted_cmd++;
554                 ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
555                 ret = ENA_COM_NO_DEVICE;
556                 goto err;
557         }
558
559         ENA_WARN(comp_ctx->status != ENA_CMD_COMPLETED,
560                  "Invalid comp status %d\n", comp_ctx->status);
561
562         ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
563 err:
564         comp_ctxt_release(admin_queue, comp_ctx);
565         return ret;
566 }
567
568 /**
569  * Set the LLQ configurations of the firmware
570  *
571  * The driver provides only the enabled feature values to the FW,
572  * which, in turn, checks whether they are supported.
573  */
574 static int ena_com_set_llq(struct ena_com_dev *ena_dev)
575 {
576         struct ena_com_admin_queue *admin_queue;
577         struct ena_admin_set_feat_cmd cmd;
578         struct ena_admin_set_feat_resp resp;
579         struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
580         int ret;
581
582         memset(&cmd, 0x0, sizeof(cmd));
583         admin_queue = &ena_dev->admin_queue;
584
585         cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
586         cmd.feat_common.feature_id = ENA_ADMIN_LLQ;
587
588         cmd.u.llq.header_location_ctrl_enabled = llq_info->header_location_ctrl;
589         cmd.u.llq.entry_size_ctrl_enabled = llq_info->desc_list_entry_size_ctrl;
590         cmd.u.llq.desc_num_before_header_enabled = llq_info->descs_num_before_header;
591         cmd.u.llq.descriptors_stride_ctrl_enabled = llq_info->desc_stride_ctrl;
592
593         ret = ena_com_execute_admin_command(admin_queue,
594                                             (struct ena_admin_aq_entry *)&cmd,
595                                             sizeof(cmd),
596                                             (struct ena_admin_acq_entry *)&resp,
597                                             sizeof(resp));
598
599         if (unlikely(ret))
600                 ena_trc_err("Failed to set LLQ configurations: %d\n", ret);
601
602         return ret;
603 }
604
605 static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
606                                    struct ena_admin_feature_llq_desc *llq_features,
607                                    struct ena_llq_configurations *llq_default_cfg)
608 {
609         struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
610         u16 supported_feat;
611         int rc;
612
613         memset(llq_info, 0, sizeof(*llq_info));
614
615         supported_feat = llq_features->header_location_ctrl_supported;
616
617         if (likely(supported_feat & llq_default_cfg->llq_header_location)) {
618                 llq_info->header_location_ctrl = llq_default_cfg->llq_header_location;
619         } else {
620                 ena_trc_err("Invalid header location control, supported: 0x%x\n",
621                             supported_feat);
622                 return -EINVAL;
623         }
624
625         if (likely(llq_info->header_location_ctrl == ENA_ADMIN_INLINE_HEADER)) {
626                 llq_info->inline_header = true;
627
628                 supported_feat = llq_features->descriptors_stride_ctrl_supported;
629                 if (likely(supported_feat & llq_default_cfg->llq_stride_ctrl)) {
630                         llq_info->desc_stride_ctrl = llq_default_cfg->llq_stride_ctrl;
631                 } else  {
632                         if (supported_feat & ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY) {
633                                 llq_info->desc_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
634                         } else if (supported_feat & ENA_ADMIN_SINGLE_DESC_PER_ENTRY) {
635                                 llq_info->desc_stride_ctrl = ENA_ADMIN_SINGLE_DESC_PER_ENTRY;
636                         } else {
637                                 ena_trc_err("Invalid desc_stride_ctrl, supported: 0x%x\n",
638                                             supported_feat);
639                                 return -EINVAL;
640                         }
641
642                         ena_trc_err("Default llq stride ctrl is not supported, performing fallback, "
643                                     "default: 0x%x, supported: 0x%x, used: 0x%x\n",
644                                     llq_default_cfg->llq_stride_ctrl,
645                                     supported_feat,
646                                     llq_info->desc_stride_ctrl);
647                 }
648         } else {
649                 llq_info->inline_header = false;
650                 llq_info->desc_stride_ctrl = 0;
651         }
652
653         supported_feat = llq_features->entry_size_ctrl_supported;
654         if (likely(supported_feat & llq_default_cfg->llq_ring_entry_size)) {
655                 llq_info->desc_list_entry_size_ctrl = llq_default_cfg->llq_ring_entry_size;
656                 llq_info->desc_list_entry_size = llq_default_cfg->llq_ring_entry_size_value;
657         } else {
658                 if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_128B) {
659                         llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
660                         llq_info->desc_list_entry_size = 128;
661                 } else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_192B) {
662                         llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_192B;
663                         llq_info->desc_list_entry_size = 192;
664                 } else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_256B) {
665                         llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_256B;
666                         llq_info->desc_list_entry_size = 256;
667                 } else {
668                         ena_trc_err("Invalid entry_size_ctrl, supported: 0x%x\n", supported_feat);
669                         return -EINVAL;
670                 }
671
672                 ena_trc_err("Default llq ring entry size is not supported, performing fallback, "
673                             "default: 0x%x, supported: 0x%x, used: 0x%x\n",
674                             llq_default_cfg->llq_ring_entry_size,
675                             supported_feat,
676                             llq_info->desc_list_entry_size);
677         }
678         if (unlikely(llq_info->desc_list_entry_size & 0x7)) {
679                 /* The desc list entry size should be a whole multiple of 8.
680                  * This requirement comes from __iowrite64_copy()
681                  */
682                 ena_trc_err("illegal entry size %d\n",
683                             llq_info->desc_list_entry_size);
684                 return -EINVAL;
685         }
686
687         if (llq_info->desc_stride_ctrl == ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY)
688                 llq_info->descs_per_entry = llq_info->desc_list_entry_size /
689                         sizeof(struct ena_eth_io_tx_desc);
690         else
691                 llq_info->descs_per_entry = 1;
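                /* Editorial example (assuming, for illustration, that
                 * sizeof(struct ena_eth_io_tx_desc) is 16 bytes): a 128 B desc
                 * list entry in multiple-descs mode gives 128 / 16 = 8
                 * descriptors per LLQ entry.
                 */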
692
693         supported_feat = llq_features->desc_num_before_header_supported;
694         if (likely(supported_feat & llq_default_cfg->llq_num_decs_before_header)) {
695                 llq_info->descs_num_before_header = llq_default_cfg->llq_num_decs_before_header;
696         } else {
697                 if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2) {
698                         llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
699                 } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1) {
700                         llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1;
701                 } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4) {
702                         llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4;
703                 } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8) {
704                         llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8;
705                 } else {
706                         ena_trc_err("Invalid descs_num_before_header, supported: 0x%x\n",
707                                     supported_feat);
708                         return -EINVAL;
709                 }
710
711                 ena_trc_err("Default llq num descs before header is not supported, performing fallback, "
712                             "default: 0x%x, supported: 0x%x, used: 0x%x\n",
713                             llq_default_cfg->llq_num_decs_before_header,
714                             supported_feat,
715                             llq_info->descs_num_before_header);
716         }
717
718         llq_info->max_entries_in_tx_burst =
719                 (u16)(llq_features->max_tx_burst_size / llq_default_cfg->llq_ring_entry_size_value);
720
721         rc = ena_com_set_llq(ena_dev);
722         if (rc)
723                 ena_trc_err("Cannot set LLQ configuration: %d\n", rc);
724
725         return 0;
726 }
727
728
729
730 static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,
731                                                         struct ena_com_admin_queue *admin_queue)
732 {
733         unsigned long flags = 0;
734         int ret;
735
736         ENA_WAIT_EVENT_WAIT(comp_ctx->wait_event,
737                             admin_queue->completion_timeout);
738
739         /* In case the command wasn't completed, find out the root cause.
740          * There might be 2 kinds of errors:
741          * 1) No completion (timeout reached)
742          * 2) There is a completion but the driver didn't receive an MSI-X interrupt.
743          */
744         if (unlikely(comp_ctx->status == ENA_CMD_SUBMITTED)) {
745                 ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
746                 ena_com_handle_admin_completion(admin_queue);
747                 admin_queue->stats.no_completion++;
748                 ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
749
750                 if (comp_ctx->status == ENA_CMD_COMPLETED)
751                         ena_trc_err("The ena device sent a completion but the driver didn't receive a MSI-X interrupt (cmd %d)\n",
752                                     comp_ctx->cmd_opcode);
753                 else
754                         ena_trc_err("The ena device didn't send a completion for the admin cmd %d status %d\n",
755                                     comp_ctx->cmd_opcode, comp_ctx->status);
756
757                 admin_queue->running_state = false;
758                 ret = ENA_COM_TIMER_EXPIRED;
759                 goto err;
760         }
761
762         ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
763 err:
764         comp_ctxt_release(admin_queue, comp_ctx);
765         return ret;
766 }
767
768 /* This method reads a hardware device register by posting a write
769  * and waiting for the response.
770  * On timeout the function returns ENA_MMIO_READ_TIMEOUT.
771  */
772 static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
773 {
774         struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
775         volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp =
776                 mmio_read->read_resp;
777         u32 mmio_read_reg, ret, i;
778         unsigned long flags = 0;
779         u32 timeout = mmio_read->reg_read_to;
780
781         ENA_MIGHT_SLEEP();
782
783         if (timeout == 0)
784                 timeout = ENA_REG_READ_TIMEOUT;
785
786         /* If readless is disabled, perform regular read */
787         if (!mmio_read->readless_supported)
788                 return ENA_REG_READ32(ena_dev->bus, ena_dev->reg_bar + offset);
789
790         ENA_SPINLOCK_LOCK(mmio_read->lock, flags);
791         mmio_read->seq_num++;
792
793         read_resp->req_id = mmio_read->seq_num + 0xDEAD;
794         mmio_read_reg = (offset << ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT) &
795                         ENA_REGS_MMIO_REG_READ_REG_OFF_MASK;
796         mmio_read_reg |= mmio_read->seq_num &
797                         ENA_REGS_MMIO_REG_READ_REQ_ID_MASK;
798
799         ENA_REG_WRITE32(ena_dev->bus, mmio_read_reg,
800                         ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);
801
802         for (i = 0; i < timeout; i++) {
803                 if (READ_ONCE16(read_resp->req_id) == mmio_read->seq_num)
804                         break;
805
806                 ENA_UDELAY(1);
807         }
808
809         if (unlikely(i == timeout)) {
810                 ena_trc_err("Reading reg timed out. expected: req id[%hu] offset[%hu], actual: req id[%hu] offset[%hu]\n",
811                             mmio_read->seq_num,
812                             offset,
813                             read_resp->req_id,
814                             read_resp->reg_off);
815                 ret = ENA_MMIO_READ_TIMEOUT;
816                 goto err;
817         }
818
819         if (read_resp->reg_off != offset) {
820                 ena_trc_err("Read failure: wrong offset provided\n");
821                 ret = ENA_MMIO_READ_TIMEOUT;
822         } else {
823                 ret = read_resp->reg_val;
824         }
825 err:
826         ENA_SPINLOCK_UNLOCK(mmio_read->lock, flags);
827
828         return ret;
829 }
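/* Editorial usage sketch (not part of the original source), mirroring the
 * callers in this file such as wait_for_reset_state() below:
 *
 *     u32 val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
 *     if (unlikely(val == ENA_MMIO_READ_TIMEOUT))
 *             return ENA_COM_TIMER_EXPIRED;
 *
 * Callers must always check the returned value against the
 * ENA_MMIO_READ_TIMEOUT sentinel before using it.
 */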
830
831 /* There are two ways to wait for completion.
832  * Polling mode - poll until the completion is available.
833  * Async mode - sleep on a wait queue until the completion is ready
834  * (or the timeout expires).
835  * In async mode it is expected that the IRQ handler calls
836  * ena_com_handle_admin_completion() to mark the completions.
837  */
838 static int ena_com_wait_and_process_admin_cq(struct ena_comp_ctx *comp_ctx,
839                                              struct ena_com_admin_queue *admin_queue)
840 {
841         if (admin_queue->polling)
842                 return ena_com_wait_and_process_admin_cq_polling(comp_ctx,
843                                                                  admin_queue);
844
845         return ena_com_wait_and_process_admin_cq_interrupts(comp_ctx,
846                                                             admin_queue);
847 }
848
849 static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
850                                  struct ena_com_io_sq *io_sq)
851 {
852         struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
853         struct ena_admin_aq_destroy_sq_cmd destroy_cmd;
854         struct ena_admin_acq_destroy_sq_resp_desc destroy_resp;
855         u8 direction;
856         int ret;
857
858         memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));
859
860         if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
861                 direction = ENA_ADMIN_SQ_DIRECTION_TX;
862         else
863                 direction = ENA_ADMIN_SQ_DIRECTION_RX;
864
865         destroy_cmd.sq.sq_identity |= (direction <<
866                 ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT) &
867                 ENA_ADMIN_SQ_SQ_DIRECTION_MASK;
868
869         destroy_cmd.sq.sq_idx = io_sq->idx;
870         destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_SQ;
871
872         ret = ena_com_execute_admin_command(admin_queue,
873                                             (struct ena_admin_aq_entry *)&destroy_cmd,
874                                             sizeof(destroy_cmd),
875                                             (struct ena_admin_acq_entry *)&destroy_resp,
876                                             sizeof(destroy_resp));
877
878         if (unlikely(ret && (ret != ENA_COM_NO_DEVICE)))
879                 ena_trc_err("Failed to destroy IO SQ. error: %d\n", ret);
880
881         return ret;
882 }
883
884 static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
885                                   struct ena_com_io_sq *io_sq,
886                                   struct ena_com_io_cq *io_cq)
887 {
888         size_t size;
889
890         if (io_cq->cdesc_addr.virt_addr) {
891                 size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
892
893                 ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
894                                       size,
895                                       io_cq->cdesc_addr.virt_addr,
896                                       io_cq->cdesc_addr.phys_addr,
897                                       io_cq->cdesc_addr.mem_handle);
898
899                 io_cq->cdesc_addr.virt_addr = NULL;
900         }
901
902         if (io_sq->desc_addr.virt_addr) {
903                 size = io_sq->desc_entry_size * io_sq->q_depth;
904
905                 ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
906                                       size,
907                                       io_sq->desc_addr.virt_addr,
908                                       io_sq->desc_addr.phys_addr,
909                                       io_sq->desc_addr.mem_handle);
910
911                 io_sq->desc_addr.virt_addr = NULL;
912         }
913
914         if (io_sq->bounce_buf_ctrl.base_buffer) {
915                 size = io_sq->llq_info.desc_list_entry_size * ENA_COM_BOUNCE_BUFFER_CNTRL_CNT;
916                 ENA_MEM_FREE(ena_dev->dmadev, io_sq->bounce_buf_ctrl.base_buffer);
917                 io_sq->bounce_buf_ctrl.base_buffer = NULL;
918         }
919 }
920
921 static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
922                                 u16 exp_state)
923 {
924         u32 val, i;
925
926         /* Convert timeout from resolution of 100ms to ENA_POLL_MS */
927         timeout = (timeout * 100) / ENA_POLL_MS;
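        /* Editorial example: a timeout argument of 3 (i.e. 3 x 100 ms) becomes
         * 3 * 100 / 5 = 60 polling iterations of ENA_POLL_MS (5 ms) each.
         */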
928
929         for (i = 0; i < timeout; i++) {
930                 val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
931
932                 if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) {
933                         ena_trc_err("Reg read timeout occurred\n");
934                         return ENA_COM_TIMER_EXPIRED;
935                 }
936
937                 if ((val & ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK) ==
938                         exp_state)
939                         return 0;
940
941                 ENA_MSLEEP(ENA_POLL_MS);
942         }
943
944         return ENA_COM_TIMER_EXPIRED;
945 }
946
947 static bool ena_com_check_supported_feature_id(struct ena_com_dev *ena_dev,
948                                                enum ena_admin_aq_feature_id feature_id)
949 {
950         u32 feature_mask = 1 << feature_id;
951
952         /* Device attributes are always supported */
953         if ((feature_id != ENA_ADMIN_DEVICE_ATTRIBUTES) &&
954             !(ena_dev->supported_features & feature_mask))
955                 return false;
956
957         return true;
958 }
959
960 static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
961                                   struct ena_admin_get_feat_resp *get_resp,
962                                   enum ena_admin_aq_feature_id feature_id,
963                                   dma_addr_t control_buf_dma_addr,
964                                   u32 control_buff_size,
965                                   u8 feature_ver)
966 {
967         struct ena_com_admin_queue *admin_queue;
968         struct ena_admin_get_feat_cmd get_cmd;
969         int ret;
970
971         if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
972                 ena_trc_dbg("Feature %d isn't supported\n", feature_id);
973                 return ENA_COM_UNSUPPORTED;
974         }
975
976         memset(&get_cmd, 0x0, sizeof(get_cmd));
977         admin_queue = &ena_dev->admin_queue;
978
979         get_cmd.aq_common_descriptor.opcode = ENA_ADMIN_GET_FEATURE;
980
981         if (control_buff_size)
982                 get_cmd.aq_common_descriptor.flags =
983                         ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
984         else
985                 get_cmd.aq_common_descriptor.flags = 0;
986
987         ret = ena_com_mem_addr_set(ena_dev,
988                                    &get_cmd.control_buffer.address,
989                                    control_buf_dma_addr);
990         if (unlikely(ret)) {
991                 ena_trc_err("memory address set failed\n");
992                 return ret;
993         }
994
995         get_cmd.control_buffer.length = control_buff_size;
996         get_cmd.feat_common.feature_version = feature_ver;
997         get_cmd.feat_common.feature_id = feature_id;
998
999         ret = ena_com_execute_admin_command(admin_queue,
1000                                             (struct ena_admin_aq_entry *)
1001                                             &get_cmd,
1002                                             sizeof(get_cmd),
1003                                             (struct ena_admin_acq_entry *)
1004                                             get_resp,
1005                                             sizeof(*get_resp));
1006
1007         if (unlikely(ret))
1008                 ena_trc_err("Failed to submit get_feature command %d error: %d\n",
1009                             feature_id, ret);
1010
1011         return ret;
1012 }
1013
1014 static int ena_com_get_feature(struct ena_com_dev *ena_dev,
1015                                struct ena_admin_get_feat_resp *get_resp,
1016                                enum ena_admin_aq_feature_id feature_id,
1017                                u8 feature_ver)
1018 {
1019         return ena_com_get_feature_ex(ena_dev,
1020                                       get_resp,
1021                                       feature_id,
1022                                       0,
1023                                       0,
1024                                       feature_ver);
1025 }
1026
1027 static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
1028 {
1029         struct ena_rss *rss = &ena_dev->rss;
1030
1031         ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
1032                                sizeof(*rss->hash_key),
1033                                rss->hash_key,
1034                                rss->hash_key_dma_addr,
1035                                rss->hash_key_mem_handle);
1036
1037         if (unlikely(!rss->hash_key))
1038                 return ENA_COM_NO_MEM;
1039
1040         return 0;
1041 }
1042
1043 static void ena_com_hash_key_destroy(struct ena_com_dev *ena_dev)
1044 {
1045         struct ena_rss *rss = &ena_dev->rss;
1046
1047         if (rss->hash_key)
1048                 ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
1049                                       sizeof(*rss->hash_key),
1050                                       rss->hash_key,
1051                                       rss->hash_key_dma_addr,
1052                                       rss->hash_key_mem_handle);
1053         rss->hash_key = NULL;
1054 }
1055
1056 static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev)
1057 {
1058         struct ena_rss *rss = &ena_dev->rss;
1059
1060         ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
1061                                sizeof(*rss->hash_ctrl),
1062                                rss->hash_ctrl,
1063                                rss->hash_ctrl_dma_addr,
1064                                rss->hash_ctrl_mem_handle);
1065
1066         if (unlikely(!rss->hash_ctrl))
1067                 return ENA_COM_NO_MEM;
1068
1069         return 0;
1070 }
1071
1072 static void ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev)
1073 {
1074         struct ena_rss *rss = &ena_dev->rss;
1075
1076         if (rss->hash_ctrl)
1077                 ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
1078                                       sizeof(*rss->hash_ctrl),
1079                                       rss->hash_ctrl,
1080                                       rss->hash_ctrl_dma_addr,
1081                                       rss->hash_ctrl_mem_handle);
1082         rss->hash_ctrl = NULL;
1083 }
1084
1085 static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
1086                                            u16 log_size)
1087 {
1088         struct ena_rss *rss = &ena_dev->rss;
1089         struct ena_admin_get_feat_resp get_resp;
1090         size_t tbl_size;
1091         int ret;
1092
1093         ret = ena_com_get_feature(ena_dev, &get_resp,
1094                                   ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG, 0);
1095         if (unlikely(ret))
1096                 return ret;
1097
1098         if ((get_resp.u.ind_table.min_size > log_size) ||
1099             (get_resp.u.ind_table.max_size < log_size)) {
1100                 ena_trc_err("indirect table size doesn't fit. requested size: %d, while min is: %d and max is: %d\n",
1101                             1 << log_size,
1102                             1 << get_resp.u.ind_table.min_size,
1103                             1 << get_resp.u.ind_table.max_size);
1104                 return ENA_COM_INVAL;
1105         }
1106
1107         tbl_size = (1ULL << log_size) *
1108                 sizeof(struct ena_admin_rss_ind_table_entry);
1109
1110         ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
1111                              tbl_size,
1112                              rss->rss_ind_tbl,
1113                              rss->rss_ind_tbl_dma_addr,
1114                              rss->rss_ind_tbl_mem_handle);
1115         if (unlikely(!rss->rss_ind_tbl))
1116                 goto mem_err1;
1117
1118         tbl_size = (1ULL << log_size) * sizeof(u16);
1119         rss->host_rss_ind_tbl =
1120                 ENA_MEM_ALLOC(ena_dev->dmadev, tbl_size);
1121         if (unlikely(!rss->host_rss_ind_tbl))
1122                 goto mem_err2;
1123
1124         rss->tbl_log_size = log_size;
1125
1126         return 0;
1127
1128 mem_err2:
1129         tbl_size = (1ULL << log_size) *
1130                 sizeof(struct ena_admin_rss_ind_table_entry);
1131
1132         ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
1133                               tbl_size,
1134                               rss->rss_ind_tbl,
1135                               rss->rss_ind_tbl_dma_addr,
1136                               rss->rss_ind_tbl_mem_handle);
1137         rss->rss_ind_tbl = NULL;
1138 mem_err1:
1139         rss->tbl_log_size = 0;
1140         return ENA_COM_NO_MEM;
1141 }
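/* Editorial example (not part of the original source): for log_size = 7 the
 * device-facing table above holds 1 << 7 = 128 entries of
 * struct ena_admin_rss_ind_table_entry, and the host shadow table holds
 * 128 u16 queue ids.
 */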
1142
1143 static void ena_com_indirect_table_destroy(struct ena_com_dev *ena_dev)
1144 {
1145         struct ena_rss *rss = &ena_dev->rss;
1146         size_t tbl_size = (1ULL << rss->tbl_log_size) *
1147                 sizeof(struct ena_admin_rss_ind_table_entry);
1148
1149         if (rss->rss_ind_tbl)
1150                 ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
1151                                       tbl_size,
1152                                       rss->rss_ind_tbl,
1153                                       rss->rss_ind_tbl_dma_addr,
1154                                       rss->rss_ind_tbl_mem_handle);
1155         rss->rss_ind_tbl = NULL;
1156
1157         if (rss->host_rss_ind_tbl)
1158                 ENA_MEM_FREE(ena_dev->dmadev, rss->host_rss_ind_tbl);
1159         rss->host_rss_ind_tbl = NULL;
1160 }
1161
1162 static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
1163                                 struct ena_com_io_sq *io_sq, u16 cq_idx)
1164 {
1165         struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1166         struct ena_admin_aq_create_sq_cmd create_cmd;
1167         struct ena_admin_acq_create_sq_resp_desc cmd_completion;
1168         u8 direction;
1169         int ret;
1170
1171         memset(&create_cmd, 0x0, sizeof(create_cmd));
1172
1173         create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_SQ;
1174
1175         if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
1176                 direction = ENA_ADMIN_SQ_DIRECTION_TX;
1177         else
1178                 direction = ENA_ADMIN_SQ_DIRECTION_RX;
1179
1180         create_cmd.sq_identity |= (direction <<
1181                 ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT) &
1182                 ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK;
1183
1184         create_cmd.sq_caps_2 |= io_sq->mem_queue_type &
1185                 ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK;
1186
1187         create_cmd.sq_caps_2 |= (ENA_ADMIN_COMPLETION_POLICY_DESC <<
1188                 ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT) &
1189                 ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK;
1190
1191         create_cmd.sq_caps_3 |=
1192                 ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK;
1193
1194         create_cmd.cq_idx = cq_idx;
1195         create_cmd.sq_depth = io_sq->q_depth;
1196
1197         if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
1198                 ret = ena_com_mem_addr_set(ena_dev,
1199                                            &create_cmd.sq_ba,
1200                                            io_sq->desc_addr.phys_addr);
1201                 if (unlikely(ret)) {
1202                         ena_trc_err("memory address set failed\n");
1203                         return ret;
1204                 }
1205         }
1206
1207         ret = ena_com_execute_admin_command(admin_queue,
1208                                             (struct ena_admin_aq_entry *)&create_cmd,
1209                                             sizeof(create_cmd),
1210                                             (struct ena_admin_acq_entry *)&cmd_completion,
1211                                             sizeof(cmd_completion));
1212         if (unlikely(ret)) {
1213                 ena_trc_err("Failed to create IO SQ. error: %d\n", ret);
1214                 return ret;
1215         }
1216
1217         io_sq->idx = cmd_completion.sq_idx;
1218
1219         io_sq->db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1220                 (uintptr_t)cmd_completion.sq_doorbell_offset);
1221
1222         if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
1223                 io_sq->header_addr = (u8 __iomem *)((uintptr_t)ena_dev->mem_bar
1224                                 + cmd_completion.llq_headers_offset);
1225
1226                 io_sq->desc_addr.pbuf_dev_addr =
1227                         (u8 __iomem *)((uintptr_t)ena_dev->mem_bar +
1228                         cmd_completion.llq_descriptors_offset);
1229         }
1230
1231         ena_trc_dbg("created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth);
1232
1233         return ret;
1234 }
1235
1236 static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev)
1237 {
1238         struct ena_rss *rss = &ena_dev->rss;
1239         struct ena_com_io_sq *io_sq;
1240         u16 qid;
1241         int i;
1242
1243         for (i = 0; i < 1 << rss->tbl_log_size; i++) {
1244                 qid = rss->host_rss_ind_tbl[i];
1245                 if (qid >= ENA_TOTAL_NUM_QUEUES)
1246                         return ENA_COM_INVAL;
1247
1248                 io_sq = &ena_dev->io_sq_queues[qid];
1249
1250                 if (io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX)
1251                         return ENA_COM_INVAL;
1252
1253                 rss->rss_ind_tbl[i].cq_idx = io_sq->idx;
1254         }
1255
1256         return 0;
1257 }
1258
1259 static int ena_com_ind_tbl_convert_from_device(struct ena_com_dev *ena_dev)
1260 {
1261         u16 dev_idx_to_host_tbl[ENA_TOTAL_NUM_QUEUES] = { (u16)-1 };
1262         struct ena_rss *rss = &ena_dev->rss;
1263         u8 idx;
1264         u16 i;
1265
1266         for (i = 0; i < ENA_TOTAL_NUM_QUEUES; i++)
1267                 dev_idx_to_host_tbl[ena_dev->io_sq_queues[i].idx] = i;
1268
1269         for (i = 0; i < 1 << rss->tbl_log_size; i++) {
1270                 if (rss->rss_ind_tbl[i].cq_idx >= ENA_TOTAL_NUM_QUEUES)
1271                         return ENA_COM_INVAL;
1272                 idx = (u8)rss->rss_ind_tbl[i].cq_idx;
1273
1274                 if (dev_idx_to_host_tbl[idx] >= ENA_TOTAL_NUM_QUEUES)
1275                         return ENA_COM_INVAL;
1276
1277                 rss->host_rss_ind_tbl[i] = dev_idx_to_host_tbl[idx];
1278         }
1279
1280         return 0;
1281 }
1282
1283 static int ena_com_init_interrupt_moderation_table(struct ena_com_dev *ena_dev)
1284 {
1285         size_t size;
1286
1287         size = sizeof(struct ena_intr_moder_entry) * ENA_INTR_MAX_NUM_OF_LEVELS;
1288
1289         ena_dev->intr_moder_tbl = ENA_MEM_ALLOC(ena_dev->dmadev, size);
1290         if (!ena_dev->intr_moder_tbl)
1291                 return ENA_COM_NO_MEM;
1292
1293         ena_com_config_default_interrupt_moderation_table(ena_dev);
1294
1295         return 0;
1296 }
1297
1298 static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev,
1299                                                  u16 intr_delay_resolution)
1300 {
1301         struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
1302         unsigned int i;
1303
1304         if (!intr_delay_resolution) {
1305                 ena_trc_err("Illegal intr_delay_resolution provided. Going to use default 1 usec resolution\n");
1306                 intr_delay_resolution = 1;
1307         }
1308         ena_dev->intr_delay_resolution = intr_delay_resolution;
1309
1310         /* update Rx */
1311         for (i = 0; i < ENA_INTR_MAX_NUM_OF_LEVELS; i++)
1312                 intr_moder_tbl[i].intr_moder_interval /= intr_delay_resolution;
1313
1314         /* update Tx */
1315         ena_dev->intr_moder_tx_interval /= intr_delay_resolution;
1316 }
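/* Editorial example (assuming the table intervals are expressed in
 * microseconds, as the default-resolution message above suggests): with an
 * intr_delay_resolution of 4, a configured interval of 64 usec is converted
 * above to 64 / 4 = 16 device units.
 */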
1317
1318 /*****************************************************************************/
1319 /*******************************      API       ******************************/
1320 /*****************************************************************************/
1321
1322 int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue,
1323                                   struct ena_admin_aq_entry *cmd,
1324                                   size_t cmd_size,
1325                                   struct ena_admin_acq_entry *comp,
1326                                   size_t comp_size)
1327 {
1328         struct ena_comp_ctx *comp_ctx;
1329         int ret;
1330
1331         comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size,
1332                                             comp, comp_size);
1333         if (IS_ERR(comp_ctx)) {
1334                 if (comp_ctx == ERR_PTR(ENA_COM_NO_DEVICE))
1335                         ena_trc_dbg("Failed to submit command [%ld]\n",
1336                                     PTR_ERR(comp_ctx));
1337                 else
1338                         ena_trc_err("Failed to submit command [%ld]\n",
1339                                     PTR_ERR(comp_ctx));
1340
1341                 return PTR_ERR(comp_ctx);
1342         }
1343
1344         ret = ena_com_wait_and_process_admin_cq(comp_ctx, admin_queue);
1345         if (unlikely(ret)) {
1346                 if (admin_queue->running_state)
1347                         ena_trc_err("Failed to process command. ret = %d\n",
1348                                     ret);
1349                 else
1350                         ena_trc_dbg("Failed to process command. ret = %d\n",
1351                                     ret);
1352         }
1353         return ret;
1354 }
1355
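/* Create an IO completion queue on the device via an admin CREATE_CQ command
 * and record in io_cq the returned CQ index, the interrupt unmask register
 * and, when reported, the CQ head doorbell and NUMA node config registers.
 */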
1356 int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
1357                          struct ena_com_io_cq *io_cq)
1358 {
1359         struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1360         struct ena_admin_aq_create_cq_cmd create_cmd;
1361         struct ena_admin_acq_create_cq_resp_desc cmd_completion;
1362         int ret;
1363
1364         memset(&create_cmd, 0x0, sizeof(create_cmd));
1365
1366         create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_CQ;
1367
1368         create_cmd.cq_caps_2 |= (io_cq->cdesc_entry_size_in_bytes / 4) &
1369                 ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;
1370         create_cmd.cq_caps_1 |=
1371                 ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK;
1372
1373         create_cmd.msix_vector = io_cq->msix_vector;
1374         create_cmd.cq_depth = io_cq->q_depth;
1375
1376         ret = ena_com_mem_addr_set(ena_dev,
1377                                    &create_cmd.cq_ba,
1378                                    io_cq->cdesc_addr.phys_addr);
1379         if (unlikely(ret)) {
1380                 ena_trc_err("memory address set failed\n");
1381                 return ret;
1382         }
1383
1384         ret = ena_com_execute_admin_command(admin_queue,
1385                                             (struct ena_admin_aq_entry *)&create_cmd,
1386                                             sizeof(create_cmd),
1387                                             (struct ena_admin_acq_entry *)&cmd_completion,
1388                                             sizeof(cmd_completion));
1389         if (unlikely(ret)) {
1390                 ena_trc_err("Failed to create IO CQ. error: %d\n", ret);
1391                 return ret;
1392         }
1393
1394         io_cq->idx = cmd_completion.cq_idx;
1395
1396         io_cq->unmask_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1397                 cmd_completion.cq_interrupt_unmask_register_offset);
1398
1399         if (cmd_completion.cq_head_db_register_offset)
1400                 io_cq->cq_head_db_reg =
1401                         (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1402                         cmd_completion.cq_head_db_register_offset);
1403
1404         if (cmd_completion.numa_node_register_offset)
1405                 io_cq->numa_node_cfg_reg =
1406                         (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1407                         cmd_completion.numa_node_register_offset);
1408
1409         ena_trc_dbg("created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth);
1410
1411         return ret;
1412 }
1413
1414 int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
1415                             struct ena_com_io_sq **io_sq,
1416                             struct ena_com_io_cq **io_cq)
1417 {
1418         if (qid >= ENA_TOTAL_NUM_QUEUES) {
1419                 ena_trc_err("Invalid queue number %d, max number of queues is %d\n",
1420                             qid, ENA_TOTAL_NUM_QUEUES);
1421                 return ENA_COM_INVAL;
1422         }
1423
1424         *io_sq = &ena_dev->io_sq_queues[qid];
1425         *io_cq = &ena_dev->io_cq_queues[qid];
1426
1427         return 0;
1428 }
1429
1430 void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev)
1431 {
1432         struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1433         struct ena_comp_ctx *comp_ctx;
1434         u16 i;
1435
1436         if (!admin_queue->comp_ctx)
1437                 return;
1438
1439         for (i = 0; i < admin_queue->q_depth; i++) {
1440                 comp_ctx = get_comp_ctxt(admin_queue, i, false);
1441                 if (unlikely(!comp_ctx))
1442                         break;
1443
1444                 comp_ctx->status = ENA_CMD_ABORTED;
1445
1446                 ENA_WAIT_EVENT_SIGNAL(comp_ctx->wait_event);
1447         }
1448 }
1449
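/* Block until all outstanding admin commands have been completed (or
 * aborted), polling the outstanding commands counter every ENA_POLL_MS
 * milliseconds.
 */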
1450 void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
1451 {
1452         struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1453         unsigned long flags = 0;
1454
1455         ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
1456         while (ATOMIC32_READ(&admin_queue->outstanding_cmds) != 0) {
1457                 ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
1458                 ENA_MSLEEP(ENA_POLL_MS);
1459                 ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
1460         }
1461         ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
1462 }
1463
1464 int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
1465                           struct ena_com_io_cq *io_cq)
1466 {
1467         struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1468         struct ena_admin_aq_destroy_cq_cmd destroy_cmd;
1469         struct ena_admin_acq_destroy_cq_resp_desc destroy_resp;
1470         int ret;
1471
1472         memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));
1473
1474         destroy_cmd.cq_idx = io_cq->idx;
1475         destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_CQ;
1476
1477         ret = ena_com_execute_admin_command(admin_queue,
1478                                             (struct ena_admin_aq_entry *)&destroy_cmd,
1479                                             sizeof(destroy_cmd),
1480                                             (struct ena_admin_acq_entry *)&destroy_resp,
1481                                             sizeof(destroy_resp));
1482
1483         if (unlikely(ret && (ret != ENA_COM_NO_DEVICE)))
1484                 ena_trc_err("Failed to destroy IO CQ. error: %d\n", ret);
1485
1486         return ret;
1487 }
1488
1489 bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev)
1490 {
1491         return ena_dev->admin_queue.running_state;
1492 }
1493
1494 void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state)
1495 {
1496         struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1497         unsigned long flags = 0;
1498
1499         ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
1500         ena_dev->admin_queue.running_state = state;
1501         ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
1502 }
1503
1504 void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev)
1505 {
1506         u16 depth = ena_dev->aenq.q_depth;
1507
1508         ENA_WARN(ena_dev->aenq.head != depth, "Invalid AENQ state\n");
1509
1510         /* Init head_db to mark that all entries in the queue
1511          * are initially available
1512          */
1513         ENA_REG_WRITE32(ena_dev->bus, depth, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
1514 }
1515
1516 int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
1517 {
1518         struct ena_com_admin_queue *admin_queue;
1519         struct ena_admin_set_feat_cmd cmd;
1520         struct ena_admin_set_feat_resp resp;
1521         struct ena_admin_get_feat_resp get_resp;
1522         int ret;
1523
1524         ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG, 0);
1525         if (ret) {
1526                 ena_trc_info("Can't get aenq configuration\n");
1527                 return ret;
1528         }
1529
1530         if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) {
1531                 ena_trc_warn("Trying to set unsupported aenq events. supported flag: 0x%x asked flag: 0x%x\n",
1532                              get_resp.u.aenq.supported_groups,
1533                              groups_flag);
1534                 return ENA_COM_UNSUPPORTED;
1535         }
1536
1537         memset(&cmd, 0x0, sizeof(cmd));
1538         admin_queue = &ena_dev->admin_queue;
1539
1540         cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
1541         cmd.aq_common_descriptor.flags = 0;
1542         cmd.feat_common.feature_id = ENA_ADMIN_AENQ_CONFIG;
1543         cmd.u.aenq.enabled_groups = groups_flag;
1544
1545         ret = ena_com_execute_admin_command(admin_queue,
1546                                             (struct ena_admin_aq_entry *)&cmd,
1547                                             sizeof(cmd),
1548                                             (struct ena_admin_acq_entry *)&resp,
1549                                             sizeof(resp));
1550
1551         if (unlikely(ret))
1552                 ena_trc_err("Failed to config AENQ ret: %d\n", ret);
1553
1554         return ret;
1555 }
1556
1557 int ena_com_get_dma_width(struct ena_com_dev *ena_dev)
1558 {
1559         u32 caps = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
1560         int width;
1561
1562         if (unlikely(caps == ENA_MMIO_READ_TIMEOUT)) {
1563                 ena_trc_err("Reg read timeout occurred\n");
1564                 return ENA_COM_TIMER_EXPIRED;
1565         }
1566
1567         width = (caps & ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK) >>
1568                 ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT;
1569
1570         ena_trc_dbg("ENA dma width: %d\n", width);
1571
1572         if ((width < 32) || (width > ENA_MAX_PHYS_ADDR_SIZE_BITS)) {
1573                 ena_trc_err("DMA width illegal value: %d\n", width);
1574                 return ENA_COM_INVAL;
1575         }
1576
1577         ena_dev->dma_addr_bits = width;
1578
1579         return width;
1580 }
1581
1582 int ena_com_validate_version(struct ena_com_dev *ena_dev)
1583 {
1584         u32 ver;
1585         u32 ctrl_ver;
1586         u32 ctrl_ver_masked;
1587
1588         /* Make sure the ENA version and the controller version are at
1589          * least as high as what the driver expects
1590          */
1591         ver = ena_com_reg_bar_read32(ena_dev, ENA_REGS_VERSION_OFF);
1592         ctrl_ver = ena_com_reg_bar_read32(ena_dev,
1593                                           ENA_REGS_CONTROLLER_VERSION_OFF);
1594
1595         if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) ||
1596                      (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) {
1597                 ena_trc_err("Reg read timeout occurred\n");
1598                 return ENA_COM_TIMER_EXPIRED;
1599         }
1600
1601         ena_trc_info("ena device version: %d.%d\n",
1602                      (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >>
1603                      ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
1604                      ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);
1605
1606         ena_trc_info("ena controller version: %d.%d.%d implementation version %d\n",
1607                      (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK)
1608                      >> ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
1609                      (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK)
1610                      >> ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT,
1611                      (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK),
1612                      (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK) >>
1613                      ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT);
1614
1615         ctrl_ver_masked =
1616                 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) |
1617                 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) |
1618                 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK);
1619
1620         /* Validate the ctrl version without the implementation ID */
1621         if (ctrl_ver_masked < MIN_ENA_CTRL_VER) {
1622                 ena_trc_err("ENA ctrl version is lower than the minimal ctrl version the driver supports\n");
1623                 return -1;
1624         }
1625
1626         return 0;
1627 }
1628
1629 void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
1630 {
1631         struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1632         struct ena_com_admin_cq *cq = &admin_queue->cq;
1633         struct ena_com_admin_sq *sq = &admin_queue->sq;
1634         struct ena_com_aenq *aenq = &ena_dev->aenq;
1635         u16 size;
1636
        if (admin_queue->comp_ctx) {
                ENA_WAIT_EVENT_DESTROY(admin_queue->comp_ctx->wait_event);
                ENA_MEM_FREE(ena_dev->dmadev, admin_queue->comp_ctx);
        }
        admin_queue->comp_ctx = NULL;
1641         size = ADMIN_SQ_SIZE(admin_queue->q_depth);
1642         if (sq->entries)
1643                 ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, sq->entries,
1644                                       sq->dma_addr, sq->mem_handle);
1645         sq->entries = NULL;
1646
1647         size = ADMIN_CQ_SIZE(admin_queue->q_depth);
1648         if (cq->entries)
1649                 ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, cq->entries,
1650                                       cq->dma_addr, cq->mem_handle);
1651         cq->entries = NULL;
1652
1653         size = ADMIN_AENQ_SIZE(aenq->q_depth);
1654         if (ena_dev->aenq.entries)
1655                 ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, aenq->entries,
1656                                       aenq->dma_addr, aenq->mem_handle);
1657         aenq->entries = NULL;
1658         ENA_SPINLOCK_DESTROY(admin_queue->q_lock);
1659 }
1660
1661 void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
1662 {
1663         u32 mask_value = 0;
1664
1665         if (polling)
1666                 mask_value = ENA_REGS_ADMIN_INTR_MASK;
1667
1668         ENA_REG_WRITE32(ena_dev->bus, mask_value,
1669                         ena_dev->reg_bar + ENA_REGS_INTR_MASK_OFF);
1670         ena_dev->admin_queue.polling = polling;
1671 }
1672
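/* Allocate the DMA-coherent response buffer used for "readless" register
 * reads and program its address into the device MMIO_RESP_LO/HI registers.
 */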
1673 int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
1674 {
1675         struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1676
1677         ENA_SPINLOCK_INIT(mmio_read->lock);
1678         ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
1679                                sizeof(*mmio_read->read_resp),
1680                                mmio_read->read_resp,
1681                                mmio_read->read_resp_dma_addr,
1682                                mmio_read->read_resp_mem_handle);
1683         if (unlikely(!mmio_read->read_resp))
1684                 goto err;
1685
1686         ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);
1687
1688         mmio_read->read_resp->req_id = 0x0;
1689         mmio_read->seq_num = 0x0;
1690         mmio_read->readless_supported = true;
1691
1692         return 0;
1693
1694 err:
1695         ENA_SPINLOCK_DESTROY(mmio_read->lock);
1696         return ENA_COM_NO_MEM;
1697 }
1698
1699 void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported)
1700 {
1701         struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1702
1703         mmio_read->readless_supported = readless_supported;
1704 }
1705
1706 void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev)
1707 {
1708         struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1709
1710         ENA_REG_WRITE32(ena_dev->bus, 0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
1711         ENA_REG_WRITE32(ena_dev->bus, 0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
1712
1713         ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
1714                               sizeof(*mmio_read->read_resp),
1715                               mmio_read->read_resp,
1716                               mmio_read->read_resp_dma_addr,
1717                               mmio_read->read_resp_mem_handle);
1718
1719         mmio_read->read_resp = NULL;
1720         ENA_SPINLOCK_DESTROY(mmio_read->lock);
1721 }
1722
1723 void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev)
1724 {
1725         struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1726         u32 addr_low, addr_high;
1727
1728         addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(mmio_read->read_resp_dma_addr);
1729         addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(mmio_read->read_resp_dma_addr);
1730
1731         ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
1732         ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
1733 }
1734
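/* Bring up the admin queue: verify the device reports itself ready, allocate
 * the completion contexts, admin SQ and CQ, program their base addresses and
 * the AQ/ACQ capability registers, and finally initialize the AENQ. On any
 * failure the partially initialized resources are released via
 * ena_com_admin_destroy().
 */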
1735 int ena_com_admin_init(struct ena_com_dev *ena_dev,
1736                        struct ena_aenq_handlers *aenq_handlers)
1737 {
1738         struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1739         u32 aq_caps, acq_caps, dev_sts, addr_low, addr_high;
1740         int ret;
1741
1742         dev_sts = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
1743
1744         if (unlikely(dev_sts == ENA_MMIO_READ_TIMEOUT)) {
1745                 ena_trc_err("Reg read timeout occurred\n");
1746                 return ENA_COM_TIMER_EXPIRED;
1747         }
1748
1749         if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) {
1750                 ena_trc_err("Device isn't ready, abort com init\n");
1751                 return ENA_COM_NO_DEVICE;
1752         }
1753
1754         admin_queue->q_depth = ENA_ADMIN_QUEUE_DEPTH;
1755
1756         admin_queue->bus = ena_dev->bus;
1757         admin_queue->q_dmadev = ena_dev->dmadev;
1758         admin_queue->polling = false;
1759         admin_queue->curr_cmd_id = 0;
1760
1761         ATOMIC32_SET(&admin_queue->outstanding_cmds, 0);
1762
1763         ENA_SPINLOCK_INIT(admin_queue->q_lock);
1764
1765         ret = ena_com_init_comp_ctxt(admin_queue);
1766         if (ret)
1767                 goto error;
1768
1769         ret = ena_com_admin_init_sq(admin_queue);
1770         if (ret)
1771                 goto error;
1772
1773         ret = ena_com_admin_init_cq(admin_queue);
1774         if (ret)
1775                 goto error;
1776
1777         admin_queue->sq.db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1778                 ENA_REGS_AQ_DB_OFF);
1779
1780         addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->sq.dma_addr);
1781         addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->sq.dma_addr);
1782
1783         ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_AQ_BASE_LO_OFF);
1784         ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_AQ_BASE_HI_OFF);
1785
1786         addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->cq.dma_addr);
1787         addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->cq.dma_addr);
1788
1789         ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_LO_OFF);
1790         ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_HI_OFF);
1791
1792         aq_caps = 0;
1793         aq_caps |= admin_queue->q_depth & ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK;
1794         aq_caps |= (sizeof(struct ena_admin_aq_entry) <<
1795                         ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT) &
1796                         ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK;
1797
1798         acq_caps = 0;
1799         acq_caps |= admin_queue->q_depth & ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK;
1800         acq_caps |= (sizeof(struct ena_admin_acq_entry) <<
1801                 ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT) &
1802                 ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK;
1803
1804         ENA_REG_WRITE32(ena_dev->bus, aq_caps, ena_dev->reg_bar + ENA_REGS_AQ_CAPS_OFF);
1805         ENA_REG_WRITE32(ena_dev->bus, acq_caps, ena_dev->reg_bar + ENA_REGS_ACQ_CAPS_OFF);
1806         ret = ena_com_admin_init_aenq(ena_dev, aenq_handlers);
1807         if (ret)
1808                 goto error;
1809
1810         admin_queue->running_state = true;
1811
1812         return 0;
1813 error:
1814         ena_com_admin_destroy(ena_dev);
1815
1816         return ret;
1817 }
1818
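/* Create one IO queue pair (SQ + CQ) described by ctx: initialize the host
 * side context, then issue the admin commands to create the CQ and the SQ.
 * If SQ creation fails, the already created CQ is destroyed and the host
 * resources are freed.
 */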
1819 int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
1820                             struct ena_com_create_io_ctx *ctx)
1821 {
1822         struct ena_com_io_sq *io_sq;
1823         struct ena_com_io_cq *io_cq;
1824         int ret;
1825
1826         if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) {
1827                 ena_trc_err("Qid (%d) is bigger than max num of queues (%d)\n",
1828                             ctx->qid, ENA_TOTAL_NUM_QUEUES);
1829                 return ENA_COM_INVAL;
1830         }
1831
1832         io_sq = &ena_dev->io_sq_queues[ctx->qid];
1833         io_cq = &ena_dev->io_cq_queues[ctx->qid];
1834
1835         memset(io_sq, 0x0, sizeof(*io_sq));
1836         memset(io_cq, 0x0, sizeof(*io_cq));
1837
1838         /* Init CQ */
1839         io_cq->q_depth = ctx->queue_size;
1840         io_cq->direction = ctx->direction;
1841         io_cq->qid = ctx->qid;
1842
1843         io_cq->msix_vector = ctx->msix_vector;
1844
1845         io_sq->q_depth = ctx->queue_size;
1846         io_sq->direction = ctx->direction;
1847         io_sq->qid = ctx->qid;
1848
1849         io_sq->mem_queue_type = ctx->mem_queue_type;
1850
1851         if (ctx->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
1852                 /* header length is limited to 8 bits */
1853                 io_sq->tx_max_header_size =
1854                         ENA_MIN32(ena_dev->tx_max_header_size, SZ_256);
1855
1856         ret = ena_com_init_io_sq(ena_dev, ctx, io_sq);
1857         if (ret)
1858                 goto error;
1859         ret = ena_com_init_io_cq(ena_dev, ctx, io_cq);
1860         if (ret)
1861                 goto error;
1862
1863         ret = ena_com_create_io_cq(ena_dev, io_cq);
1864         if (ret)
1865                 goto error;
1866
1867         ret = ena_com_create_io_sq(ena_dev, io_sq, io_cq->idx);
1868         if (ret)
1869                 goto destroy_io_cq;
1870
1871         return 0;
1872
1873 destroy_io_cq:
1874         ena_com_destroy_io_cq(ena_dev, io_cq);
1875 error:
1876         ena_com_io_queue_free(ena_dev, io_sq, io_cq);
1877         return ret;
1878 }
1879
1880 void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid)
1881 {
1882         struct ena_com_io_sq *io_sq;
1883         struct ena_com_io_cq *io_cq;
1884
1885         if (qid >= ENA_TOTAL_NUM_QUEUES) {
1886                 ena_trc_err("Qid (%d) is bigger than max num of queues (%d)\n",
1887                             qid, ENA_TOTAL_NUM_QUEUES);
1888                 return;
1889         }
1890
1891         io_sq = &ena_dev->io_sq_queues[qid];
1892         io_cq = &ena_dev->io_cq_queues[qid];
1893
1894         ena_com_destroy_io_sq(ena_dev, io_sq);
1895         ena_com_destroy_io_cq(ena_dev, io_cq);
1896
1897         ena_com_io_queue_free(ena_dev, io_sq, io_cq);
1898 }
1899
1900 int ena_com_get_link_params(struct ena_com_dev *ena_dev,
1901                             struct ena_admin_get_feat_resp *resp)
1902 {
1903         return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG, 0);
1904 }
1905
1906 int ena_com_extra_properties_strings_init(struct ena_com_dev *ena_dev)
1907 {
1908         struct ena_admin_get_feat_resp resp;
1909         struct ena_extra_properties_strings *extra_properties_strings =
1910                         &ena_dev->extra_properties_strings;
1911         int rc;
1912         extra_properties_strings->size = ENA_ADMIN_EXTRA_PROPERTIES_COUNT *
1913                 ENA_ADMIN_EXTRA_PROPERTIES_STRING_LEN;
1914
1915         ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
1916                                extra_properties_strings->size,
1917                                extra_properties_strings->virt_addr,
1918                                extra_properties_strings->dma_addr,
1919                                extra_properties_strings->dma_handle);
1920         if (unlikely(!extra_properties_strings->virt_addr)) {
1921                 ena_trc_err("Failed to allocate extra properties strings\n");
1922                 return 0;
1923         }
1924
1925         rc = ena_com_get_feature_ex(ena_dev, &resp,
1926                                     ENA_ADMIN_EXTRA_PROPERTIES_STRINGS,
1927                                     extra_properties_strings->dma_addr,
1928                                     extra_properties_strings->size, 0);
1929         if (rc) {
1930                 ena_trc_dbg("Failed to get extra properties strings\n");
1931                 goto err;
1932         }
1933
1934         return resp.u.extra_properties_strings.count;
1935 err:
1936         ena_com_delete_extra_properties_strings(ena_dev);
1937         return 0;
1938 }
1939
1940 void ena_com_delete_extra_properties_strings(struct ena_com_dev *ena_dev)
1941 {
1942         struct ena_extra_properties_strings *extra_properties_strings =
1943                                 &ena_dev->extra_properties_strings;
1944
1945         if (extra_properties_strings->virt_addr) {
1946                 ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
1947                                       extra_properties_strings->size,
1948                                       extra_properties_strings->virt_addr,
1949                                       extra_properties_strings->dma_addr,
1950                                       extra_properties_strings->dma_handle);
1951                 extra_properties_strings->virt_addr = NULL;
1952         }
1953 }
1954
1955 int ena_com_get_extra_properties_flags(struct ena_com_dev *ena_dev,
1956                                        struct ena_admin_get_feat_resp *resp)
1957 {
1958         return ena_com_get_feature(ena_dev, resp,
1959                                    ENA_ADMIN_EXTRA_PROPERTIES_FLAGS, 0);
1960 }
1961
1962 int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
1963                               struct ena_com_dev_get_features_ctx *get_feat_ctx)
1964 {
1965         struct ena_admin_get_feat_resp get_resp;
1966         int rc;
1967
1968         rc = ena_com_get_feature(ena_dev, &get_resp,
1969                                  ENA_ADMIN_DEVICE_ATTRIBUTES, 0);
1970         if (rc)
1971                 return rc;
1972
1973         memcpy(&get_feat_ctx->dev_attr, &get_resp.u.dev_attr,
1974                sizeof(get_resp.u.dev_attr));
1975         ena_dev->supported_features = get_resp.u.dev_attr.supported_features;
1976
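        /* Prefer the extended max-queues descriptor when the device
         * advertises ENA_ADMIN_MAX_QUEUES_EXT; otherwise fall back to the
         * legacy ENA_ADMIN_MAX_QUEUES_NUM feature.
         */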
1977         if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
1978                 rc = ena_com_get_feature(ena_dev, &get_resp,
1979                                          ENA_ADMIN_MAX_QUEUES_EXT,
1980                                          ENA_FEATURE_MAX_QUEUE_EXT_VER);
1981                 if (rc)
1982                         return rc;
1983
1984                 if (get_resp.u.max_queue_ext.version != ENA_FEATURE_MAX_QUEUE_EXT_VER)
1985                         return -EINVAL;
1986
1987                 memcpy(&get_feat_ctx->max_queue_ext, &get_resp.u.max_queue_ext,
1988                        sizeof(get_resp.u.max_queue_ext));
1989                 ena_dev->tx_max_header_size =
1990                         get_resp.u.max_queue_ext.max_queue_ext.max_tx_header_size;
1991         } else {
1992                 rc = ena_com_get_feature(ena_dev, &get_resp,
1993                                          ENA_ADMIN_MAX_QUEUES_NUM, 0);
1994                 memcpy(&get_feat_ctx->max_queues, &get_resp.u.max_queue,
1995                        sizeof(get_resp.u.max_queue));
1996                 ena_dev->tx_max_header_size =
1997                         get_resp.u.max_queue.max_header_size;
1998
1999                 if (rc)
2000                         return rc;
2001         }
2002
2003         rc = ena_com_get_feature(ena_dev, &get_resp,
2004                                  ENA_ADMIN_AENQ_CONFIG, 0);
2005         if (rc)
2006                 return rc;
2007
2008         memcpy(&get_feat_ctx->aenq, &get_resp.u.aenq,
2009                sizeof(get_resp.u.aenq));
2010
2011         rc = ena_com_get_feature(ena_dev, &get_resp,
2012                                  ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
2013         if (rc)
2014                 return rc;
2015
2016         memcpy(&get_feat_ctx->offload, &get_resp.u.offload,
2017                sizeof(get_resp.u.offload));
2018
2019         /* Driver hints isn't a mandatory admin command, so if the
2020          * command isn't supported, set the driver hints to 0.
2021          */
2022         rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_HW_HINTS, 0);
2023
2024         if (!rc)
2025                 memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints,
2026                        sizeof(get_resp.u.hw_hints));
2027         else if (rc == ENA_COM_UNSUPPORTED)
2028                 memset(&get_feat_ctx->hw_hints, 0x0, sizeof(get_feat_ctx->hw_hints));
2029         else
2030                 return rc;
2031
2032         rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_LLQ, 0);
2033         if (!rc)
2034                 memcpy(&get_feat_ctx->llq, &get_resp.u.llq,
2035                        sizeof(get_resp.u.llq));
2036         else if (rc == ENA_COM_UNSUPPORTED)
2037                 memset(&get_feat_ctx->llq, 0x0, sizeof(get_feat_ctx->llq));
2038         else
2039                 return rc;
2040
2041         rc = ena_com_get_feature(ena_dev, &get_resp,
2042                                  ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG, 0);
2043         if (!rc)
2044                 memcpy(&get_feat_ctx->ind_table, &get_resp.u.ind_table,
2045                        sizeof(get_resp.u.ind_table));
2046         else if (rc == ENA_COM_UNSUPPORTED)
2047                 memset(&get_feat_ctx->ind_table, 0x0,
2048                        sizeof(get_feat_ctx->ind_table));
2049         else
2050                 return rc;
2051
2052         return 0;
2053 }
2054
2055 void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev)
2056 {
2057         ena_com_handle_admin_completion(&ena_dev->admin_queue);
2058 }
2059
2060 /* ena_com_get_specific_aenq_cb:
2061  * return the handler that is relevant to the specific event group
2062  */
2063 static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *dev,
2064                                                      u16 group)
2065 {
2066         struct ena_aenq_handlers *aenq_handlers = dev->aenq.aenq_handlers;
2067
2068         if ((group < ENA_MAX_HANDLERS) && aenq_handlers->handlers[group])
2069                 return aenq_handlers->handlers[group];
2070
2071         return aenq_handlers->unimplemented_handler;
2072 }
2073
2074 /* ena_aenq_intr_handler:
2075  * handles the aenq incoming events.
2076  * pop events from the queue and apply the specific handler
2077  */
2078 void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
2079 {
2080         struct ena_admin_aenq_entry *aenq_e;
2081         struct ena_admin_aenq_common_desc *aenq_common;
2082         struct ena_com_aenq *aenq  = &dev->aenq;
2083         unsigned long long timestamp;
2084         ena_aenq_handler handler_cb;
2085         u16 masked_head, processed = 0;
2086         u8 phase;
2087
2088         masked_head = aenq->head & (aenq->q_depth - 1);
2089         phase = aenq->phase;
2090         aenq_e = &aenq->entries[masked_head]; /* Get first entry */
2091         aenq_common = &aenq_e->aenq_common_desc;
2092
2093         /* Go over all the events */
2094         while ((READ_ONCE8(aenq_common->flags) &
2095                 ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) {
2096                 /* Make sure the phase bit (ownership) is as expected before
2097                  * reading the rest of the descriptor.
2098                  */
2099                 dma_rmb();
2100
2101                 timestamp = (unsigned long long)aenq_common->timestamp_low |
2102                         ((unsigned long long)aenq_common->timestamp_high << 32);
2103                 ENA_TOUCH(timestamp); /* In case debug is disabled */
2104                 ena_trc_dbg("AENQ! Group[%x] Syndrome[%x] timestamp: [%llus]\n",
2105                             aenq_common->group,
2106                             aenq_common->syndrom,
2107                             timestamp);
2108
2109                 /* Handle specific event */
2110                 handler_cb = ena_com_get_specific_aenq_cb(dev,
2111                                                           aenq_common->group);
2112                 handler_cb(data, aenq_e); /* call the actual event handler */
2113
2114                 /* Get next event entry */
2115                 masked_head++;
2116                 processed++;
2117
2118                 if (unlikely(masked_head == aenq->q_depth)) {
2119                         masked_head = 0;
2120                         phase = !phase;
2121                 }
2122                 aenq_e = &aenq->entries[masked_head];
2123                 aenq_common = &aenq_e->aenq_common_desc;
2124         }
2125
2126         aenq->head += processed;
2127         aenq->phase = phase;
2128
2129         /* Don't update aenq doorbell if there weren't any processed events */
2130         if (!processed)
2131                 return;
2132
2133         /* write the aenq doorbell after all AENQ descriptors were read */
2134         mb();
2135         ENA_REG_WRITE32_RELAXED(dev->bus, (u32)aenq->head,
2136                                 dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
2137         mmiowb();
2138 }
2139
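/* Reset the device: write the reset bit together with the reset reason to
 * DEV_CTL, wait for the device to report RESET_IN_PROGRESS, clear DEV_CTL
 * and wait for the indication to clear again. The reset timeout and the
 * admin completion timeout are taken from the CAPS register (falling back
 * to ADMIN_CMD_TIMEOUT_US when no admin timeout is reported).
 */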
2140 int ena_com_dev_reset(struct ena_com_dev *ena_dev,
2141                       enum ena_regs_reset_reason_types reset_reason)
2142 {
2143         u32 stat, timeout, cap, reset_val;
2144         int rc;
2145
2146         stat = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
2147         cap = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
2148
2149         if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) ||
2150                      (cap == ENA_MMIO_READ_TIMEOUT))) {
2151                 ena_trc_err("Reg read32 timeout occurred\n");
2152                 return ENA_COM_TIMER_EXPIRED;
2153         }
2154
2155         if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) {
2156                 ena_trc_err("Device isn't ready, can't reset device\n");
2157                 return ENA_COM_INVAL;
2158         }
2159
2160         timeout = (cap & ENA_REGS_CAPS_RESET_TIMEOUT_MASK) >>
2161                         ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT;
2162         if (timeout == 0) {
2163                 ena_trc_err("Invalid timeout value\n");
2164                 return ENA_COM_INVAL;
2165         }
2166
2167         /* start reset */
2168         reset_val = ENA_REGS_DEV_CTL_DEV_RESET_MASK;
2169         reset_val |= (reset_reason << ENA_REGS_DEV_CTL_RESET_REASON_SHIFT) &
2170                         ENA_REGS_DEV_CTL_RESET_REASON_MASK;
2171         ENA_REG_WRITE32(ena_dev->bus, reset_val, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
2172
2173         /* Write again the MMIO read request address */
2174         ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);
2175
2176         rc = wait_for_reset_state(ena_dev, timeout,
2177                                   ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK);
2178         if (rc != 0) {
2179                 ena_trc_err("Reset indication didn't turn on\n");
2180                 return rc;
2181         }
2182
2183         /* reset done */
2184         ENA_REG_WRITE32(ena_dev->bus, 0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
2185         rc = wait_for_reset_state(ena_dev, timeout, 0);
2186         if (rc != 0) {
2187                 ena_trc_err("Reset indication didn't turn off\n");
2188                 return rc;
2189         }
2190
2191         timeout = (cap & ENA_REGS_CAPS_ADMIN_CMD_TO_MASK) >>
2192                 ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT;
2193         if (timeout)
2194                 /* the resolution of timeout reg is 100ms */
2195                 ena_dev->admin_queue.completion_timeout = timeout * 100000;
2196         else
2197                 ena_dev->admin_queue.completion_timeout = ADMIN_CMD_TIMEOUT_US;
2198
2199         return 0;
2200 }
2201
2202 static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
2203                              struct ena_com_stats_ctx *ctx,
2204                              enum ena_admin_get_stats_type type)
2205 {
2206         struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx->get_cmd;
2207         struct ena_admin_acq_get_stats_resp *get_resp = &ctx->get_resp;
2208         struct ena_com_admin_queue *admin_queue;
2209         int ret;
2210
2211         admin_queue = &ena_dev->admin_queue;
2212
2213         get_cmd->aq_common_descriptor.opcode = ENA_ADMIN_GET_STATS;
2214         get_cmd->aq_common_descriptor.flags = 0;
2215         get_cmd->type = type;
2216
2217         ret = ena_com_execute_admin_command(admin_queue,
2218                                              (struct ena_admin_aq_entry *)get_cmd,
2219                                              sizeof(*get_cmd),
2220                                              (struct ena_admin_acq_entry *)get_resp,
2221                                              sizeof(*get_resp));
2222
2223         if (unlikely(ret))
2224                 ena_trc_err("Failed to get stats. error: %d\n", ret);
2225
2226         return ret;
2227 }
2228
2229 int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
2230                                 struct ena_admin_basic_stats *stats)
2231 {
2232         struct ena_com_stats_ctx ctx;
2233         int ret;
2234
2235         memset(&ctx, 0x0, sizeof(ctx));
2236         ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_BASIC);
2237         if (likely(ret == 0))
2238                 memcpy(stats, &ctx.get_resp.basic_stats,
2239                        sizeof(ctx.get_resp.basic_stats));
2240
2241         return ret;
2242 }
2243
2244 int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu)
2245 {
2246         struct ena_com_admin_queue *admin_queue;
2247         struct ena_admin_set_feat_cmd cmd;
2248         struct ena_admin_set_feat_resp resp;
2249         int ret;
2250
2251         if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
2252                 ena_trc_dbg("Feature %d isn't supported\n", ENA_ADMIN_MTU);
2253                 return ENA_COM_UNSUPPORTED;
2254         }
2255
2256         memset(&cmd, 0x0, sizeof(cmd));
2257         admin_queue = &ena_dev->admin_queue;
2258
2259         cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2260         cmd.aq_common_descriptor.flags = 0;
2261         cmd.feat_common.feature_id = ENA_ADMIN_MTU;
2262         cmd.u.mtu.mtu = mtu;
2263
2264         ret = ena_com_execute_admin_command(admin_queue,
2265                                             (struct ena_admin_aq_entry *)&cmd,
2266                                             sizeof(cmd),
2267                                             (struct ena_admin_acq_entry *)&resp,
2268                                             sizeof(resp));
2269
2270         if (unlikely(ret))
2271                 ena_trc_err("Failed to set mtu %d. error: %d\n", mtu, ret);
2272
2273         return ret;
2274 }
2275
2276 int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
2277                                  struct ena_admin_feature_offload_desc *offload)
2278 {
2279         int ret;
2280         struct ena_admin_get_feat_resp resp;
2281
2282         ret = ena_com_get_feature(ena_dev, &resp,
2283                                   ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
2284         if (unlikely(ret)) {
2285                 ena_trc_err("Failed to get offload capabilities %d\n", ret);
2286                 return ret;
2287         }
2288
2289         memcpy(offload, &resp.u.offload, sizeof(resp.u.offload));
2290
2291         return 0;
2292 }
2293
2294 int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
2295 {
2296         struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
2297         struct ena_rss *rss = &ena_dev->rss;
2298         struct ena_admin_set_feat_cmd cmd;
2299         struct ena_admin_set_feat_resp resp;
2300         struct ena_admin_get_feat_resp get_resp;
2301         int ret;
2302
2303         if (!ena_com_check_supported_feature_id(ena_dev,
2304                                                 ENA_ADMIN_RSS_HASH_FUNCTION)) {
2305                 ena_trc_dbg("Feature %d isn't supported\n",
2306                             ENA_ADMIN_RSS_HASH_FUNCTION);
2307                 return ENA_COM_UNSUPPORTED;
2308         }
2309
2310         /* Validate hash function is supported */
2311         ret = ena_com_get_feature(ena_dev, &get_resp,
2312                                   ENA_ADMIN_RSS_HASH_FUNCTION, 0);
2313         if (unlikely(ret))
2314                 return ret;
2315
2316         if (!(get_resp.u.flow_hash_func.supported_func & (1 << rss->hash_func))) {
2317                 ena_trc_err("Func hash %d isn't supported by device, abort\n",
2318                             rss->hash_func);
2319                 return ENA_COM_UNSUPPORTED;
2320         }
2321
2322         memset(&cmd, 0x0, sizeof(cmd));
2323
2324         cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2325         cmd.aq_common_descriptor.flags =
2326                 ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
2327         cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_FUNCTION;
2328         cmd.u.flow_hash_func.init_val = rss->hash_init_val;
2329         cmd.u.flow_hash_func.selected_func = 1 << rss->hash_func;
2330
2331         ret = ena_com_mem_addr_set(ena_dev,
2332                                    &cmd.control_buffer.address,
2333                                    rss->hash_key_dma_addr);
2334         if (unlikely(ret)) {
2335                 ena_trc_err("memory address set failed\n");
2336                 return ret;
2337         }
2338
2339         cmd.control_buffer.length = sizeof(*rss->hash_key);
2340
2341         ret = ena_com_execute_admin_command(admin_queue,
2342                                             (struct ena_admin_aq_entry *)&cmd,
2343                                             sizeof(cmd),
2344                                             (struct ena_admin_acq_entry *)&resp,
2345                                             sizeof(resp));
2346         if (unlikely(ret)) {
2347                 ena_trc_err("Failed to set hash function %d. error: %d\n",
2348                             rss->hash_func, ret);
2349                 return ENA_COM_INVAL;
2350         }
2351
2352         return 0;
2353 }
2354
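/* Configure the RSS hash function. @func selects the algorithm (e.g.
 * ENA_ADMIN_TOEPLITZ or ENA_ADMIN_CRC32), @key/@key_len provide the Toeplitz
 * key (must be a whole number of 32-bit words) and @init_val seeds the hash.
 * The requested function is validated against the device's supported
 * functions and pushed with ena_com_set_hash_function(); on failure the
 * previously configured function is restored.
 */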
2355 int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
2356                                enum ena_admin_hash_functions func,
2357                                const u8 *key, u16 key_len, u32 init_val)
2358 {
2359         struct ena_rss *rss = &ena_dev->rss;
2360         struct ena_admin_get_feat_resp get_resp;
2361         struct ena_admin_feature_rss_flow_hash_control *hash_key =
2362                 rss->hash_key;
2363         int rc;
2364
2365         /* Make sure the key size is a whole number of 32-bit words */
2366         if (unlikely(key_len & 0x3))
2367                 return ENA_COM_INVAL;
2368
2369         rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2370                                     ENA_ADMIN_RSS_HASH_FUNCTION,
2371                                     rss->hash_key_dma_addr,
2372                                     sizeof(*rss->hash_key), 0);
2373         if (unlikely(rc))
2374                 return rc;
2375
2376         if (!((1 << func) & get_resp.u.flow_hash_func.supported_func)) {
2377                 ena_trc_err("Flow hash function %d isn't supported\n", func);
2378                 return ENA_COM_UNSUPPORTED;
2379         }
2380
2381         switch (func) {
2382         case ENA_ADMIN_TOEPLITZ:
2383                 if (key_len > sizeof(hash_key->key)) {
2384                         ena_trc_err("key len (%hu) is bigger than the max supported (%zu)\n",
2385                                     key_len, sizeof(hash_key->key));
2386                         return ENA_COM_INVAL;
2387                 }
2388
2389                 memcpy(hash_key->key, key, key_len);
2390                 rss->hash_init_val = init_val;
2391                 hash_key->keys_num = key_len >> 2;
2392                 break;
2393         case ENA_ADMIN_CRC32:
2394                 rss->hash_init_val = init_val;
2395                 break;
2396         default:
2397                 ena_trc_err("Invalid hash function (%d)\n", func);
2398                 return ENA_COM_INVAL;
2399         }
2400
2401         rc = ena_com_set_hash_function(ena_dev);
2402
2403         /* Restore the old function */
2404         if (unlikely(rc))
2405                 ena_com_get_hash_function(ena_dev, NULL, NULL);
2406
2407         return rc;
2408 }
2409
2410 int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
2411                               enum ena_admin_hash_functions *func,
2412                               u8 *key)
2413 {
2414         struct ena_rss *rss = &ena_dev->rss;
2415         struct ena_admin_get_feat_resp get_resp;
2416         struct ena_admin_feature_rss_flow_hash_control *hash_key =
2417                 rss->hash_key;
2418         int rc;
2419
2420         rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2421                                     ENA_ADMIN_RSS_HASH_FUNCTION,
2422                                     rss->hash_key_dma_addr,
2423                                     sizeof(*rss->hash_key), 0);
2424         if (unlikely(rc))
2425                 return rc;
2426
2427         rss->hash_func = get_resp.u.flow_hash_func.selected_func;
2428         if (func)
2429                 *func = rss->hash_func;
2430
2431         if (key)
2432                 memcpy(key, hash_key->key, (size_t)(hash_key->keys_num) << 2);
2433
2434         return 0;
2435 }
2436
2437 int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev,
2438                           enum ena_admin_flow_hash_proto proto,
2439                           u16 *fields)
2440 {
2441         struct ena_rss *rss = &ena_dev->rss;
2442         struct ena_admin_get_feat_resp get_resp;
2443         int rc;
2444
2445         rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2446                                     ENA_ADMIN_RSS_HASH_INPUT,
2447                                     rss->hash_ctrl_dma_addr,
2448                                     sizeof(*rss->hash_ctrl), 0);
2449         if (unlikely(rc))
2450                 return rc;
2451
2452         if (fields)
2453                 *fields = rss->hash_ctrl->selected_fields[proto].fields;
2454
2455         return 0;
2456 }
2457
2458 int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
2459 {
2460         struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
2461         struct ena_rss *rss = &ena_dev->rss;
2462         struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
2463         struct ena_admin_set_feat_cmd cmd;
2464         struct ena_admin_set_feat_resp resp;
2465         int ret;
2466
2467         if (!ena_com_check_supported_feature_id(ena_dev,
2468                                                 ENA_ADMIN_RSS_HASH_INPUT)) {
2469                 ena_trc_dbg("Feature %d isn't supported\n",
2470                             ENA_ADMIN_RSS_HASH_INPUT);
2471                 return ENA_COM_UNSUPPORTED;
2472         }
2473
2474         memset(&cmd, 0x0, sizeof(cmd));
2475
2476         cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2477         cmd.aq_common_descriptor.flags =
2478                 ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
2479         cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_INPUT;
2480         cmd.u.flow_hash_input.enabled_input_sort =
2481                 ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK |
2482                 ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK;
2483
2484         ret = ena_com_mem_addr_set(ena_dev,
2485                                    &cmd.control_buffer.address,
2486                                    rss->hash_ctrl_dma_addr);
2487         if (unlikely(ret)) {
2488                 ena_trc_err("memory address set failed\n");
2489                 return ret;
2490         }
2491         cmd.control_buffer.length = sizeof(*hash_ctrl);
2492
2493         ret = ena_com_execute_admin_command(admin_queue,
2494                                             (struct ena_admin_aq_entry *)&cmd,
2495                                             sizeof(cmd),
2496                                             (struct ena_admin_acq_entry *)&resp,
2497                                             sizeof(resp));
2498         if (unlikely(ret))
2499                 ena_trc_err("Failed to set hash input. error: %d\n", ret);
2500
2501         return ret;
2502 }
2503
2504 int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
2505 {
2506         struct ena_rss *rss = &ena_dev->rss;
2507         struct ena_admin_feature_rss_hash_control *hash_ctrl =
2508                 rss->hash_ctrl;
2509         u16 available_fields = 0;
2510         int rc, i;
2511
2512         /* Get the supported hash input */
2513         rc = ena_com_get_hash_ctrl(ena_dev, 0, NULL);
2514         if (unlikely(rc))
2515                 return rc;
2516
2517         hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP4].fields =
2518                 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2519                 ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2520
2521         hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP4].fields =
2522                 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2523                 ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2524
2525         hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP6].fields =
2526                 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2527                 ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2528
2529         hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP6].fields =
2530                 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2531                 ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2532
2533         hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4].fields =
2534                 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
2535
2536         hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP6].fields =
2537                 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
2538
2539         hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields =
2540                 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
2541
2542         hash_ctrl->selected_fields[ENA_ADMIN_RSS_NOT_IP].fields =
2543                 ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA;
2544
2545         for (i = 0; i < ENA_ADMIN_RSS_PROTO_NUM; i++) {
2546                 available_fields = hash_ctrl->selected_fields[i].fields &
2547                                 hash_ctrl->supported_fields[i].fields;
2548                 if (available_fields != hash_ctrl->selected_fields[i].fields) {
2549                         ena_trc_err("hash control doesn't support all the desired configuration. proto %x supported %x selected %x\n",
2550                                     i, hash_ctrl->supported_fields[i].fields,
2551                                     hash_ctrl->selected_fields[i].fields);
2552                         return ENA_COM_UNSUPPORTED;
2553                 }
2554         }
2555
2556         rc = ena_com_set_hash_ctrl(ena_dev);
2557
2558         /* In case of failure, restore the old hash ctrl */
2559         if (unlikely(rc))
2560                 ena_com_get_hash_ctrl(ena_dev, 0, NULL);
2561
2562         return rc;
2563 }
2564
2565 int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
2566                            enum ena_admin_flow_hash_proto proto,
2567                            u16 hash_fields)
2568 {
2569         struct ena_rss *rss = &ena_dev->rss;
2570         struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
2571         u16 supported_fields;
2572         int rc;
2573
2574         if (proto >= ENA_ADMIN_RSS_PROTO_NUM) {
2575                 ena_trc_err("Invalid proto num (%u)\n", proto);
2576                 return ENA_COM_INVAL;
2577         }
2578
2579         /* Get the ctrl table */
2580         rc = ena_com_get_hash_ctrl(ena_dev, proto, NULL);
2581         if (unlikely(rc))
2582                 return rc;
2583
2584         /* Make sure all the fields are supported */
2585         supported_fields = hash_ctrl->supported_fields[proto].fields;
2586         if ((hash_fields & supported_fields) != hash_fields) {
2587                 ena_trc_err("proto %d doesn't support the required fields %x. supports only: %x\n",
2588                             proto, hash_fields, supported_fields);
2589         }
2590
2591         hash_ctrl->selected_fields[proto].fields = hash_fields;
2592
2593         rc = ena_com_set_hash_ctrl(ena_dev);
2594
2595         /* In case of failure, restore the old hash ctrl */
2596         if (unlikely(rc))
2597                 ena_com_get_hash_ctrl(ena_dev, 0, NULL);
2598
2599         return rc;
2600 }
2601
2602 int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev,
2603                                       u16 entry_idx, u16 entry_value)
2604 {
2605         struct ena_rss *rss = &ena_dev->rss;
2606
2607         if (unlikely(entry_idx >= (1 << rss->tbl_log_size)))
2608                 return ENA_COM_INVAL;
2609
2610         if (unlikely(entry_value >= ENA_TOTAL_NUM_QUEUES))
2611                 return ENA_COM_INVAL;
2612
2613         rss->host_rss_ind_tbl[entry_idx] = entry_value;
2614
2615         return 0;
2616 }
2617
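/* Push the host RSS indirection table to the device: the host queue numbers
 * are first converted to device queue indices, then the table is passed by
 * DMA address in an indirect SET_FEATURE admin command.
 */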
2618 int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
2619 {
2620         struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
2621         struct ena_rss *rss = &ena_dev->rss;
2622         struct ena_admin_set_feat_cmd cmd;
2623         struct ena_admin_set_feat_resp resp;
2624         int ret;
2625
2626         if (!ena_com_check_supported_feature_id(ena_dev,
2627                                                 ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG)) {
2628                 ena_trc_dbg("Feature %d isn't supported\n",
2629                             ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
2630                 return ENA_COM_UNSUPPORTED;
2631         }
2632
2633         ret = ena_com_ind_tbl_convert_to_device(ena_dev);
2634         if (ret) {
2635                 ena_trc_err("Failed to convert host indirection table to device table\n");
2636                 return ret;
2637         }
2638
2639         memset(&cmd, 0x0, sizeof(cmd));
2640
2641         cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2642         cmd.aq_common_descriptor.flags =
2643                 ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
2644         cmd.feat_common.feature_id = ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG;
2645         cmd.u.ind_table.size = rss->tbl_log_size;
2646         cmd.u.ind_table.inline_index = 0xFFFFFFFF;
2647
2648         ret = ena_com_mem_addr_set(ena_dev,
2649                                    &cmd.control_buffer.address,
2650                                    rss->rss_ind_tbl_dma_addr);
2651         if (unlikely(ret)) {
2652                 ena_trc_err("memory address set failed\n");
2653                 return ret;
2654         }
2655
2656         cmd.control_buffer.length = (1ULL << rss->tbl_log_size) *
2657                 sizeof(struct ena_admin_rss_ind_table_entry);
2658
2659         ret = ena_com_execute_admin_command(admin_queue,
2660                                             (struct ena_admin_aq_entry *)&cmd,
2661                                             sizeof(cmd),
2662                                             (struct ena_admin_acq_entry *)&resp,
2663                                             sizeof(resp));
2664
2665         if (unlikely(ret))
2666                 ena_trc_err("Failed to set indirect table. error: %d\n", ret);
2667
2668         return ret;
2669 }
2670
2671 int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl)
2672 {
2673         struct ena_rss *rss = &ena_dev->rss;
2674         struct ena_admin_get_feat_resp get_resp;
2675         u32 tbl_size;
2676         int i, rc;
2677
2678         tbl_size = (1ULL << rss->tbl_log_size) *
2679                 sizeof(struct ena_admin_rss_ind_table_entry);
2680
2681         rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2682                                     ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG,
2683                                     rss->rss_ind_tbl_dma_addr,
2684                                     tbl_size, 0);
2685         if (unlikely(rc))
2686                 return rc;
2687
2688         if (!ind_tbl)
2689                 return 0;
2690
2691         rc = ena_com_ind_tbl_convert_from_device(ena_dev);
2692         if (unlikely(rc))
2693                 return rc;
2694
2695         for (i = 0; i < (1 << rss->tbl_log_size); i++)
2696                 ind_tbl[i] = rss->host_rss_ind_tbl[i];
2697
2698         return 0;
2699 }
2700
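/* Allocate the RSS state (indirection table of size 2^indr_tbl_log_size,
 * hash key and hash control structures), rolling back the already allocated
 * pieces on failure. A typical caller then fills the hash function, the hash
 * input control and the indirection table entries and pushes them with the
 * corresponding ena_com_*_set() helpers.
 */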
2701 int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size)
2702 {
2703         int rc;
2704
2705         memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
2706
2707         rc = ena_com_indirect_table_allocate(ena_dev, indr_tbl_log_size);
2708         if (unlikely(rc))
2709                 goto err_indr_tbl;
2710
2711         rc = ena_com_hash_key_allocate(ena_dev);
2712         if (unlikely(rc))
2713                 goto err_hash_key;
2714
2715         rc = ena_com_hash_ctrl_init(ena_dev);
2716         if (unlikely(rc))
2717                 goto err_hash_ctrl;
2718
2719         return 0;
2720
2721 err_hash_ctrl:
2722         ena_com_hash_key_destroy(ena_dev);
2723 err_hash_key:
2724         ena_com_indirect_table_destroy(ena_dev);
2725 err_indr_tbl:
2726
2727         return rc;
2728 }
2729
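/* Release all RSS resources allocated by ena_com_rss_init() and clear the
 * RSS context.
 */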
2730 void ena_com_rss_destroy(struct ena_com_dev *ena_dev)
2731 {
2732         ena_com_indirect_table_destroy(ena_dev);
2733         ena_com_hash_key_destroy(ena_dev);
2734         ena_com_hash_ctrl_destroy(ena_dev);
2735
2736         memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
2737 }
2738
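/* Allocate the 4KB DMA-coherent host info page that is later reported to the
 * device by ena_com_set_host_attributes(), and stamp it with the common spec
 * version (major version in the high bits, minor in the low bits).
 */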
2739 int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
2740 {
2741         struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2742
2743         ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
2744                                SZ_4K,
2745                                host_attr->host_info,
2746                                host_attr->host_info_dma_addr,
2747                                host_attr->host_info_dma_handle);
2748         if (unlikely(!host_attr->host_info))
2749                 return ENA_COM_NO_MEM;
2750
2751         host_attr->host_info->ena_spec_version = ((ENA_COMMON_SPEC_VERSION_MAJOR <<
2752                 ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) |
2753                 (ENA_COMMON_SPEC_VERSION_MINOR));
2754
2755         return 0;
2756 }
2757
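/* Allocate a DMA-coherent debug area of the requested size. On allocation
 * failure the recorded size is reset to 0 so it is never reported to the
 * device.
 */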
2758 int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
2759                                 u32 debug_area_size)
2760 {
2761         struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2762
2763         ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
2764                                debug_area_size,
2765                                host_attr->debug_area_virt_addr,
2766                                host_attr->debug_area_dma_addr,
2767                                host_attr->debug_area_dma_handle);
2768         if (unlikely(!host_attr->debug_area_virt_addr)) {
2769                 host_attr->debug_area_size = 0;
2770                 return ENA_COM_NO_MEM;
2771         }
2772
2773         host_attr->debug_area_size = debug_area_size;
2774
2775         return 0;
2776 }
2777
2778 void ena_com_delete_host_info(struct ena_com_dev *ena_dev)
2779 {
2780         struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2781
2782         if (host_attr->host_info) {
2783                 ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
2784                                       SZ_4K,
2785                                       host_attr->host_info,
2786                                       host_attr->host_info_dma_addr,
2787                                       host_attr->host_info_dma_handle);
2788                 host_attr->host_info = NULL;
2789         }
2790 }
2791
2792 void ena_com_delete_debug_area(struct ena_com_dev *ena_dev)
2793 {
2794         struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2795
2796         if (host_attr->debug_area_virt_addr) {
2797                 ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
2798                                       host_attr->debug_area_size,
2799                                       host_attr->debug_area_virt_addr,
2800                                       host_attr->debug_area_dma_addr,
2801                                       host_attr->debug_area_dma_handle);
2802                 host_attr->debug_area_virt_addr = NULL;
2803         }
2804 }
2805
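/* Report the host info page and the debug area (base addresses and debug area
 * size) to the device with a SET_FEATURE(HOST_ATTR_CONFIG) admin command.
 */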
2806 int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
2807 {
2808         struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2809         struct ena_com_admin_queue *admin_queue;
2810         struct ena_admin_set_feat_cmd cmd;
2811         struct ena_admin_set_feat_resp resp;
2812
2813         int ret;
2814
2815         /* Host attribute config is called before ena_com_get_dev_attr_feat,
2816          * so ena_com can't check whether the feature is supported.
2817          */
2818
2819         memset(&cmd, 0x0, sizeof(cmd));
2820         admin_queue = &ena_dev->admin_queue;
2821
2822         cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2823         cmd.feat_common.feature_id = ENA_ADMIN_HOST_ATTR_CONFIG;
2824
2825         ret = ena_com_mem_addr_set(ena_dev,
2826                                    &cmd.u.host_attr.debug_ba,
2827                                    host_attr->debug_area_dma_addr);
2828         if (unlikely(ret)) {
2829                 ena_trc_err("memory address set failed\n");
2830                 return ret;
2831         }
2832
2833         ret = ena_com_mem_addr_set(ena_dev,
2834                                    &cmd.u.host_attr.os_info_ba,
2835                                    host_attr->host_info_dma_addr);
2836         if (unlikely(ret)) {
2837                 ena_trc_err("memory address set failed\n");
2838                 return ret;
2839         }
2840
2841         cmd.u.host_attr.debug_area_size = host_attr->debug_area_size;
2842
2843         ret = ena_com_execute_admin_command(admin_queue,
2844                                             (struct ena_admin_aq_entry *)&cmd,
2845                                             sizeof(cmd),
2846                                             (struct ena_admin_acq_entry *)&resp,
2847                                             sizeof(resp));
2848
2849         if (unlikely(ret))
2850                 ena_trc_err("Failed to set host attributes: %d\n", ret);
2851
2852         return ret;
2853 }
2854
2855 /* Interrupt moderation */
2856 bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev)
2857 {
2858         return ena_com_check_supported_feature_id(ena_dev,
2859                                                   ENA_ADMIN_INTERRUPT_MODERATION);
2860 }
2861
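/* Convert the requested TX coalescing time from microseconds to device units
 * by dividing by the interrupt delay resolution reported by the device.
 * For example (hypothetical numbers), with a resolution of 4 us per unit a
 * request of 64 us is stored as 64 / 4 = 16 units.
 */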
2862 int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
2863                                                       u32 tx_coalesce_usecs)
2864 {
2865         if (!ena_dev->intr_delay_resolution) {
2866                 ena_trc_err("Illegal interrupt delay granularity value\n");
2867                 return ENA_COM_FAULT;
2868         }
2869
2870         ena_dev->intr_moder_tx_interval = tx_coalesce_usecs /
2871                 ena_dev->intr_delay_resolution;
2872
2873         return 0;
2874 }
2875
2876 int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
2877                                                       u32 rx_coalesce_usecs)
2878 {
2879         if (!ena_dev->intr_delay_resolution) {
2880                 ena_trc_err("Illegal interrupt delay granularity value\n");
2881                 return ENA_COM_FAULT;
2882         }
2883
2884         /* The LOWEST entry of the moderation table is used to store
2885          * the nonadaptive RX interrupt coalescing value
2886          */
2887         ena_dev->intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval =
2888                 rx_coalesce_usecs / ena_dev->intr_delay_resolution;
2889
2890         return 0;
2891 }
2892
2893 void ena_com_destroy_interrupt_moderation(struct ena_com_dev *ena_dev)
2894 {
2895         if (ena_dev->intr_moder_tbl)
2896                 ENA_MEM_FREE(ena_dev->dmadev, ena_dev->intr_moder_tbl);
2897         ena_dev->intr_moder_tbl = NULL;
2898 }
2899
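/* Query the INTERRUPT_MODERATION feature. If the device does not support it,
 * adaptive moderation is disabled and 0 is returned; otherwise the moderation
 * table is allocated, the delay resolution is stored and adaptive moderation
 * is enabled.
 */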
2900 int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
2901 {
2902         struct ena_admin_get_feat_resp get_resp;
2903         u16 delay_resolution;
2904         int rc;
2905
2906         rc = ena_com_get_feature(ena_dev, &get_resp,
2907                                  ENA_ADMIN_INTERRUPT_MODERATION, 0);
2908
2909         if (rc) {
2910                 if (rc == ENA_COM_UNSUPPORTED) {
2911                         ena_trc_dbg("Feature %d isn't supported\n",
2912                                     ENA_ADMIN_INTERRUPT_MODERATION);
2913                         rc = 0;
2914                 } else {
2915                         ena_trc_err("Failed to get interrupt moderation admin cmd. rc: %d\n",
2916                                     rc);
2917                 }
2918
2919                 /* No moderation supported; disable adaptive support */
2920                 ena_com_disable_adaptive_moderation(ena_dev);
2921                 return rc;
2922         }
2923
2924         rc = ena_com_init_interrupt_moderation_table(ena_dev);
2925         if (rc)
2926                 goto err;
2927
2928         /* If moderation is supported by the device, enable adaptive moderation */
2929         delay_resolution = get_resp.u.intr_moderation.intr_delay_resolution;
2930         ena_com_update_intr_delay_resolution(ena_dev, delay_resolution);
2931         ena_com_enable_adaptive_moderation(ena_dev);
2932
2933         return 0;
2934 err:
2935         ena_com_destroy_interrupt_moderation(ena_dev);
2936         return rc;
2937 }
2938
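/* Fill the five adaptive moderation levels (LOWEST..HIGHEST) with the default
 * interval, packets-per-interval and bytes-per-interval thresholds
 * (ENA_INTR_*_USECS / _PKTS / _BYTES).
 */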
2939 void ena_com_config_default_interrupt_moderation_table(struct ena_com_dev *ena_dev)
2940 {
2941         struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
2942
2943         if (!intr_moder_tbl)
2944                 return;
2945
2946         intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval =
2947                 ENA_INTR_LOWEST_USECS;
2948         intr_moder_tbl[ENA_INTR_MODER_LOWEST].pkts_per_interval =
2949                 ENA_INTR_LOWEST_PKTS;
2950         intr_moder_tbl[ENA_INTR_MODER_LOWEST].bytes_per_interval =
2951                 ENA_INTR_LOWEST_BYTES;
2952
2953         intr_moder_tbl[ENA_INTR_MODER_LOW].intr_moder_interval =
2954                 ENA_INTR_LOW_USECS;
2955         intr_moder_tbl[ENA_INTR_MODER_LOW].pkts_per_interval =
2956                 ENA_INTR_LOW_PKTS;
2957         intr_moder_tbl[ENA_INTR_MODER_LOW].bytes_per_interval =
2958                 ENA_INTR_LOW_BYTES;
2959
2960         intr_moder_tbl[ENA_INTR_MODER_MID].intr_moder_interval =
2961                 ENA_INTR_MID_USECS;
2962         intr_moder_tbl[ENA_INTR_MODER_MID].pkts_per_interval =
2963                 ENA_INTR_MID_PKTS;
2964         intr_moder_tbl[ENA_INTR_MODER_MID].bytes_per_interval =
2965                 ENA_INTR_MID_BYTES;
2966
2967         intr_moder_tbl[ENA_INTR_MODER_HIGH].intr_moder_interval =
2968                 ENA_INTR_HIGH_USECS;
2969         intr_moder_tbl[ENA_INTR_MODER_HIGH].pkts_per_interval =
2970                 ENA_INTR_HIGH_PKTS;
2971         intr_moder_tbl[ENA_INTR_MODER_HIGH].bytes_per_interval =
2972                 ENA_INTR_HIGH_BYTES;
2973
2974         intr_moder_tbl[ENA_INTR_MODER_HIGHEST].intr_moder_interval =
2975                 ENA_INTR_HIGHEST_USECS;
2976         intr_moder_tbl[ENA_INTR_MODER_HIGHEST].pkts_per_interval =
2977                 ENA_INTR_HIGHEST_PKTS;
2978         intr_moder_tbl[ENA_INTR_MODER_HIGHEST].bytes_per_interval =
2979                 ENA_INTR_HIGHEST_BYTES;
2980 }
2981
2982 unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev)
2983 {
2984         return ena_dev->intr_moder_tx_interval;
2985 }
2986
2987 unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev)
2988 {
2989         struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
2990
2991         if (intr_moder_tbl)
2992                 return intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval;
2993
2994         return 0;
2995 }
2996
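/* Store a caller-supplied moderation entry for the given level, converting the
 * interval from microseconds to device units (divide by the delay resolution).
 * ena_com_get_intr_moderation_entry() below performs the inverse conversion
 * (multiply) when reading the entry back.
 */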
2997 void ena_com_init_intr_moderation_entry(struct ena_com_dev *ena_dev,
2998                                         enum ena_intr_moder_level level,
2999                                         struct ena_intr_moder_entry *entry)
3000 {
3001         struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
3002
3003         if (level >= ENA_INTR_MAX_NUM_OF_LEVELS)
3004                 return;
3005
3006         intr_moder_tbl[level].intr_moder_interval = entry->intr_moder_interval;
3007         if (ena_dev->intr_delay_resolution)
3008                 intr_moder_tbl[level].intr_moder_interval /=
3009                         ena_dev->intr_delay_resolution;
3010         intr_moder_tbl[level].pkts_per_interval = entry->pkts_per_interval;
3011
3012         /* Use the hardcoded value until ethtool supports the bytecount parameter */
3013         if (entry->bytes_per_interval != ENA_INTR_BYTE_COUNT_NOT_SUPPORTED)
3014                 intr_moder_tbl[level].bytes_per_interval = entry->bytes_per_interval;
3015 }
3016
3017 void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev,
3018                                        enum ena_intr_moder_level level,
3019                                        struct ena_intr_moder_entry *entry)
3020 {
3021         struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
3022
3023         if (level >= ENA_INTR_MAX_NUM_OF_LEVELS)
3024                 return;
3025
3026         entry->intr_moder_interval = intr_moder_tbl[level].intr_moder_interval;
3027         if (ena_dev->intr_delay_resolution)
3028                 entry->intr_moder_interval *= ena_dev->intr_delay_resolution;
3029         entry->pkts_per_interval =
3030                 intr_moder_tbl[level].pkts_per_interval;
3031         entry->bytes_per_interval = intr_moder_tbl[level].bytes_per_interval;
3032 }
3033
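/* Select the TX placement policy. If the device reports no LLQs, fall back to
 * host-memory rings; otherwise configure the LLQ parameters and verify that
 * the maximum packet header plus the descriptors placed before it fit in one
 * LLQ entry. Illustrative numbers only: a 96B max header with two preceding
 * 16B descriptors needs 128B, so a 128B (or larger) entry would be required.
 */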
3034 int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
3035                             struct ena_admin_feature_llq_desc *llq_features,
3036                             struct ena_llq_configurations *llq_default_cfg)
3037 {
3038         int rc;
3039         int size;
3040
3041         if (!llq_features->max_llq_num) {
3042                 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
3043                 return 0;
3044         }
3045
3046         rc = ena_com_config_llq_info(ena_dev, llq_features, llq_default_cfg);
3047         if (rc)
3048                 return rc;
3049
3050         /* Validate that the header and preceding descriptors fit in an LLQ entry */
3051         size = ena_dev->tx_max_header_size;
3052         size += ena_dev->llq_info.descs_num_before_header *
3053                 sizeof(struct ena_eth_io_tx_desc);
3054
3055         if (unlikely(ena_dev->llq_info.desc_list_entry_size < size)) {
3056                 ena_trc_err("the size of the LLQ entry is smaller than needed\n");
3057                 return ENA_COM_INVAL;
3058         }
3059
3060         ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV;
3061
3062         return 0;
3063 }