dpdk.git: drivers/net/hns3/hns3_cmd.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2021 HiSilicon Limited.
 */

#include <ethdev_pci.h>
#include <rte_io.h>

#include "hns3_ethdev.h"
#include "hns3_regs.h"
#include "hns3_intr.h"
#include "hns3_logs.h"

#define hns3_is_csq(ring) ((ring)->flag & HNS3_TYPE_CSQ)

#define cmq_ring_to_dev(ring)   (&(ring)->dev->pdev->dev)

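/*
 * Number of free descriptor slots in the ring. One slot is always kept
 * unused so that a full ring can be distinguished from an empty one
 * (next_to_use == next_to_clean).
 */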
static int
hns3_ring_space(struct hns3_cmq_ring *ring)
{
        int ntu = ring->next_to_use;
        int ntc = ring->next_to_clean;
        int used = (ntu - ntc + ring->desc_num) % ring->desc_num;

        return ring->desc_num - used - 1;
}

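/*
 * Check that the head pointer reported by hardware lies within the range
 * of descriptors currently owned by hardware, i.e. between next_to_clean
 * and next_to_use, taking ring wraparound into account.
 */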
static bool
is_valid_csq_clean_head(struct hns3_cmq_ring *ring, int head)
{
        int ntu = ring->next_to_use;
        int ntc = ring->next_to_clean;

        if (ntu > ntc)
                return head >= ntc && head <= ntu;

        return head >= ntc || head <= ntu;
}

/*
 * hns3_allocate_dma_mem - Specific memory allocation for the command queue.
 * Reserve a memzone, which is an IOVA-contiguous region of memory identified
 * by a name.
 * @hw: pointer to the hw structure
 * @ring: pointer to the ring structure
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 */
static int
hns3_allocate_dma_mem(struct hns3_hw *hw, struct hns3_cmq_ring *ring,
                      uint64_t size, uint32_t alignment)
{
        const struct rte_memzone *mz = NULL;
        char z_name[RTE_MEMZONE_NAMESIZE];

        snprintf(z_name, sizeof(z_name), "hns3_dma_%" PRIu64, rte_rand());
        mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY,
                                         RTE_MEMZONE_IOVA_CONTIG, alignment,
                                         RTE_PGSIZE_2M);
        if (mz == NULL)
                return -ENOMEM;

        ring->buf_size = size;
        ring->desc = mz->addr;
        ring->desc_dma_addr = mz->iova;
        ring->zone = (const void *)mz;
        hns3_dbg(hw, "memzone %s allocated with physical address: %" PRIu64,
                 mz->name, ring->desc_dma_addr);

        return 0;
}

static void
hns3_free_dma_mem(struct hns3_hw *hw, struct hns3_cmq_ring *ring)
{
        hns3_dbg(hw, "memzone %s to be freed with physical address: %" PRIu64,
                 ((const struct rte_memzone *)ring->zone)->name,
                 ring->desc_dma_addr);
        rte_memzone_free((const struct rte_memzone *)ring->zone);
        ring->buf_size = 0;
        ring->desc = NULL;
        ring->desc_dma_addr = 0;
        ring->zone = NULL;
}

static int
hns3_alloc_cmd_desc(struct hns3_hw *hw, struct hns3_cmq_ring *ring)
{
        int size  = ring->desc_num * sizeof(struct hns3_cmd_desc);

        if (hns3_allocate_dma_mem(hw, ring, size, HNS3_CMD_DESC_ALIGNMENT)) {
                hns3_err(hw, "allocate dma mem failed");
                return -ENOMEM;
        }

        return 0;
}

static void
hns3_free_cmd_desc(struct hns3_hw *hw, struct hns3_cmq_ring *ring)
{
        if (ring->desc)
                hns3_free_dma_mem(hw, ring);
}

static int
hns3_alloc_cmd_queue(struct hns3_hw *hw, int ring_type)
{
        struct hns3_cmq_ring *ring =
                (ring_type == HNS3_TYPE_CSQ) ? &hw->cmq.csq : &hw->cmq.crq;
        int ret;

        ring->ring_type = ring_type;
        ring->hw = hw;

        ret = hns3_alloc_cmd_desc(hw, ring);
        if (ret)
                hns3_err(hw, "descriptor %s alloc error %d",
                            (ring_type == HNS3_TYPE_CSQ) ? "CSQ" : "CRQ", ret);

        return ret;
}

void
hns3_cmd_reuse_desc(struct hns3_cmd_desc *desc, bool is_read)
{
        desc->flag = rte_cpu_to_le_16(HNS3_CMD_FLAG_NO_INTR | HNS3_CMD_FLAG_IN);
        if (is_read)
                desc->flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_WR);
        else
                desc->flag &= rte_cpu_to_le_16(~HNS3_CMD_FLAG_WR);
}

void
hns3_cmd_setup_basic_desc(struct hns3_cmd_desc *desc,
                          enum hns3_opcode_type opcode, bool is_read)
{
        memset((void *)desc, 0, sizeof(struct hns3_cmd_desc));
        desc->opcode = rte_cpu_to_le_16(opcode);
        desc->flag = rte_cpu_to_le_16(HNS3_CMD_FLAG_NO_INTR | HNS3_CMD_FLAG_IN);

        if (is_read)
                desc->flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_WR);
}

static void
hns3_cmd_clear_regs(struct hns3_hw *hw)
{
        hns3_write_dev(hw, HNS3_CMDQ_TX_ADDR_L_REG, 0);
        hns3_write_dev(hw, HNS3_CMDQ_TX_ADDR_H_REG, 0);
        hns3_write_dev(hw, HNS3_CMDQ_TX_DEPTH_REG, 0);
        hns3_write_dev(hw, HNS3_CMDQ_TX_HEAD_REG, 0);
        hns3_write_dev(hw, HNS3_CMDQ_TX_TAIL_REG, 0);
        hns3_write_dev(hw, HNS3_CMDQ_RX_ADDR_L_REG, 0);
        hns3_write_dev(hw, HNS3_CMDQ_RX_ADDR_H_REG, 0);
        hns3_write_dev(hw, HNS3_CMDQ_RX_DEPTH_REG, 0);
        hns3_write_dev(hw, HNS3_CMDQ_RX_HEAD_REG, 0);
        hns3_write_dev(hw, HNS3_CMDQ_RX_TAIL_REG, 0);
}

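/*
 * Program the base address, depth and head/tail pointers of one command
 * queue ring (CSQ or CRQ) into the hardware registers. For the CSQ the
 * depth register also carries the software-reset-ready flag.
 */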
static void
hns3_cmd_config_regs(struct hns3_cmq_ring *ring)
{
        uint64_t dma = ring->desc_dma_addr;

        if (ring->ring_type == HNS3_TYPE_CSQ) {
                hns3_write_dev(ring->hw, HNS3_CMDQ_TX_ADDR_L_REG,
                               lower_32_bits(dma));
                hns3_write_dev(ring->hw, HNS3_CMDQ_TX_ADDR_H_REG,
                               upper_32_bits(dma));
                hns3_write_dev(ring->hw, HNS3_CMDQ_TX_DEPTH_REG,
                               ring->desc_num >> HNS3_NIC_CMQ_DESC_NUM_S |
                               HNS3_NIC_SW_RST_RDY);
                hns3_write_dev(ring->hw, HNS3_CMDQ_TX_HEAD_REG, 0);
                hns3_write_dev(ring->hw, HNS3_CMDQ_TX_TAIL_REG, 0);
        } else {
                hns3_write_dev(ring->hw, HNS3_CMDQ_RX_ADDR_L_REG,
                               lower_32_bits(dma));
                hns3_write_dev(ring->hw, HNS3_CMDQ_RX_ADDR_H_REG,
                               upper_32_bits(dma));
                hns3_write_dev(ring->hw, HNS3_CMDQ_RX_DEPTH_REG,
                               ring->desc_num >> HNS3_NIC_CMQ_DESC_NUM_S);
                hns3_write_dev(ring->hw, HNS3_CMDQ_RX_HEAD_REG, 0);
                hns3_write_dev(ring->hw, HNS3_CMDQ_RX_TAIL_REG, 0);
        }
}

static void
hns3_cmd_init_regs(struct hns3_hw *hw)
{
        hns3_cmd_config_regs(&hw->cmq.csq);
        hns3_cmd_config_regs(&hw->cmq.crq);
}

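/*
 * Reclaim CSQ descriptors already consumed by the firmware: read the head
 * pointer from hardware, validate it against the software indexes and
 * advance next_to_clean. Returns the number of descriptors cleaned, or
 * -EIO if the head pointer or base address is inconsistent, in which case
 * the primary process disables the command queue and schedules a delayed
 * reset.
 */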
static int
hns3_cmd_csq_clean(struct hns3_hw *hw)
{
        struct hns3_cmq_ring *csq = &hw->cmq.csq;
        uint32_t head;
        uint32_t addr;
        int clean;

        head = hns3_read_dev(hw, HNS3_CMDQ_TX_HEAD_REG);
        addr = hns3_read_dev(hw, HNS3_CMDQ_TX_ADDR_L_REG);
        if (!is_valid_csq_clean_head(csq, head) || addr == 0) {
                hns3_err(hw, "wrong cmd addr(%0x) head (%u, %u-%u)", addr, head,
                         csq->next_to_use, csq->next_to_clean);
                if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                        __atomic_store_n(&hw->reset.disable_cmd, 1,
                                         __ATOMIC_RELAXED);
                        hns3_schedule_delayed_reset(HNS3_DEV_HW_TO_ADAPTER(hw));
                }

                return -EIO;
        }

        clean = (head - csq->next_to_clean + csq->desc_num) % csq->desc_num;
        csq->next_to_clean = head;
        return clean;
}

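/* The CSQ is done when the hardware head pointer has caught up with next_to_use. */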
static int
hns3_cmd_csq_done(struct hns3_hw *hw)
{
        uint32_t head = hns3_read_dev(hw, HNS3_CMDQ_TX_HEAD_REG);

        return head == hw->cmq.csq.next_to_use;
}

static bool
hns3_is_special_opcode(uint16_t opcode)
{
        /*
         * These commands have several descriptors,
         * and use the first one to save opcode and return value.
         */
        uint16_t spec_opcode[] = {HNS3_OPC_STATS_64_BIT,
                                  HNS3_OPC_STATS_32_BIT,
                                  HNS3_OPC_STATS_MAC,
                                  HNS3_OPC_STATS_MAC_ALL,
                                  HNS3_OPC_QUERY_32_BIT_REG,
                                  HNS3_OPC_QUERY_64_BIT_REG,
                                  HNS3_OPC_QUERY_CLEAR_MPF_RAS_INT,
                                  HNS3_OPC_QUERY_CLEAR_PF_RAS_INT,
                                  HNS3_OPC_QUERY_CLEAR_ALL_MPF_MSIX_INT,
                                  HNS3_OPC_QUERY_CLEAR_ALL_PF_MSIX_INT,
                                  HNS3_OPC_QUERY_ALL_ERR_INFO,};
        uint32_t i;

        for (i = 0; i < ARRAY_SIZE(spec_opcode); i++)
                if (spec_opcode[i] == opcode)
                        return true;

        return false;
}

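/*
 * Map the return code reported by the IMP firmware in a descriptor to a
 * Linux-style errno value; unknown codes fall back to -EREMOTEIO.
 */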
static int
hns3_cmd_convert_err_code(uint16_t desc_ret)
{
        static const struct {
                uint16_t imp_errcode;
                int linux_errcode;
        } hns3_cmdq_status[] = {
                {HNS3_CMD_EXEC_SUCCESS, 0},
                {HNS3_CMD_NO_AUTH, -EPERM},
                {HNS3_CMD_NOT_SUPPORTED, -EOPNOTSUPP},
                {HNS3_CMD_QUEUE_FULL, -EXFULL},
                {HNS3_CMD_NEXT_ERR, -ENOSR},
                {HNS3_CMD_UNEXE_ERR, -ENOTBLK},
                {HNS3_CMD_PARA_ERR, -EINVAL},
                {HNS3_CMD_RESULT_ERR, -ERANGE},
                {HNS3_CMD_TIMEOUT, -ETIME},
                {HNS3_CMD_HILINK_ERR, -ENOLINK},
                {HNS3_CMD_QUEUE_ILLEGAL, -ENXIO},
                {HNS3_CMD_INVALID, -EBADR},
                {HNS3_CMD_ROH_CHECK_FAIL, -EINVAL}
        };

        uint32_t i;

        for (i = 0; i < ARRAY_SIZE(hns3_cmdq_status); i++)
                if (hns3_cmdq_status[i].imp_errcode == desc_ret)
                        return hns3_cmdq_status[i].linux_errcode;

        return -EREMOTEIO;
}

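/*
 * Copy the descriptors written back by hardware (starting at ring index
 * ntc) into the caller's desc array and convert the returned status.
 * For normal commands the status is taken from the last descriptor; for
 * the special multi-descriptor commands listed above it is in the first.
 */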
static int
hns3_cmd_get_hardware_reply(struct hns3_hw *hw,
                            struct hns3_cmd_desc *desc, int num, int ntc)
{
        uint16_t opcode, desc_ret;
        int current_ntc = ntc;
        int handle;

        opcode = rte_le_to_cpu_16(desc[0].opcode);
        for (handle = 0; handle < num; handle++) {
                /* Get the result of hardware write back */
                desc[handle] = hw->cmq.csq.desc[current_ntc];

                current_ntc++;
                if (current_ntc == hw->cmq.csq.desc_num)
                        current_ntc = 0;
        }

        if (likely(!hns3_is_special_opcode(opcode)))
                desc_ret = rte_le_to_cpu_16(desc[num - 1].retval);
        else
                desc_ret = rte_le_to_cpu_16(desc[0].retval);

        hw->cmq.last_status = desc_ret;
        return hns3_cmd_convert_err_code(desc_ret);
}

static int hns3_cmd_poll_reply(struct hns3_hw *hw)
{
        struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
        uint32_t timeout = 0;

        do {
                if (hns3_cmd_csq_done(hw))
                        return 0;

                if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED)) {
                        hns3_err(hw,
                                 "Don't wait for reply because of disable_cmd");
                        return -EBUSY;
                }

                if (is_reset_pending(hns)) {
                        hns3_err(hw, "Don't wait for reply because of reset pending");
                        return -EIO;
                }

                rte_delay_us(1);
                timeout++;
        } while (timeout < hw->cmq.tx_timeout);
        hns3_err(hw, "Wait for reply timeout");
        return -ETIME;
}

/*
 * hns3_cmd_send - send command to the command queue
 *
 * @param hw
 *   pointer to the hw struct
 * @param desc
 *   prefilled descriptor(s) describing the command
 * @param num
 *   the number of descriptors to be sent
 * @return
 *   - -EBUSY if the device is detected to be resetting
 *   - -EIO   if the command CSQ is detected to be corrupted (due to reset)
 *            or there is a reset pending
 *   - -ENOMEM/-ETIME/... (non-zero) for other error cases
 *   - Zero   if the operation completed successfully
 *
 * Note: -EBUSY/-EIO are only used in the reset case.
 *
 * Note: this is the main send routine for the command queue; it posts the
 * descriptors to the queue, cleans completed entries, and so on.
 */
int
hns3_cmd_send(struct hns3_hw *hw, struct hns3_cmd_desc *desc, int num)
{
        struct hns3_cmd_desc *desc_to_use;
        int handle = 0;
        int retval;
        uint32_t ntc;

        if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED))
                return -EBUSY;

        rte_spinlock_lock(&hw->cmq.csq.lock);

        /* Clean the command send queue */
        retval = hns3_cmd_csq_clean(hw);
        if (retval < 0) {
                rte_spinlock_unlock(&hw->cmq.csq.lock);
                return retval;
        }

        if (num > hns3_ring_space(&hw->cmq.csq)) {
                rte_spinlock_unlock(&hw->cmq.csq.lock);
                return -ENOMEM;
        }

        /*
         * Record the ring location of the descriptors for this send; it is
         * used later to read back the hardware write-back results.
         */
        ntc = hw->cmq.csq.next_to_use;

        while (handle < num) {
                desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
                *desc_to_use = desc[handle];
                (hw->cmq.csq.next_to_use)++;
                if (hw->cmq.csq.next_to_use == hw->cmq.csq.desc_num)
                        hw->cmq.csq.next_to_use = 0;
                handle++;
        }

        /* Write to hardware */
        hns3_write_dev(hw, HNS3_CMDQ_TX_TAIL_REG, hw->cmq.csq.next_to_use);

        /*
         * If the command is synchronous, wait for the firmware to write back;
         * if multiple descriptors are sent, use the first one to check.
         */
        if (HNS3_CMD_SEND_SYNC(rte_le_to_cpu_16(desc->flag))) {
                retval = hns3_cmd_poll_reply(hw);
                if (!retval)
                        retval = hns3_cmd_get_hardware_reply(hw, desc, num,
                                                             ntc);
        }

        rte_spinlock_unlock(&hw->cmq.csq.lock);
        return retval;
}

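/*
 * Typical caller pattern (illustrative sketch only, not code from this
 * file): set up a descriptor, send it synchronously, then read the reply
 * from desc.data on success.
 *
 *	struct hns3_cmd_desc desc;
 *	int ret;
 *
 *	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_FW_VER, true);
 *	ret = hns3_cmd_send(hw, &desc, 1);
 *	if (ret == 0)
 *		consume_reply(desc.data);   // hypothetical consumer of the reply
 *
 * See hns3_cmd_query_firmware_version_and_capability() below for a real
 * caller that follows this pattern.
 */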
static const char *
hns3_get_caps_name(uint32_t caps_id)
{
        const struct {
                enum HNS3_CAPS_BITS caps;
                const char *name;
        } dev_caps[] = {
                { HNS3_CAPS_UDP_GSO_B,         "udp_gso"         },
                { HNS3_CAPS_ATR_B,             "atr"             },
                { HNS3_CAPS_FD_QUEUE_REGION_B, "fd_queue_region" },
                { HNS3_CAPS_PTP_B,             "ptp"             },
                { HNS3_CAPS_INT_QL_B,          "int_ql"          },
                { HNS3_CAPS_SIMPLE_BD_B,       "simple_bd"       },
                { HNS3_CAPS_TX_PUSH_B,         "tx_push"         },
                { HNS3_CAPS_PHY_IMP_B,         "phy_imp"         },
                { HNS3_CAPS_TQP_TXRX_INDEP_B,  "tqp_txrx_indep"  },
                { HNS3_CAPS_HW_PAD_B,          "hw_pad"          },
                { HNS3_CAPS_STASH_B,           "stash"           },
                { HNS3_CAPS_UDP_TUNNEL_CSUM_B, "udp_tunnel_csum" },
                { HNS3_CAPS_RAS_IMP_B,         "ras_imp"         },
                { HNS3_CAPS_FEC_B,             "fec"             },
                { HNS3_CAPS_PAUSE_B,           "pause"           },
                { HNS3_CAPS_RXD_ADV_LAYOUT_B,  "rxd_adv_layout"  }
        };
        uint32_t i;

        for (i = 0; i < RTE_DIM(dev_caps); i++) {
                if (dev_caps[i].caps == caps_id)
                        return dev_caps[i].name;
        }

        return "unknown";
}

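/*
 * Clear the capability bits selected by dev_caps_mask from the firmware
 * reply: caps_new keeps only the unmasked bits, while caps_masked holds
 * the bits that were cleared so they can be logged below.
 */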
static void
hns3_mask_capability(struct hns3_hw *hw,
                     struct hns3_query_version_cmd *cmd)
{
#define MAX_CAPS_BIT    64

        struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
        uint64_t caps_org, caps_new, caps_masked;
        uint32_t i;

        if (hns->dev_caps_mask == 0)
                return;

        memcpy(&caps_org, &cmd->caps[0], sizeof(caps_org));
        caps_org = rte_le_to_cpu_64(caps_org);
        caps_new = caps_org ^ (caps_org & hns->dev_caps_mask);
        caps_masked = caps_org ^ caps_new;
        caps_new = rte_cpu_to_le_64(caps_new);
        memcpy(&cmd->caps[0], &caps_new, sizeof(caps_new));

        for (i = 0; i < MAX_CAPS_BIT; i++) {
                if (!(caps_masked & BIT_ULL(i)))
                        continue;
                hns3_info(hw, "mask capability: id-%u, name-%s.",
                          i, hns3_get_caps_name(i));
        }
}

static void
hns3_parse_capability(struct hns3_hw *hw,
                      struct hns3_query_version_cmd *cmd)
{
        uint32_t caps = rte_le_to_cpu_32(cmd->caps[0]);

        if (hns3_get_bit(caps, HNS3_CAPS_UDP_GSO_B))
                hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_UDP_GSO_B, 1);
        if (hns3_get_bit(caps, HNS3_CAPS_FD_QUEUE_REGION_B))
                hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_FD_QUEUE_REGION_B,
                             1);
        if (hns3_get_bit(caps, HNS3_CAPS_PTP_B)) {
                /*
                 * PTP depends on a special packet type reported by hardware
                 * that is only available with the RXD advanced layout, so if
                 * the hardware doesn't support the RXD advanced layout, the
                 * driver should ignore the PTP capability.
                 */
                if (hns3_get_bit(caps, HNS3_CAPS_RXD_ADV_LAYOUT_B))
                        hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_PTP_B, 1);
                else
                        hns3_warn(hw, "ignore PTP capability due to lack of "
                                  "rxd advanced layout capability.");
        }
        if (hns3_get_bit(caps, HNS3_CAPS_TX_PUSH_B))
                hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_TX_PUSH_B, 1);
        if (hns3_get_bit(caps, HNS3_CAPS_PHY_IMP_B))
                hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_COPPER_B, 1);
        if (hns3_get_bit(caps, HNS3_CAPS_TQP_TXRX_INDEP_B))
                hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_INDEP_TXRX_B, 1);
        if (hns3_get_bit(caps, HNS3_CAPS_STASH_B))
                hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_STASH_B, 1);
        if (hns3_get_bit(caps, HNS3_CAPS_RXD_ADV_LAYOUT_B))
                hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_RXD_ADV_LAYOUT_B,
                             1);
        if (hns3_get_bit(caps, HNS3_CAPS_UDP_TUNNEL_CSUM_B))
                hns3_set_bit(hw->capability,
                                HNS3_DEV_SUPPORT_OUTER_UDP_CKSUM_B, 1);
        if (hns3_get_bit(caps, HNS3_CAPS_RAS_IMP_B))
                hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_RAS_IMP_B, 1);
}

static uint32_t
hns3_build_api_caps(void)
{
        uint32_t api_caps = 0;

        hns3_set_bit(api_caps, HNS3_API_CAP_FLEX_RSS_TBL_B, 1);

        return rte_cpu_to_le_32(api_caps);
}

static int
hns3_cmd_query_firmware_version_and_capability(struct hns3_hw *hw)
{
        struct hns3_query_version_cmd *resp;
        struct hns3_cmd_desc desc;
        int ret;

        hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_FW_VER, 1);
        resp = (struct hns3_query_version_cmd *)desc.data;
        resp->api_caps = hns3_build_api_caps();

        /* Initialize the cmd function */
        ret = hns3_cmd_send(hw, &desc, 1);
        if (ret)
                return ret;

        hw->fw_version = rte_le_to_cpu_32(resp->firmware);
        /*
         * Make sure to mask the capabilities before parsing them, because
         * masking overwrites the response data.
         */
        hns3_mask_capability(hw, resp);
        hns3_parse_capability(hw, resp);

        return 0;
}

int
hns3_cmd_init_queue(struct hns3_hw *hw)
{
        int ret;

        /* Setup the lock for command queue */
        rte_spinlock_init(&hw->cmq.csq.lock);
        rte_spinlock_init(&hw->cmq.crq.lock);

        /*
         * Clear all command registers,
         * in case there are some residual values.
         */
        hns3_cmd_clear_regs(hw);

        /* Setup the number of queue entries for the command queues */
        hw->cmq.csq.desc_num = HNS3_NIC_CMQ_DESC_NUM;
        hw->cmq.crq.desc_num = HNS3_NIC_CMQ_DESC_NUM;

        /* Setup Tx write back timeout */
        hw->cmq.tx_timeout = HNS3_CMDQ_TX_TIMEOUT;

        /* Setup queue rings */
        ret = hns3_alloc_cmd_queue(hw, HNS3_TYPE_CSQ);
        if (ret) {
                PMD_INIT_LOG(ERR, "CSQ ring setup error %d", ret);
                return ret;
        }

        ret = hns3_alloc_cmd_queue(hw, HNS3_TYPE_CRQ);
        if (ret) {
                PMD_INIT_LOG(ERR, "CRQ ring setup error %d", ret);
                goto err_crq;
        }

        return 0;

err_crq:
        hns3_free_cmd_desc(hw, &hw->cmq.csq);

        return ret;
}

static void
hns3_update_dev_lsc_cap(struct hns3_hw *hw, int fw_compact_cmd_result)
{
        struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];

        if (hw->adapter_state != HNS3_NIC_UNINITIALIZED)
                return;

        if (fw_compact_cmd_result != 0) {
                /*
                 * If fw_compact_cmd_result is not zero, the firmware doesn't
                 * support the link status change interrupt.
                 * The framework has already set the RTE_ETH_DEV_INTR_LSC bit
                 * because the driver declared RTE_PCI_DRV_INTR_LSC in
                 * drv_flags, so the RTE_ETH_DEV_INTR_LSC capability must be
                 * cleared when the firmware is detected not to support the
                 * link status change interrupt.
                 */
                dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC;
        }
}

static int
hns3_apply_fw_compat_cmd_result(struct hns3_hw *hw, int result)
{
        if (result != 0 && hns3_dev_copper_supported(hw)) {
                hns3_err(hw, "firmware fails to initialize the PHY, ret = %d.",
                         result);
                return result;
        }

        hns3_update_dev_lsc_cap(hw, result);

        return 0;
}

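/*
 * Report the driver's compatibility configuration to the firmware: enable
 * link status change reporting, explicitly disable NCSI error reporting
 * and, on copper ports, ask the firmware to drive the PHY. When is_init
 * is false all compatibility bits are cleared (used at uninit time).
 */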
static int
hns3_firmware_compat_config(struct hns3_hw *hw, bool is_init)
{
        struct hns3_firmware_compat_cmd *req;
        struct hns3_cmd_desc desc;
        uint32_t compat = 0;

#if defined(RTE_HNS3_ONLY_1630_FPGA)
        /* If resv reg enabled phy driver of imp is not configured, driver
         * will use temporary phy driver.
         */
        struct rte_pci_device *pci_dev;
        struct rte_eth_dev *eth_dev;
        uint8_t revision;
        int ret;

        eth_dev = &rte_eth_devices[hw->data->port_id];
        pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
        /* Get PCI revision id */
        ret = rte_pci_read_config(pci_dev, &revision, HNS3_PCI_REVISION_ID_LEN,
                                  HNS3_PCI_REVISION_ID);
        if (ret != HNS3_PCI_REVISION_ID_LEN) {
                PMD_INIT_LOG(ERR, "failed to read pci revision id, ret = %d",
                             ret);
                return -EIO;
        }
        if (revision == PCI_REVISION_ID_HIP09_A) {
                struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
                if (hns3_dev_copper_supported(hw) == 0 || pf->is_tmp_phy) {
                        PMD_INIT_LOG(ERR, "***use temp phy driver in dpdk***");
                        pf->is_tmp_phy = true;
                        hns3_set_bit(hw->capability,
                                     HNS3_DEV_SUPPORT_COPPER_B, 1);
                        return 0;
                }

                PMD_INIT_LOG(ERR, "***use phy driver in imp***");
        }
#endif

        hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_FIRMWARE_COMPAT_CFG, false);
        req = (struct hns3_firmware_compat_cmd *)desc.data;

        if (is_init) {
                hns3_set_bit(compat, HNS3_LINK_EVENT_REPORT_EN_B, 1);
                hns3_set_bit(compat, HNS3_NCSI_ERROR_REPORT_EN_B, 0);
                if (hns3_dev_copper_supported(hw))
                        hns3_set_bit(compat, HNS3_FIRMWARE_PHY_DRIVER_EN_B, 1);
        }
        req->compat = rte_cpu_to_le_32(compat);

        return hns3_cmd_send(hw, &desc, 1);
}

int
hns3_cmd_init(struct hns3_hw *hw)
{
        struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
        uint32_t version;
        int ret;

        rte_spinlock_lock(&hw->cmq.csq.lock);
        rte_spinlock_lock(&hw->cmq.crq.lock);

        hw->cmq.csq.next_to_clean = 0;
        hw->cmq.csq.next_to_use = 0;
        hw->cmq.crq.next_to_clean = 0;
        hw->cmq.crq.next_to_use = 0;
        hw->mbx_resp.head = 0;
        hw->mbx_resp.tail = 0;
        hw->mbx_resp.lost = 0;
        hns3_cmd_init_regs(hw);

        rte_spinlock_unlock(&hw->cmq.crq.lock);
        rte_spinlock_unlock(&hw->cmq.csq.lock);

        /*
         * Check if there is a new reset pending, because a higher level reset
         * may happen while a lower level reset is being processed.
         */
        if (is_reset_pending(HNS3_DEV_HW_TO_ADAPTER(hw))) {
                PMD_INIT_LOG(ERR, "New reset pending, keep disable cmd");
                ret = -EBUSY;
                goto err_cmd_init;
        }
        __atomic_store_n(&hw->reset.disable_cmd, 0, __ATOMIC_RELAXED);

        ret = hns3_cmd_query_firmware_version_and_capability(hw);
        if (ret) {
                PMD_INIT_LOG(ERR, "firmware version query failed %d", ret);
                goto err_cmd_init;
        }

        version = hw->fw_version;
        PMD_INIT_LOG(INFO, "The firmware version is %lu.%lu.%lu.%lu",
                     hns3_get_field(version, HNS3_FW_VERSION_BYTE3_M,
                                    HNS3_FW_VERSION_BYTE3_S),
                     hns3_get_field(version, HNS3_FW_VERSION_BYTE2_M,
                                    HNS3_FW_VERSION_BYTE2_S),
                     hns3_get_field(version, HNS3_FW_VERSION_BYTE1_M,
                                    HNS3_FW_VERSION_BYTE1_S),
                     hns3_get_field(version, HNS3_FW_VERSION_BYTE0_M,
                                    HNS3_FW_VERSION_BYTE0_S));

        if (hns->is_vf)
                return 0;

        /*
         * Ask the firmware to enable some features. A fiber port can still
         * work without them, but a copper port can't, because the firmware
         * fails to take over the PHY.
         */
        ret = hns3_firmware_compat_config(hw, true);
        if (ret)
                PMD_INIT_LOG(WARNING, "firmware compatible features not "
                             "supported, ret = %d.", ret);

        /*
         * Perform some corresponding operations based on the firmware
         * compatibility configuration result.
         */
        ret = hns3_apply_fw_compat_cmd_result(hw, ret);
        if (ret)
                goto err_cmd_init;

        return 0;

err_cmd_init:
        __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
        return ret;
}

static void
hns3_destroy_queue(struct hns3_hw *hw, struct hns3_cmq_ring *ring)
{
        rte_spinlock_lock(&ring->lock);

        hns3_free_cmd_desc(hw, ring);

        rte_spinlock_unlock(&ring->lock);
}

void
hns3_cmd_destroy_queue(struct hns3_hw *hw)
{
        hns3_destroy_queue(hw, &hw->cmq.csq);
        hns3_destroy_queue(hw, &hw->cmq.crq);
}

void
hns3_cmd_uninit(struct hns3_hw *hw)
{
        struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);

        if (!hns->is_vf)
                (void)hns3_firmware_compat_config(hw, false);

        __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);

        /*
         * A delay is added to ensure that the register cleanup operations
         * will not be performed concurrently with the firmware command and
         * ensure that all the reserved commands are executed.
         * Concurrency may occur in two scenarios: asynchronous command and
         * timeout command. If the command fails to be executed due to busy
         * scheduling, the command will be processed in the next scheduling
         * of the firmware.
         */
        rte_delay_ms(HNS3_CMDQ_CLEAR_WAIT_TIME);

        rte_spinlock_lock(&hw->cmq.csq.lock);
        rte_spinlock_lock(&hw->cmq.crq.lock);
        hns3_cmd_clear_regs(hw);
        rte_spinlock_unlock(&hw->cmq.crq.lock);
        rte_spinlock_unlock(&hw->cmq.csq.lock);
}