net/hns3: remove unused macros
drivers/net/hns3/hns3_cmd.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2021 HiSilicon Limited.
 */

#include <ethdev_pci.h>
#include <rte_io.h>

#include "hns3_ethdev.h"
#include "hns3_regs.h"
#include "hns3_intr.h"
#include "hns3_logs.h"

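/*
 * Number of free descriptors in the ring. One slot is always left unused so
 * that a full ring can be distinguished from an empty one.
 */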
static int
hns3_ring_space(struct hns3_cmq_ring *ring)
{
	int ntu = ring->next_to_use;
	int ntc = ring->next_to_clean;
	int used = (ntu - ntc + ring->desc_num) % ring->desc_num;

	return ring->desc_num - used - 1;
}

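/*
 * A head value read back from hardware is valid only if it lies between
 * next_to_clean and next_to_use, taking ring wrap-around into account.
 */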
static bool
is_valid_csq_clean_head(struct hns3_cmq_ring *ring, int head)
{
	int ntu = ring->next_to_use;
	int ntc = ring->next_to_clean;

	if (ntu > ntc)
		return head >= ntc && head <= ntu;

	return head >= ntc || head <= ntu;
}

/*
 * hns3_allocate_dma_mem - Memory allocation specific to the command function.
 * Reserve a memzone, which is a contiguous portion of physical memory
 * identified by a name.
 * @hw: pointer to the hardware structure
 * @ring: pointer to the ring structure
 * @size: size of the memory requested
 * @alignment: what to align the allocation to
 */
static int
hns3_allocate_dma_mem(struct hns3_hw *hw, struct hns3_cmq_ring *ring,
		      uint64_t size, uint32_t alignment)
{
	const struct rte_memzone *mz = NULL;
	char z_name[RTE_MEMZONE_NAMESIZE];

	snprintf(z_name, sizeof(z_name), "hns3_dma_%" PRIu64, rte_rand());
	mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY,
					 RTE_MEMZONE_IOVA_CONTIG, alignment,
					 RTE_PGSIZE_2M);
	if (mz == NULL)
		return -ENOMEM;

	ring->buf_size = size;
	ring->desc = mz->addr;
	ring->desc_dma_addr = mz->iova;
	ring->zone = (const void *)mz;
	hns3_dbg(hw, "memzone %s allocated with physical address: %" PRIu64,
		 mz->name, ring->desc_dma_addr);

	return 0;
}

static void
hns3_free_dma_mem(struct hns3_hw *hw, struct hns3_cmq_ring *ring)
{
	hns3_dbg(hw, "memzone %s to be freed with physical address: %" PRIu64,
		 ((const struct rte_memzone *)ring->zone)->name,
		 ring->desc_dma_addr);
	rte_memzone_free((const struct rte_memzone *)ring->zone);
	ring->buf_size = 0;
	ring->desc = NULL;
	ring->desc_dma_addr = 0;
	ring->zone = NULL;
}

static int
hns3_alloc_cmd_desc(struct hns3_hw *hw, struct hns3_cmq_ring *ring)
{
	int size = ring->desc_num * sizeof(struct hns3_cmd_desc);

	if (hns3_allocate_dma_mem(hw, ring, size, HNS3_CMD_DESC_ALIGNMENT)) {
		hns3_err(hw, "allocate dma mem failed");
		return -ENOMEM;
	}

	return 0;
}

static void
hns3_free_cmd_desc(struct hns3_hw *hw, struct hns3_cmq_ring *ring)
{
	if (ring->desc)
		hns3_free_dma_mem(hw, ring);
}

static int
hns3_alloc_cmd_queue(struct hns3_hw *hw, int ring_type)
{
	struct hns3_cmq_ring *ring =
		(ring_type == HNS3_TYPE_CSQ) ? &hw->cmq.csq : &hw->cmq.crq;
	int ret;

	ring->ring_type = ring_type;
	ring->hw = hw;

	ret = hns3_alloc_cmd_desc(hw, ring);
	if (ret)
		hns3_err(hw, "descriptor %s alloc error %d",
			 (ring_type == HNS3_TYPE_CSQ) ? "CSQ" : "CRQ", ret);

	return ret;
}

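/*
 * Reset the flags of an already-completed descriptor so that it can be
 * submitted again, setting or clearing the WR bit according to whether the
 * resend is a read or a write.
 */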
void
hns3_cmd_reuse_desc(struct hns3_cmd_desc *desc, bool is_read)
{
	desc->flag = rte_cpu_to_le_16(HNS3_CMD_FLAG_NO_INTR | HNS3_CMD_FLAG_IN);
	if (is_read)
		desc->flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_WR);
	else
		desc->flag &= rte_cpu_to_le_16(~HNS3_CMD_FLAG_WR);
}

void
hns3_cmd_setup_basic_desc(struct hns3_cmd_desc *desc,
			  enum hns3_opcode_type opcode, bool is_read)
{
	memset((void *)desc, 0, sizeof(struct hns3_cmd_desc));
	desc->opcode = rte_cpu_to_le_16(opcode);
	desc->flag = rte_cpu_to_le_16(HNS3_CMD_FLAG_NO_INTR | HNS3_CMD_FLAG_IN);

	if (is_read)
		desc->flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_WR);
}

static void
hns3_cmd_clear_regs(struct hns3_hw *hw)
{
	hns3_write_dev(hw, HNS3_CMDQ_TX_ADDR_L_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_TX_ADDR_H_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_TX_DEPTH_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_TX_HEAD_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_TX_TAIL_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_RX_ADDR_L_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_RX_ADDR_H_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_RX_DEPTH_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_RX_HEAD_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_RX_TAIL_REG, 0);
}

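/*
 * Program one ring into hardware: the DMA base address (split into low and
 * high 32-bit registers), the ring depth, and zeroed head/tail pointers.
 * The CSQ additionally sets the HNS3_NIC_SW_RST_RDY bit in its depth
 * register.
 */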
static void
hns3_cmd_config_regs(struct hns3_cmq_ring *ring)
{
	uint64_t dma = ring->desc_dma_addr;

	if (ring->ring_type == HNS3_TYPE_CSQ) {
		hns3_write_dev(ring->hw, HNS3_CMDQ_TX_ADDR_L_REG,
			       lower_32_bits(dma));
		hns3_write_dev(ring->hw, HNS3_CMDQ_TX_ADDR_H_REG,
			       upper_32_bits(dma));
		hns3_write_dev(ring->hw, HNS3_CMDQ_TX_DEPTH_REG,
			       ring->desc_num >> HNS3_NIC_CMQ_DESC_NUM_S |
			       HNS3_NIC_SW_RST_RDY);
		hns3_write_dev(ring->hw, HNS3_CMDQ_TX_HEAD_REG, 0);
		hns3_write_dev(ring->hw, HNS3_CMDQ_TX_TAIL_REG, 0);
	} else {
		hns3_write_dev(ring->hw, HNS3_CMDQ_RX_ADDR_L_REG,
			       lower_32_bits(dma));
		hns3_write_dev(ring->hw, HNS3_CMDQ_RX_ADDR_H_REG,
			       upper_32_bits(dma));
		hns3_write_dev(ring->hw, HNS3_CMDQ_RX_DEPTH_REG,
			       ring->desc_num >> HNS3_NIC_CMQ_DESC_NUM_S);
		hns3_write_dev(ring->hw, HNS3_CMDQ_RX_HEAD_REG, 0);
		hns3_write_dev(ring->hw, HNS3_CMDQ_RX_TAIL_REG, 0);
	}
}

static void
hns3_cmd_init_regs(struct hns3_hw *hw)
{
	hns3_cmd_config_regs(&hw->cmq.csq);
	hns3_cmd_config_regs(&hw->cmq.crq);
}

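/*
 * Reclaim the CSQ descriptors that hardware has consumed, advancing
 * next_to_clean to the hardware head pointer. Returns the number of
 * descriptors cleaned, or -EIO (scheduling a delayed reset in the primary
 * process) when the head pointer read back is inconsistent.
 */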
static int
hns3_cmd_csq_clean(struct hns3_hw *hw)
{
	struct hns3_cmq_ring *csq = &hw->cmq.csq;
	uint32_t head;
	uint32_t addr;
	int clean;

	head = hns3_read_dev(hw, HNS3_CMDQ_TX_HEAD_REG);
	addr = hns3_read_dev(hw, HNS3_CMDQ_TX_ADDR_L_REG);
	if (!is_valid_csq_clean_head(csq, head) || addr == 0) {
		hns3_err(hw, "wrong cmd addr(%0x) head (%u, %u-%u)", addr, head,
			 csq->next_to_use, csq->next_to_clean);
		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			__atomic_store_n(&hw->reset.disable_cmd, 1,
					 __ATOMIC_RELAXED);
			hns3_schedule_delayed_reset(HNS3_DEV_HW_TO_ADAPTER(hw));
		}

		return -EIO;
	}

	clean = (head - csq->next_to_clean + csq->desc_num) % csq->desc_num;
	csq->next_to_clean = head;
	return clean;
}

static int
hns3_cmd_csq_done(struct hns3_hw *hw)
{
	uint32_t head = hns3_read_dev(hw, HNS3_CMDQ_TX_HEAD_REG);

	return head == hw->cmq.csq.next_to_use;
}

static bool
hns3_is_special_opcode(uint16_t opcode)
{
	/*
	 * These commands have several descriptors,
	 * and use the first one to save opcode and return value.
	 */
	uint16_t spec_opcode[] = {HNS3_OPC_STATS_64_BIT,
				  HNS3_OPC_STATS_32_BIT,
				  HNS3_OPC_STATS_MAC,
				  HNS3_OPC_STATS_MAC_ALL,
				  HNS3_OPC_QUERY_32_BIT_REG,
				  HNS3_OPC_QUERY_64_BIT_REG,
				  HNS3_OPC_QUERY_CLEAR_MPF_RAS_INT,
				  HNS3_OPC_QUERY_CLEAR_PF_RAS_INT,
				  HNS3_OPC_QUERY_CLEAR_ALL_MPF_MSIX_INT,
				  HNS3_OPC_QUERY_CLEAR_ALL_PF_MSIX_INT,
				  HNS3_OPC_QUERY_ALL_ERR_INFO,};
	uint32_t i;

	for (i = 0; i < ARRAY_SIZE(spec_opcode); i++)
		if (spec_opcode[i] == opcode)
			return true;

	return false;
}

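/*
 * Map a firmware (IMP) return code from the descriptor to a Linux-style
 * negative errno; unknown codes fall through to -EREMOTEIO.
 */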
static int
hns3_cmd_convert_err_code(uint16_t desc_ret)
{
	static const struct {
		uint16_t imp_errcode;
		int linux_errcode;
	} hns3_cmdq_status[] = {
		{HNS3_CMD_EXEC_SUCCESS, 0},
		{HNS3_CMD_NO_AUTH, -EPERM},
		{HNS3_CMD_NOT_SUPPORTED, -EOPNOTSUPP},
		{HNS3_CMD_QUEUE_FULL, -EXFULL},
		{HNS3_CMD_NEXT_ERR, -ENOSR},
		{HNS3_CMD_UNEXE_ERR, -ENOTBLK},
		{HNS3_CMD_PARA_ERR, -EINVAL},
		{HNS3_CMD_RESULT_ERR, -ERANGE},
		{HNS3_CMD_TIMEOUT, -ETIME},
		{HNS3_CMD_HILINK_ERR, -ENOLINK},
		{HNS3_CMD_QUEUE_ILLEGAL, -ENXIO},
		{HNS3_CMD_INVALID, -EBADR},
		{HNS3_CMD_ROH_CHECK_FAIL, -EINVAL}
	};

	uint32_t i;

	for (i = 0; i < ARRAY_SIZE(hns3_cmdq_status); i++)
		if (hns3_cmdq_status[i].imp_errcode == desc_ret)
			return hns3_cmdq_status[i].linux_errcode;

	return -EREMOTEIO;
}

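/*
 * Copy the descriptors that hardware wrote back (starting at the ring index
 * recorded before submission) into the caller's array and translate the
 * return value. Special multi-descriptor commands keep their return value
 * in the first descriptor; all others keep it in the last.
 */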
static int
hns3_cmd_get_hardware_reply(struct hns3_hw *hw,
			    struct hns3_cmd_desc *desc, int num, int ntc)
{
	uint16_t opcode, desc_ret;
	int current_ntc = ntc;
	int handle;

	opcode = rte_le_to_cpu_16(desc[0].opcode);
	for (handle = 0; handle < num; handle++) {
		/* Get the result of hardware write back */
		desc[handle] = hw->cmq.csq.desc[current_ntc];

		current_ntc++;
		if (current_ntc == hw->cmq.csq.desc_num)
			current_ntc = 0;
	}

	if (likely(!hns3_is_special_opcode(opcode)))
		desc_ret = rte_le_to_cpu_16(desc[num - 1].retval);
	else
		desc_ret = rte_le_to_cpu_16(desc[0].retval);

	hw->cmq.last_status = desc_ret;
	return hns3_cmd_convert_err_code(desc_ret);
}

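/*
 * Busy-poll in 1 us steps until the hardware head pointer catches up with
 * next_to_use, giving up early when the command queue is disabled or a
 * reset is pending, or with -ETIME after cmq.tx_timeout iterations.
 */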
static int
hns3_cmd_poll_reply(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	uint32_t timeout = 0;

	do {
		if (hns3_cmd_csq_done(hw))
			return 0;

		if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED)) {
			hns3_err(hw,
				 "Don't wait for reply because of disable_cmd");
			return -EBUSY;
		}

		if (is_reset_pending(hns)) {
			hns3_err(hw, "Don't wait for reply because of reset pending");
			return -EIO;
		}

		rte_delay_us(1);
		timeout++;
	} while (timeout < hw->cmq.tx_timeout);
	hns3_err(hw, "Wait for reply timeout");
	return -ETIME;
}

/*
 * hns3_cmd_send - send command to command queue
 *
 * @param hw
 *   pointer to the hw struct
 * @param desc
 *   prefilled descriptor for describing the command
 * @param num
 *   the number of descriptors to be sent
 * @return
 *   - -EBUSY if the device is detected to be resetting
 *   - -EIO   if the cmd csq is detected to be corrupted (due to reset) or
 *            there is a reset pending
 *   - -ENOMEM/-ETIME/...(non-zero) for other error cases
 *   - Zero   if the operation completed successfully
 *
 * Note -EBUSY/-EIO are only used in the reset case.
 *
 * Note this is the main send routine for the command queue; it posts the
 * descriptors, cleans the queue, etc.
 */
int
hns3_cmd_send(struct hns3_hw *hw, struct hns3_cmd_desc *desc, int num)
{
	struct hns3_cmd_desc *desc_to_use;
	int handle = 0;
	int retval;
	uint32_t ntc;

	if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED))
		return -EBUSY;

	rte_spinlock_lock(&hw->cmq.csq.lock);

	/* Clean the command send queue */
	retval = hns3_cmd_csq_clean(hw);
	if (retval < 0) {
		rte_spinlock_unlock(&hw->cmq.csq.lock);
		return retval;
	}

	if (num > hns3_ring_space(&hw->cmq.csq)) {
		rte_spinlock_unlock(&hw->cmq.csq.lock);
		return -ENOMEM;
	}

	/*
	 * Record the ring location of the descriptors for this submission,
	 * which will be used to read back the hardware write-back.
	 */
	ntc = hw->cmq.csq.next_to_use;

	while (handle < num) {
		desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
		*desc_to_use = desc[handle];
		(hw->cmq.csq.next_to_use)++;
		if (hw->cmq.csq.next_to_use == hw->cmq.csq.desc_num)
			hw->cmq.csq.next_to_use = 0;
		handle++;
	}

	/* Write to hardware */
	hns3_write_dev(hw, HNS3_CMDQ_TX_TAIL_REG, hw->cmq.csq.next_to_use);

	/*
	 * If the command is sync, wait for the firmware to write back; if
	 * multiple descriptors are sent, use the first one to check.
	 */
	if (HNS3_CMD_SEND_SYNC(rte_le_to_cpu_16(desc->flag))) {
		retval = hns3_cmd_poll_reply(hw);
		if (!retval)
			retval = hns3_cmd_get_hardware_reply(hw, desc, num,
							     ntc);
	}

	rte_spinlock_unlock(&hw->cmq.csq.lock);
	return retval;
}
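
/*
 * Typical caller pattern (a minimal sketch; see
 * hns3_cmd_query_firmware_version_and_capability() below for a complete
 * example):
 *
 *	struct hns3_cmd_desc desc;
 *
 *	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_FW_VER, true);
 *	ret = hns3_cmd_send(hw, &desc, 1);
 *
 * On success the hardware reply is available in desc.data.
 */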

static const char *
hns3_get_caps_name(uint32_t caps_id)
{
	const struct {
		enum HNS3_CAPS_BITS caps;
		const char *name;
	} dev_caps[] = {
		{ HNS3_CAPS_UDP_GSO_B,         "udp_gso"         },
		{ HNS3_CAPS_ATR_B,             "atr"             },
		{ HNS3_CAPS_FD_QUEUE_REGION_B, "fd_queue_region" },
		{ HNS3_CAPS_PTP_B,             "ptp"             },
		{ HNS3_CAPS_INT_QL_B,          "int_ql"          },
		{ HNS3_CAPS_SIMPLE_BD_B,       "simple_bd"       },
		{ HNS3_CAPS_TX_PUSH_B,         "tx_push"         },
		{ HNS3_CAPS_PHY_IMP_B,         "phy_imp"         },
		{ HNS3_CAPS_TQP_TXRX_INDEP_B,  "tqp_txrx_indep"  },
		{ HNS3_CAPS_HW_PAD_B,          "hw_pad"          },
		{ HNS3_CAPS_STASH_B,           "stash"           },
		{ HNS3_CAPS_UDP_TUNNEL_CSUM_B, "udp_tunnel_csum" },
		{ HNS3_CAPS_RAS_IMP_B,         "ras_imp"         },
		{ HNS3_CAPS_FEC_B,             "fec"             },
		{ HNS3_CAPS_PAUSE_B,           "pause"           },
		{ HNS3_CAPS_RXD_ADV_LAYOUT_B,  "rxd_adv_layout"  }
	};
	uint32_t i;

	for (i = 0; i < RTE_DIM(dev_caps); i++) {
		if (dev_caps[i].caps == caps_id)
			return dev_caps[i].name;
	}

	return "unknown";
}

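/*
 * Clear the capability bits listed in hns->dev_caps_mask from the firmware
 * reply. Note the XOR arithmetic below: caps_new = caps_org ^ (caps_org &
 * mask) is equivalent to caps_org & ~mask, and caps_masked = caps_org ^
 * caps_new recovers exactly the bits that were cleared.
 */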
static void
hns3_mask_capability(struct hns3_hw *hw,
		     struct hns3_query_version_cmd *cmd)
{
#define MAX_CAPS_BIT	64

	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	uint64_t caps_org, caps_new, caps_masked;
	uint32_t i;

	if (hns->dev_caps_mask == 0)
		return;

	memcpy(&caps_org, &cmd->caps[0], sizeof(caps_org));
	caps_org = rte_le_to_cpu_64(caps_org);
	caps_new = caps_org ^ (caps_org & hns->dev_caps_mask);
	caps_masked = caps_org ^ caps_new;
	caps_new = rte_cpu_to_le_64(caps_new);
	memcpy(&cmd->caps[0], &caps_new, sizeof(caps_new));

	for (i = 0; i < MAX_CAPS_BIT; i++) {
		if (!(caps_masked & BIT_ULL(i)))
			continue;
		hns3_info(hw, "mask capability: id-%u, name-%s.",
			  i, hns3_get_caps_name(i));
	}
}

static void
hns3_parse_capability(struct hns3_hw *hw,
		      struct hns3_query_version_cmd *cmd)
{
	uint32_t caps = rte_le_to_cpu_32(cmd->caps[0]);

	if (hns3_get_bit(caps, HNS3_CAPS_UDP_GSO_B))
		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_UDP_GSO_B, 1);
	if (hns3_get_bit(caps, HNS3_CAPS_FD_QUEUE_REGION_B))
		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_FD_QUEUE_REGION_B,
			     1);
	if (hns3_get_bit(caps, HNS3_CAPS_PTP_B)) {
		/*
		 * PTP depends on a special packet type that hardware reports
		 * only when the rxd advanced layout is enabled, so if the
		 * hardware doesn't support the rxd advanced layout, the
		 * driver should ignore the PTP capability.
		 */
		if (hns3_get_bit(caps, HNS3_CAPS_RXD_ADV_LAYOUT_B))
			hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_PTP_B, 1);
		else
			hns3_warn(hw, "ignore PTP capability due to lack of "
				  "rxd advanced layout capability.");
	}
	if (hns3_get_bit(caps, HNS3_CAPS_TX_PUSH_B))
		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_TX_PUSH_B, 1);
	if (hns3_get_bit(caps, HNS3_CAPS_PHY_IMP_B))
		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_COPPER_B, 1);
	if (hns3_get_bit(caps, HNS3_CAPS_TQP_TXRX_INDEP_B))
		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_INDEP_TXRX_B, 1);
	if (hns3_get_bit(caps, HNS3_CAPS_STASH_B))
		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_STASH_B, 1);
	if (hns3_get_bit(caps, HNS3_CAPS_RXD_ADV_LAYOUT_B))
		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_RXD_ADV_LAYOUT_B,
			     1);
	if (hns3_get_bit(caps, HNS3_CAPS_UDP_TUNNEL_CSUM_B))
		hns3_set_bit(hw->capability,
			     HNS3_DEV_SUPPORT_OUTER_UDP_CKSUM_B, 1);
	if (hns3_get_bit(caps, HNS3_CAPS_RAS_IMP_B))
		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_RAS_IMP_B, 1);
}

static uint32_t
hns3_build_api_caps(void)
{
	uint32_t api_caps = 0;

	hns3_set_bit(api_caps, HNS3_API_CAP_FLEX_RSS_TBL_B, 1);

	return rte_cpu_to_le_32(api_caps);
}

static int
hns3_cmd_query_firmware_version_and_capability(struct hns3_hw *hw)
{
	struct hns3_query_version_cmd *resp;
	struct hns3_cmd_desc desc;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_FW_VER, 1);
	resp = (struct hns3_query_version_cmd *)desc.data;
	resp->api_caps = hns3_build_api_caps();

	/* Initialize the cmd function */
	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		return ret;

	hw->fw_version = rte_le_to_cpu_32(resp->firmware);
	/*
	 * Make sure to mask the capabilities before parsing them, because
	 * masking may overwrite the response data.
	 */
	hns3_mask_capability(hw, resp);
	hns3_parse_capability(hw, resp);

	return 0;
}

int
hns3_cmd_init_queue(struct hns3_hw *hw)
{
	int ret;

	/* Set up the locks for the command queues */
	rte_spinlock_init(&hw->cmq.csq.lock);
	rte_spinlock_init(&hw->cmq.crq.lock);

	/*
	 * Clear all command registers,
	 * in case there are some residual values.
	 */
	hns3_cmd_clear_regs(hw);

	/* Set up the number of queue entries for the command queues */
	hw->cmq.csq.desc_num = HNS3_NIC_CMQ_DESC_NUM;
	hw->cmq.crq.desc_num = HNS3_NIC_CMQ_DESC_NUM;

	/* Set up the Tx write back timeout */
	hw->cmq.tx_timeout = HNS3_CMDQ_TX_TIMEOUT;

	/* Set up the queue rings */
	ret = hns3_alloc_cmd_queue(hw, HNS3_TYPE_CSQ);
	if (ret) {
		PMD_INIT_LOG(ERR, "CSQ ring setup error %d", ret);
		return ret;
	}

	ret = hns3_alloc_cmd_queue(hw, HNS3_TYPE_CRQ);
	if (ret) {
		PMD_INIT_LOG(ERR, "CRQ ring setup error %d", ret);
		goto err_crq;
	}

	return 0;

err_crq:
	hns3_free_cmd_desc(hw, &hw->cmq.csq);

	return ret;
}

static void
hns3_update_dev_lsc_cap(struct hns3_hw *hw, int fw_compact_cmd_result)
{
	struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];

	if (hw->adapter_state != HNS3_NIC_UNINITIALIZED)
		return;

	if (fw_compact_cmd_result != 0) {
		/*
		 * If fw_compact_cmd_result is non-zero, the firmware doesn't
		 * support the link status change interrupt.
		 * The framework has already set the RTE_ETH_DEV_INTR_LSC bit
		 * because the driver declared RTE_PCI_DRV_INTR_LSC in
		 * drv_flags, so the RTE_ETH_DEV_INTR_LSC capability must be
		 * cleared when the firmware is detected not to support the
		 * link status change interrupt.
		 */
		dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC;
	}
}

static int
hns3_apply_fw_compat_cmd_result(struct hns3_hw *hw, int result)
{
	if (result != 0 && hns3_dev_copper_supported(hw)) {
		hns3_err(hw, "firmware fails to initialize the PHY, ret = %d.",
			 result);
		return result;
	}

	hns3_update_dev_lsc_cap(hw, result);

	return 0;
}

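/*
 * Tell the firmware which compatibility features to enable: on init, link
 * event reporting and (on copper-capable devices) firmware PHY driver
 * takeover; on uninit, a zeroed request switches them all off again.
 */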
static int
hns3_firmware_compat_config(struct hns3_hw *hw, bool is_init)
{
	struct hns3_firmware_compat_cmd *req;
	struct hns3_cmd_desc desc;
	uint32_t compat = 0;

#if defined(RTE_HNS3_ONLY_1630_FPGA)
	/*
	 * If the reserved register shows that the PHY driver in the IMP
	 * firmware is not configured, the driver will use a temporary PHY
	 * driver.
	 */
	struct rte_pci_device *pci_dev;
	struct rte_eth_dev *eth_dev;
	uint8_t revision;
	int ret;

	eth_dev = &rte_eth_devices[hw->data->port_id];
	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	/* Get PCI revision id */
	ret = rte_pci_read_config(pci_dev, &revision, HNS3_PCI_REVISION_ID_LEN,
				  HNS3_PCI_REVISION_ID);
	if (ret != HNS3_PCI_REVISION_ID_LEN) {
		PMD_INIT_LOG(ERR, "failed to read pci revision id, ret = %d",
			     ret);
		return -EIO;
	}
	if (revision == PCI_REVISION_ID_HIP09_A) {
		struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);

		if (hns3_dev_copper_supported(hw) == 0 || pf->is_tmp_phy) {
			PMD_INIT_LOG(ERR, "***use temp phy driver in dpdk***");
			pf->is_tmp_phy = true;
			hns3_set_bit(hw->capability,
				     HNS3_DEV_SUPPORT_COPPER_B, 1);
			return 0;
		}

		PMD_INIT_LOG(ERR, "***use phy driver in imp***");
	}
#endif

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_FIRMWARE_COMPAT_CFG, false);
	req = (struct hns3_firmware_compat_cmd *)desc.data;

	if (is_init) {
		hns3_set_bit(compat, HNS3_LINK_EVENT_REPORT_EN_B, 1);
		hns3_set_bit(compat, HNS3_NCSI_ERROR_REPORT_EN_B, 0);
		if (hns3_dev_copper_supported(hw))
			hns3_set_bit(compat, HNS3_FIRMWARE_PHY_DRIVER_EN_B, 1);
	}
	req->compat = rte_cpu_to_le_32(compat);

	return hns3_cmd_send(hw, &desc, 1);
}

int
hns3_cmd_init(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	uint32_t version;
	int ret;

	rte_spinlock_lock(&hw->cmq.csq.lock);
	rte_spinlock_lock(&hw->cmq.crq.lock);

	hw->cmq.csq.next_to_clean = 0;
	hw->cmq.csq.next_to_use = 0;
	hw->cmq.crq.next_to_clean = 0;
	hw->cmq.crq.next_to_use = 0;
	hw->mbx_resp.head = 0;
	hw->mbx_resp.tail = 0;
	hw->mbx_resp.lost = 0;
	hns3_cmd_init_regs(hw);

	rte_spinlock_unlock(&hw->cmq.crq.lock);
	rte_spinlock_unlock(&hw->cmq.csq.lock);

	/*
	 * Check if there is a new reset pending, because a higher level reset
	 * may happen while a lower level reset is being processed.
	 */
	if (is_reset_pending(HNS3_DEV_HW_TO_ADAPTER(hw))) {
		PMD_INIT_LOG(ERR, "New reset pending, keep disable cmd");
		ret = -EBUSY;
		goto err_cmd_init;
	}
	__atomic_store_n(&hw->reset.disable_cmd, 0, __ATOMIC_RELAXED);

	ret = hns3_cmd_query_firmware_version_and_capability(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "firmware version query failed %d", ret);
		goto err_cmd_init;
	}

	version = hw->fw_version;
	PMD_INIT_LOG(INFO, "The firmware version is %lu.%lu.%lu.%lu",
		     hns3_get_field(version, HNS3_FW_VERSION_BYTE3_M,
				    HNS3_FW_VERSION_BYTE3_S),
		     hns3_get_field(version, HNS3_FW_VERSION_BYTE2_M,
				    HNS3_FW_VERSION_BYTE2_S),
		     hns3_get_field(version, HNS3_FW_VERSION_BYTE1_M,
				    HNS3_FW_VERSION_BYTE1_S),
		     hns3_get_field(version, HNS3_FW_VERSION_BYTE0_M,
				    HNS3_FW_VERSION_BYTE0_S));

	if (hns->is_vf)
		return 0;

	/*
	 * Ask the firmware to enable some features. A fiber port can still
	 * work without them, but a copper port can't, because the firmware
	 * fails to take over the PHY.
	 */
	ret = hns3_firmware_compat_config(hw, true);
	if (ret)
		PMD_INIT_LOG(WARNING, "firmware compatible features not "
			     "supported, ret = %d.", ret);

	/*
	 * Perform some corresponding operations based on the firmware
	 * compatibility configuration result.
	 */
	ret = hns3_apply_fw_compat_cmd_result(hw, ret);
	if (ret)
		goto err_cmd_init;

	return 0;

err_cmd_init:
	__atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
	return ret;
}

static void
hns3_destroy_queue(struct hns3_hw *hw, struct hns3_cmq_ring *ring)
{
	rte_spinlock_lock(&ring->lock);

	hns3_free_cmd_desc(hw, ring);

	rte_spinlock_unlock(&ring->lock);
}

void
hns3_cmd_destroy_queue(struct hns3_hw *hw)
{
	hns3_destroy_queue(hw, &hw->cmq.csq);
	hns3_destroy_queue(hw, &hw->cmq.crq);
}

void
hns3_cmd_uninit(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);

	if (!hns->is_vf)
		(void)hns3_firmware_compat_config(hw, false);

	__atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);

	/*
	 * A delay is added to ensure that the register cleanup operations
	 * will not be performed concurrently with the firmware command, and
	 * to ensure that all the reserved commands are executed.
	 * Concurrency may occur in two scenarios: asynchronous commands and
	 * timed-out commands. If a command fails to be executed due to busy
	 * scheduling, it will be processed in the next scheduling round of
	 * the firmware.
	 */
	rte_delay_ms(HNS3_CMDQ_CLEAR_WAIT_TIME);

	rte_spinlock_lock(&hw->cmq.csq.lock);
	rte_spinlock_lock(&hw->cmq.crq.lock);
	hns3_cmd_clear_regs(hw);
	rte_spinlock_unlock(&hw->cmq.crq.lock);
	rte_spinlock_unlock(&hw->cmq.csq.lock);