net/hns3: support device reset
drivers/net/hns3/hns3_cmd.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2019 Hisilicon Limited.
 */

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/queue.h>
#include <inttypes.h>
#include <unistd.h>
#include <rte_bus_pci.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_dev.h>
#include <rte_eal.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_io.h>

#include "hns3_ethdev.h"
#include "hns3_regs.h"
#include "hns3_intr.h"
#include "hns3_logs.h"

#define hns3_is_csq(ring) ((ring)->flag & HNS3_TYPE_CSQ)

#define cmq_ring_to_dev(ring)   (&(ring)->dev->pdev->dev)

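/*
 * Return the number of free descriptors in the ring. One slot is always
 * kept unused so that next_to_use == next_to_clean unambiguously means
 * "empty" rather than "full".
 */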
static int
hns3_ring_space(struct hns3_cmq_ring *ring)
{
        int ntu = ring->next_to_use;
        int ntc = ring->next_to_clean;
        int used = (ntu - ntc + ring->desc_num) % ring->desc_num;

        return ring->desc_num - used - 1;
}

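/*
 * The head pointer reported by hardware must lie between next_to_clean
 * and next_to_use, taking ring wrap-around into account; anything else
 * is treated as a firmware or hardware fault by the caller.
 */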
static bool
is_valid_csq_clean_head(struct hns3_cmq_ring *ring, int head)
{
        int ntu = ring->next_to_use;
        int ntc = ring->next_to_clean;

        if (ntu > ntc)
                return head >= ntc && head <= ntu;

        return head >= ntc || head <= ntu;
}

/*
 * hns3_allocate_dma_mem - Specific memory alloc for command function.
 * Reserve a memzone: a named, IOVA-contiguous region of memory to hold
 * the descriptor ring.
 * @hw: pointer to the hardware structure
 * @ring: pointer to the ring structure
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 */
static int
hns3_allocate_dma_mem(struct hns3_hw *hw, struct hns3_cmq_ring *ring,
                      uint64_t size, uint32_t alignment)
{
        const struct rte_memzone *mz = NULL;
        char z_name[RTE_MEMZONE_NAMESIZE];

        snprintf(z_name, sizeof(z_name), "hns3_dma_%" PRIu64, rte_rand());
        mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY,
                                         RTE_MEMZONE_IOVA_CONTIG, alignment,
                                         RTE_PGSIZE_2M);
        if (mz == NULL)
                return -ENOMEM;

        ring->buf_size = size;
        ring->desc = mz->addr;
        ring->desc_dma_addr = mz->iova;
        ring->zone = (const void *)mz;
        hns3_dbg(hw, "memzone %s allocated with physical address: %" PRIu64,
                 mz->name, ring->desc_dma_addr);

        return 0;
}

static void
hns3_free_dma_mem(struct hns3_hw *hw, struct hns3_cmq_ring *ring)
{
        hns3_dbg(hw, "memzone %s to be freed with physical address: %" PRIu64,
                 ((const struct rte_memzone *)ring->zone)->name,
                 ring->desc_dma_addr);
        rte_memzone_free((const struct rte_memzone *)ring->zone);
        ring->buf_size = 0;
        ring->desc = NULL;
        ring->desc_dma_addr = 0;
        ring->zone = NULL;
}

static int
hns3_alloc_cmd_desc(struct hns3_hw *hw, struct hns3_cmq_ring *ring)
{
        int size = ring->desc_num * sizeof(struct hns3_cmd_desc);

        if (hns3_allocate_dma_mem(hw, ring, size, HNS3_CMD_DESC_ALIGNMENT)) {
                hns3_err(hw, "allocate dma mem failed");
                return -ENOMEM;
        }

        return 0;
}

static void
hns3_free_cmd_desc(struct hns3_hw *hw, struct hns3_cmq_ring *ring)
{
        if (ring->desc)
                hns3_free_dma_mem(hw, ring);
}

static int
hns3_alloc_cmd_queue(struct hns3_hw *hw, int ring_type)
{
        struct hns3_cmq_ring *ring =
                (ring_type == HNS3_TYPE_CSQ) ? &hw->cmq.csq : &hw->cmq.crq;
        int ret;

        ring->ring_type = ring_type;
        ring->hw = hw;

        ret = hns3_alloc_cmd_desc(hw, ring);
        if (ret)
                hns3_err(hw, "descriptor %s alloc error %d",
                         (ring_type == HNS3_TYPE_CSQ) ? "CSQ" : "CRQ", ret);

        return ret;
}

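/*
 * Reset the flags of a descriptor that is submitted again without being
 * rebuilt: mark it as an inbound, non-interrupting command and set or
 * clear the WR bit according to the transfer direction.
 */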
void
hns3_cmd_reuse_desc(struct hns3_cmd_desc *desc, bool is_read)
{
        desc->flag = rte_cpu_to_le_16(HNS3_CMD_FLAG_NO_INTR | HNS3_CMD_FLAG_IN);
        if (is_read)
                desc->flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_WR);
        else
                desc->flag &= rte_cpu_to_le_16(~HNS3_CMD_FLAG_WR);
}

void
hns3_cmd_setup_basic_desc(struct hns3_cmd_desc *desc,
                          enum hns3_opcode_type opcode, bool is_read)
{
        memset((void *)desc, 0, sizeof(struct hns3_cmd_desc));
        desc->opcode = rte_cpu_to_le_16(opcode);
        desc->flag = rte_cpu_to_le_16(HNS3_CMD_FLAG_NO_INTR | HNS3_CMD_FLAG_IN);

        if (is_read)
                desc->flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_WR);
}

static void
hns3_cmd_clear_regs(struct hns3_hw *hw)
{
        hns3_write_dev(hw, HNS3_CMDQ_TX_ADDR_L_REG, 0);
        hns3_write_dev(hw, HNS3_CMDQ_TX_ADDR_H_REG, 0);
        hns3_write_dev(hw, HNS3_CMDQ_TX_DEPTH_REG, 0);
        hns3_write_dev(hw, HNS3_CMDQ_TX_HEAD_REG, 0);
        hns3_write_dev(hw, HNS3_CMDQ_TX_TAIL_REG, 0);
        hns3_write_dev(hw, HNS3_CMDQ_RX_ADDR_L_REG, 0);
        hns3_write_dev(hw, HNS3_CMDQ_RX_ADDR_H_REG, 0);
        hns3_write_dev(hw, HNS3_CMDQ_RX_DEPTH_REG, 0);
        hns3_write_dev(hw, HNS3_CMDQ_RX_HEAD_REG, 0);
        hns3_write_dev(hw, HNS3_CMDQ_RX_TAIL_REG, 0);
}

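/*
 * Program the ring base address and depth into hardware. The depth
 * register takes the descriptor count shifted by HNS3_NIC_CMQ_DESC_NUM_S;
 * for the CSQ, the HNS3_NIC_SW_RST_RDY bit is also set, which (as the
 * name suggests) signals to the firmware that the driver is ready again
 * after a software reset.
 */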
static void
hns3_cmd_config_regs(struct hns3_cmq_ring *ring)
{
        uint64_t dma = ring->desc_dma_addr;

        if (ring->ring_type == HNS3_TYPE_CSQ) {
                hns3_write_dev(ring->hw, HNS3_CMDQ_TX_ADDR_L_REG,
                               lower_32_bits(dma));
                hns3_write_dev(ring->hw, HNS3_CMDQ_TX_ADDR_H_REG,
                               upper_32_bits(dma));
                hns3_write_dev(ring->hw, HNS3_CMDQ_TX_DEPTH_REG,
                               ring->desc_num >> HNS3_NIC_CMQ_DESC_NUM_S |
                               HNS3_NIC_SW_RST_RDY);
                hns3_write_dev(ring->hw, HNS3_CMDQ_TX_HEAD_REG, 0);
                hns3_write_dev(ring->hw, HNS3_CMDQ_TX_TAIL_REG, 0);
        } else {
                hns3_write_dev(ring->hw, HNS3_CMDQ_RX_ADDR_L_REG,
                               lower_32_bits(dma));
                hns3_write_dev(ring->hw, HNS3_CMDQ_RX_ADDR_H_REG,
                               upper_32_bits(dma));
                hns3_write_dev(ring->hw, HNS3_CMDQ_RX_DEPTH_REG,
                               ring->desc_num >> HNS3_NIC_CMQ_DESC_NUM_S);
                hns3_write_dev(ring->hw, HNS3_CMDQ_RX_HEAD_REG, 0);
                hns3_write_dev(ring->hw, HNS3_CMDQ_RX_TAIL_REG, 0);
        }
}

static void
hns3_cmd_init_regs(struct hns3_hw *hw)
{
        hns3_cmd_config_regs(&hw->cmq.csq);
        hns3_cmd_config_regs(&hw->cmq.crq);
}

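/*
 * Advance next_to_clean up to the head pointer written back by hardware
 * and return the number of descriptors reclaimed. An out-of-range head
 * means the firmware is misbehaving, so the command interface is
 * disabled and a delayed reset is scheduled.
 */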
static int
hns3_cmd_csq_clean(struct hns3_hw *hw)
{
        struct hns3_cmq_ring *csq = &hw->cmq.csq;
        uint32_t head;
        int clean;

        head = hns3_read_dev(hw, HNS3_CMDQ_TX_HEAD_REG);

        if (!is_valid_csq_clean_head(csq, head)) {
                struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
                uint32_t global;
                uint32_t fun_rst;

                hns3_err(hw, "wrong cmd head (%u, %u-%u)", head,
                         csq->next_to_use, csq->next_to_clean);
                rte_atomic16_set(&hw->reset.disable_cmd, 1);
                if (hns->is_vf) {
                        global = hns3_read_dev(hw, HNS3_VF_RST_ING);
                        fun_rst = hns3_read_dev(hw, HNS3_FUN_RST_ING);
                        hns3_err(hw, "Delayed VF reset global: %x fun_rst: %x",
                                 global, fun_rst);
                        hns3_atomic_set_bit(HNS3_VF_RESET, &hw->reset.pending);
                } else {
                        global = hns3_read_dev(hw, HNS3_GLOBAL_RESET_REG);
                        fun_rst = hns3_read_dev(hw, HNS3_FUN_RST_ING);
                        hns3_err(hw, "Delayed IMP reset global: %x fun_rst: %x",
                                 global, fun_rst);
                        hns3_atomic_set_bit(HNS3_IMP_RESET, &hw->reset.pending);
                }

                hns3_schedule_delayed_reset(hns);

                return -EIO;
        }

        clean = (head - csq->next_to_clean + csq->desc_num) % csq->desc_num;
        csq->next_to_clean = head;
        return clean;
}

static int
hns3_cmd_csq_done(struct hns3_hw *hw)
{
        uint32_t head = hns3_read_dev(hw, HNS3_CMDQ_TX_HEAD_REG);

        return head == hw->cmq.csq.next_to_use;
}

static bool
hns3_is_special_opcode(uint16_t opcode)
{
        /*
         * These commands have several descriptors,
         * and use the first one to save opcode and return value.
         */
        uint16_t spec_opcode[] = {HNS3_OPC_STATS_64_BIT,
                                  HNS3_OPC_STATS_32_BIT,
                                  HNS3_OPC_STATS_MAC,
                                  HNS3_OPC_STATS_MAC_ALL,
                                  HNS3_OPC_QUERY_32_BIT_REG,
                                  HNS3_OPC_QUERY_64_BIT_REG};
        uint32_t i;

        for (i = 0; i < ARRAY_SIZE(spec_opcode); i++)
                if (spec_opcode[i] == opcode)
                        return true;

        return false;
}

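/* Map the status code returned by firmware to a negative errno value. */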
static int
hns3_cmd_convert_err_code(uint16_t desc_ret)
{
        switch (desc_ret) {
        case HNS3_CMD_EXEC_SUCCESS:
                return 0;
        case HNS3_CMD_NO_AUTH:
                return -EPERM;
        case HNS3_CMD_NOT_SUPPORTED:
                return -EOPNOTSUPP;
        case HNS3_CMD_QUEUE_FULL:
                return -EXFULL;
        case HNS3_CMD_NEXT_ERR:
                return -ENOSR;
        case HNS3_CMD_UNEXE_ERR:
                return -ENOTBLK;
        case HNS3_CMD_PARA_ERR:
                return -EINVAL;
        case HNS3_CMD_RESULT_ERR:
                return -ERANGE;
        case HNS3_CMD_TIMEOUT:
                return -ETIME;
        case HNS3_CMD_HILINK_ERR:
                return -ENOLINK;
        case HNS3_CMD_QUEUE_ILLEGAL:
                return -ENXIO;
        case HNS3_CMD_INVALID:
                return -EBADR;
        default:
                return -EIO;
        }
}

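/*
 * Copy the descriptors written back by hardware, starting at ring index
 * ntc, into the caller's buffer, then translate the return value that
 * firmware stored in the descriptor(s).
 */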
static int
hns3_cmd_get_hardware_reply(struct hns3_hw *hw,
                            struct hns3_cmd_desc *desc, int num, int ntc)
{
        uint16_t opcode, desc_ret;
        int current_ntc = ntc;
        int handle;

        opcode = rte_le_to_cpu_16(desc[0].opcode);
        for (handle = 0; handle < num; handle++) {
                /* Get the result of hardware write back */
                desc[handle] = hw->cmq.csq.desc[current_ntc];

                current_ntc++;
                if (current_ntc == hw->cmq.csq.desc_num)
                        current_ntc = 0;
        }

        if (likely(!hns3_is_special_opcode(opcode)))
                desc_ret = rte_le_to_cpu_16(desc[num - 1].retval);
        else
                desc_ret = rte_le_to_cpu_16(desc[0].retval);

        hw->cmq.last_status = desc_ret;
        return hns3_cmd_convert_err_code(desc_ret);
}

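/*
 * Busy-wait (in 1 us steps, up to cmq.tx_timeout) until hardware has
 * consumed all submitted descriptors, bailing out early if the command
 * interface has been disabled or a reset is pending.
 */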
static int
hns3_cmd_poll_reply(struct hns3_hw *hw)
{
        struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
        uint32_t timeout = 0;

        do {
                if (hns3_cmd_csq_done(hw))
                        return 0;

                if (rte_atomic16_read(&hw->reset.disable_cmd)) {
                        hns3_err(hw,
                                 "Don't wait for reply because of disable_cmd");
                        return -EBUSY;
                }

                if (is_reset_pending(hns)) {
                        hns3_err(hw, "Don't wait for reply because of reset pending");
                        return -EIO;
                }

                rte_delay_us(1);
                timeout++;
        } while (timeout < hw->cmq.tx_timeout);
        hns3_err(hw, "Wait for reply timeout");
        return -EBADE;
}

/*
 * hns3_cmd_send - send command to command queue
 * @hw: pointer to the hw struct
 * @desc: prefilled descriptor for describing the command
 * @num : the number of descriptors to be sent
 *
 * This is the main send routine for the command queue: it reclaims
 * completed descriptors, copies the new ones into the ring, rings the
 * doorbell and, for synchronous commands, collects the hardware reply.
 */
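/*
 * Typical usage, a minimal sketch modelled on
 * hns3_cmd_query_firmware_version() below:
 *
 *	struct hns3_cmd_desc desc;
 *	int ret;
 *
 *	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_FW_VER, true);
 *	ret = hns3_cmd_send(hw, &desc, 1);
 *	if (ret == 0)
 *		... the hardware reply is in desc.data ...
 */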
int
hns3_cmd_send(struct hns3_hw *hw, struct hns3_cmd_desc *desc, int num)
{
        struct hns3_cmd_desc *desc_to_use;
        int handle = 0;
        int retval;
        uint32_t ntc;

        if (rte_atomic16_read(&hw->reset.disable_cmd))
                return -EBUSY;

        rte_spinlock_lock(&hw->cmq.csq.lock);

        /* Clean the command send queue */
        retval = hns3_cmd_csq_clean(hw);
        if (retval < 0) {
                rte_spinlock_unlock(&hw->cmq.csq.lock);
                return retval;
        }

        if (num > hns3_ring_space(&hw->cmq.csq)) {
                rte_spinlock_unlock(&hw->cmq.csq.lock);
                return -ENOMEM;
        }

        /*
         * Record the location of the descriptors in the ring for this
         * submission, which will be used when collecting the hardware
         * write-back.
         */
        ntc = hw->cmq.csq.next_to_use;

        while (handle < num) {
                desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
                *desc_to_use = desc[handle];
                (hw->cmq.csq.next_to_use)++;
                if (hw->cmq.csq.next_to_use == hw->cmq.csq.desc_num)
                        hw->cmq.csq.next_to_use = 0;
                handle++;
        }

        /* Write to hardware */
        hns3_write_dev(hw, HNS3_CMDQ_TX_TAIL_REG, hw->cmq.csq.next_to_use);

        /*
         * If the command is sync, wait for the firmware to write back;
         * if multiple descriptors are sent, use the first one to check.
         */
        if (HNS3_CMD_SEND_SYNC(rte_le_to_cpu_16(desc->flag))) {
                retval = hns3_cmd_poll_reply(hw);
                if (!retval)
                        retval = hns3_cmd_get_hardware_reply(hw, desc, num,
                                                             ntc);
        }

        rte_spinlock_unlock(&hw->cmq.csq.lock);
        return retval;
}

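/*
 * Query the firmware version over the command queue. Besides filling in
 * the caller's version field, this doubles as the first sanity check
 * that the queue actually works after (re)initialization.
 */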
static int
hns3_cmd_query_firmware_version(struct hns3_hw *hw, uint32_t *version)
{
        struct hns3_query_version_cmd *resp;
        struct hns3_cmd_desc desc;
        int ret;

        hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_FW_VER, true);
        resp = (struct hns3_query_version_cmd *)desc.data;

        /* Send the query command and read back the reply */
        ret = hns3_cmd_send(hw, &desc, 1);
        if (ret == 0)
                *version = rte_le_to_cpu_32(resp->firmware);

        return ret;
}

int
hns3_cmd_init_queue(struct hns3_hw *hw)
{
        int ret;

        /* Set up the locks for the command queues */
        rte_spinlock_init(&hw->cmq.csq.lock);
        rte_spinlock_init(&hw->cmq.crq.lock);

        /*
         * Clear all the command queue registers in case residual values
         * are left over from a previous run.
         */
        hns3_cmd_clear_regs(hw);

        /* Set up the number of entries used by the command queues */
        hw->cmq.csq.desc_num = HNS3_NIC_CMQ_DESC_NUM;
        hw->cmq.crq.desc_num = HNS3_NIC_CMQ_DESC_NUM;

        /* Setup Tx write back timeout */
        hw->cmq.tx_timeout = HNS3_CMDQ_TX_TIMEOUT;

        /* Setup queue rings */
        ret = hns3_alloc_cmd_queue(hw, HNS3_TYPE_CSQ);
        if (ret) {
                PMD_INIT_LOG(ERR, "CSQ ring setup error %d", ret);
                return ret;
        }

        ret = hns3_alloc_cmd_queue(hw, HNS3_TYPE_CRQ);
        if (ret) {
                PMD_INIT_LOG(ERR, "CRQ ring setup error %d", ret);
                goto err_crq;
        }

        return 0;

err_crq:
        hns3_free_cmd_desc(hw, &hw->cmq.csq);

        return ret;
}

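/*
 * (Re)initialize the ring indices, mailbox state and hardware registers.
 * This runs both at probe time and after a reset, which is why it must
 * re-check for a newly pending reset before re-enabling the command
 * interface.
 */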
int
hns3_cmd_init(struct hns3_hw *hw)
{
        int ret;

        rte_spinlock_lock(&hw->cmq.csq.lock);
        rte_spinlock_lock(&hw->cmq.crq.lock);

        hw->cmq.csq.next_to_clean = 0;
        hw->cmq.csq.next_to_use = 0;
        hw->cmq.crq.next_to_clean = 0;
        hw->cmq.crq.next_to_use = 0;
        hw->mbx_resp.head = 0;
        hw->mbx_resp.tail = 0;
        hw->mbx_resp.lost = 0;
        hns3_cmd_init_regs(hw);

        rte_spinlock_unlock(&hw->cmq.crq.lock);
        rte_spinlock_unlock(&hw->cmq.csq.lock);

        /*
         * Check if there is a new reset pending, because a higher level
         * reset may happen while a lower level reset is being processed.
         */
        if (is_reset_pending(HNS3_DEV_HW_TO_ADAPTER(hw))) {
                PMD_INIT_LOG(ERR, "New reset pending, keep disable cmd");
                ret = -EBUSY;
                goto err_cmd_init;
        }
        rte_atomic16_clear(&hw->reset.disable_cmd);

        ret = hns3_cmd_query_firmware_version(hw, &hw->fw_version);
        if (ret) {
                PMD_INIT_LOG(ERR, "firmware version query failed %d", ret);
                goto err_cmd_init;
        }

        PMD_INIT_LOG(INFO, "The firmware version is %08x", hw->fw_version);

        return 0;

err_cmd_init:
        hns3_cmd_uninit(hw);
        return ret;
}


static void
hns3_destroy_queue(struct hns3_hw *hw, struct hns3_cmq_ring *ring)
{
        rte_spinlock_lock(&ring->lock);

        hns3_free_cmd_desc(hw, ring);

        rte_spinlock_unlock(&ring->lock);
}

void
hns3_cmd_destroy_queue(struct hns3_hw *hw)
{
        hns3_destroy_queue(hw, &hw->cmq.csq);
        hns3_destroy_queue(hw, &hw->cmq.crq);
}

void
hns3_cmd_uninit(struct hns3_hw *hw)
{
        rte_spinlock_lock(&hw->cmq.csq.lock);
        rte_spinlock_lock(&hw->cmq.crq.lock);
        rte_atomic16_set(&hw->reset.disable_cmd, 1);
        hns3_cmd_clear_regs(hw);
        rte_spinlock_unlock(&hw->cmq.crq.lock);
        rte_spinlock_unlock(&hw->cmq.csq.lock);
}