/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2019 Hisilicon Limited.
 */

#include <ethdev_pci.h>
#include <rte_io.h>

#include "hns3_ethdev.h"
#include "hns3_regs.h"
#include "hns3_intr.h"
#include "hns3_logs.h"

#define hns3_is_csq(ring) ((ring)->flag & HNS3_TYPE_CSQ)

#define cmq_ring_to_dev(ring)	(&(ring)->dev->pdev->dev)

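/*
 * Return the number of free descriptors in the ring. One slot is always
 * left unused so that a full ring can be distinguished from an empty one
 * when next_to_use catches up with next_to_clean.
 */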
static int
hns3_ring_space(struct hns3_cmq_ring *ring)
{
	int ntu = ring->next_to_use;
	int ntc = ring->next_to_clean;
	int used = (ntu - ntc + ring->desc_num) % ring->desc_num;

	return ring->desc_num - used - 1;
}

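/*
 * Check that the clean head reported by hardware lies between
 * next_to_clean and next_to_use, taking wraparound into account: when
 * next_to_use has wrapped around below next_to_clean, the valid region
 * itself wraps past the end of the ring.
 */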
static bool
is_valid_csq_clean_head(struct hns3_cmq_ring *ring, int head)
{
	int ntu = ring->next_to_use;
	int ntc = ring->next_to_clean;

	if (ntu > ntc)
		return head >= ntc && head <= ntu;

	return head >= ntc || head <= ntu;
}

/*
 * hns3_allocate_dma_mem - Allocate DMA memory for the command queue.
 * Reserve a memzone, which is a contiguous portion of physical memory
 * identified by a name.
 * @hw: pointer to the hw structure
 * @ring: pointer to the ring structure
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 */
static int
hns3_allocate_dma_mem(struct hns3_hw *hw, struct hns3_cmq_ring *ring,
		      uint64_t size, uint32_t alignment)
{
	const struct rte_memzone *mz = NULL;
	char z_name[RTE_MEMZONE_NAMESIZE];

	snprintf(z_name, sizeof(z_name), "hns3_dma_%" PRIu64, rte_rand());
	mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY,
					 RTE_MEMZONE_IOVA_CONTIG, alignment,
					 RTE_PGSIZE_2M);
	if (mz == NULL)
		return -ENOMEM;

	ring->buf_size = size;
	ring->desc = mz->addr;
	ring->desc_dma_addr = mz->iova;
	ring->zone = (const void *)mz;
	hns3_dbg(hw, "memzone %s allocated with physical address: %" PRIu64,
		 mz->name, ring->desc_dma_addr);

	return 0;
}

static void
hns3_free_dma_mem(struct hns3_hw *hw, struct hns3_cmq_ring *ring)
{
	hns3_dbg(hw, "memzone %s to be freed with physical address: %" PRIu64,
		 ((const struct rte_memzone *)ring->zone)->name,
		 ring->desc_dma_addr);
	rte_memzone_free((const struct rte_memzone *)ring->zone);
	ring->buf_size = 0;
	ring->desc = NULL;
	ring->desc_dma_addr = 0;
	ring->zone = NULL;
}

static int
hns3_alloc_cmd_desc(struct hns3_hw *hw, struct hns3_cmq_ring *ring)
{
	int size = ring->desc_num * sizeof(struct hns3_cmd_desc);

	if (hns3_allocate_dma_mem(hw, ring, size, HNS3_CMD_DESC_ALIGNMENT)) {
		hns3_err(hw, "allocate dma mem failed");
		return -ENOMEM;
	}

	return 0;
}

static void
hns3_free_cmd_desc(struct hns3_hw *hw, struct hns3_cmq_ring *ring)
{
	if (ring->desc)
		hns3_free_dma_mem(hw, ring);
}

static int
hns3_alloc_cmd_queue(struct hns3_hw *hw, int ring_type)
{
	struct hns3_cmq_ring *ring =
		(ring_type == HNS3_TYPE_CSQ) ? &hw->cmq.csq : &hw->cmq.crq;
	int ret;

	ring->ring_type = ring_type;
	ring->hw = hw;

	ret = hns3_alloc_cmd_desc(hw, ring);
	if (ret)
		hns3_err(hw, "descriptor %s alloc error %d",
			 (ring_type == HNS3_TYPE_CSQ) ? "CSQ" : "CRQ", ret);

	return ret;
}

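/*
 * Reset the flags of a descriptor so that it can be reused for another
 * command, setting or clearing the WR bit depending on whether the new
 * command is a read.
 */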
void
hns3_cmd_reuse_desc(struct hns3_cmd_desc *desc, bool is_read)
{
	desc->flag = rte_cpu_to_le_16(HNS3_CMD_FLAG_NO_INTR | HNS3_CMD_FLAG_IN);
	if (is_read)
		desc->flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_WR);
	else
		desc->flag &= rte_cpu_to_le_16(~HNS3_CMD_FLAG_WR);
}

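/*
 * Initialize a descriptor from scratch: zero it, fill in the opcode,
 * set the default flags, and add the WR flag for read commands.
 */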
void
hns3_cmd_setup_basic_desc(struct hns3_cmd_desc *desc,
			  enum hns3_opcode_type opcode, bool is_read)
{
	memset((void *)desc, 0, sizeof(struct hns3_cmd_desc));
	desc->opcode = rte_cpu_to_le_16(opcode);
	desc->flag = rte_cpu_to_le_16(HNS3_CMD_FLAG_NO_INTR | HNS3_CMD_FLAG_IN);

	if (is_read)
		desc->flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_WR);
}

static void
hns3_cmd_clear_regs(struct hns3_hw *hw)
{
	hns3_write_dev(hw, HNS3_CMDQ_TX_ADDR_L_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_TX_ADDR_H_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_TX_DEPTH_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_TX_HEAD_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_TX_TAIL_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_RX_ADDR_L_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_RX_ADDR_H_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_RX_DEPTH_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_RX_HEAD_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_RX_TAIL_REG, 0);
}

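/*
 * Program the base address, depth and head/tail pointers of one ring
 * into the CSQ (TX) or CRQ (RX) register set. The CSQ depth register
 * additionally carries the HNS3_NIC_SW_RST_RDY flag.
 */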
static void
hns3_cmd_config_regs(struct hns3_cmq_ring *ring)
{
	uint64_t dma = ring->desc_dma_addr;

	if (ring->ring_type == HNS3_TYPE_CSQ) {
		hns3_write_dev(ring->hw, HNS3_CMDQ_TX_ADDR_L_REG,
			       lower_32_bits(dma));
		hns3_write_dev(ring->hw, HNS3_CMDQ_TX_ADDR_H_REG,
			       upper_32_bits(dma));
		hns3_write_dev(ring->hw, HNS3_CMDQ_TX_DEPTH_REG,
			       ring->desc_num >> HNS3_NIC_CMQ_DESC_NUM_S |
			       HNS3_NIC_SW_RST_RDY);
		hns3_write_dev(ring->hw, HNS3_CMDQ_TX_HEAD_REG, 0);
		hns3_write_dev(ring->hw, HNS3_CMDQ_TX_TAIL_REG, 0);
	} else {
		hns3_write_dev(ring->hw, HNS3_CMDQ_RX_ADDR_L_REG,
			       lower_32_bits(dma));
		hns3_write_dev(ring->hw, HNS3_CMDQ_RX_ADDR_H_REG,
			       upper_32_bits(dma));
		hns3_write_dev(ring->hw, HNS3_CMDQ_RX_DEPTH_REG,
			       ring->desc_num >> HNS3_NIC_CMQ_DESC_NUM_S);
		hns3_write_dev(ring->hw, HNS3_CMDQ_RX_HEAD_REG, 0);
		hns3_write_dev(ring->hw, HNS3_CMDQ_RX_TAIL_REG, 0);
	}
}

static void
hns3_cmd_init_regs(struct hns3_hw *hw)
{
	hns3_cmd_config_regs(&hw->cmq.csq);
	hns3_cmd_config_regs(&hw->cmq.crq);
}

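/*
 * Reclaim the descriptors that firmware has consumed by advancing
 * next_to_clean to the hardware head pointer. Returns the number of
 * descriptors cleaned, or -EIO if the reported head is out of range,
 * in which case the command queue is treated as corrupted and a
 * delayed reset is scheduled (in the primary process only).
 */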
static int
hns3_cmd_csq_clean(struct hns3_hw *hw)
{
	struct hns3_cmq_ring *csq = &hw->cmq.csq;
	uint32_t head;
	int clean;

	head = hns3_read_dev(hw, HNS3_CMDQ_TX_HEAD_REG);
	if (!is_valid_csq_clean_head(csq, head)) {
		hns3_err(hw, "wrong cmd head (%u, %u-%u)", head,
			 csq->next_to_use, csq->next_to_clean);
		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			rte_atomic16_set(&hw->reset.disable_cmd, 1);
			hns3_schedule_delayed_reset(HNS3_DEV_HW_TO_ADAPTER(hw));
		}

		return -EIO;
	}

	clean = (head - csq->next_to_clean + csq->desc_num) % csq->desc_num;
	csq->next_to_clean = head;
	return clean;
}

static int
hns3_cmd_csq_done(struct hns3_hw *hw)
{
	uint32_t head = hns3_read_dev(hw, HNS3_CMDQ_TX_HEAD_REG);

	return head == hw->cmq.csq.next_to_use;
}

static bool
hns3_is_special_opcode(uint16_t opcode)
{
	/*
	 * These commands consist of several descriptors and use the first
	 * one to hold the opcode and the return value.
	 */
	uint16_t spec_opcode[] = {HNS3_OPC_STATS_64_BIT,
				  HNS3_OPC_STATS_32_BIT,
				  HNS3_OPC_STATS_MAC,
				  HNS3_OPC_STATS_MAC_ALL,
				  HNS3_OPC_QUERY_32_BIT_REG,
				  HNS3_OPC_QUERY_64_BIT_REG};
	uint32_t i;

	for (i = 0; i < ARRAY_SIZE(spec_opcode); i++)
		if (spec_opcode[i] == opcode)
			return true;

	return false;
}

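/* Map an IMP (firmware) command status code onto a Linux errno value. */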
static int
hns3_cmd_convert_err_code(uint16_t desc_ret)
{
	static const struct {
		uint16_t imp_errcode;
		int linux_errcode;
	} hns3_cmdq_status[] = {
		{HNS3_CMD_EXEC_SUCCESS, 0},
		{HNS3_CMD_NO_AUTH, -EPERM},
		{HNS3_CMD_NOT_SUPPORTED, -EOPNOTSUPP},
		{HNS3_CMD_QUEUE_FULL, -EXFULL},
		{HNS3_CMD_NEXT_ERR, -ENOSR},
		{HNS3_CMD_UNEXE_ERR, -ENOTBLK},
		{HNS3_CMD_PARA_ERR, -EINVAL},
		{HNS3_CMD_RESULT_ERR, -ERANGE},
		{HNS3_CMD_TIMEOUT, -ETIME},
		{HNS3_CMD_HILINK_ERR, -ENOLINK},
		{HNS3_CMD_QUEUE_ILLEGAL, -ENXIO},
		{HNS3_CMD_INVALID, -EBADR},
		{HNS3_CMD_ROH_CHECK_FAIL, -EINVAL}
	};

	uint32_t i;

	for (i = 0; i < ARRAY_SIZE(hns3_cmdq_status); i++)
		if (hns3_cmdq_status[i].imp_errcode == desc_ret)
			return hns3_cmdq_status[i].linux_errcode;

	return -EREMOTEIO;
}

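/*
 * Copy the descriptors written back by hardware, starting at ring index
 * ntc, into the caller's array and translate the returned status code.
 * Most commands carry their status in the last descriptor; the special
 * multi-descriptor commands carry it in the first one.
 */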
static int
hns3_cmd_get_hardware_reply(struct hns3_hw *hw,
			    struct hns3_cmd_desc *desc, int num, int ntc)
{
	uint16_t opcode, desc_ret;
	int current_ntc = ntc;
	int handle;

	opcode = rte_le_to_cpu_16(desc[0].opcode);
	for (handle = 0; handle < num; handle++) {
		/* Get the result of hardware write back */
		desc[handle] = hw->cmq.csq.desc[current_ntc];

		current_ntc++;
		if (current_ntc == hw->cmq.csq.desc_num)
			current_ntc = 0;
	}

	if (likely(!hns3_is_special_opcode(opcode)))
		desc_ret = rte_le_to_cpu_16(desc[num - 1].retval);
	else
		desc_ret = rte_le_to_cpu_16(desc[0].retval);

	hw->cmq.last_status = desc_ret;
	return hns3_cmd_convert_err_code(desc_ret);
}

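/*
 * Busy-wait, in 1 us steps up to cmq.tx_timeout, for firmware to catch
 * up with the CSQ tail, bailing out early if command sending has been
 * disabled or a reset is pending.
 */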
static int
hns3_cmd_poll_reply(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	uint32_t timeout = 0;

	do {
		if (hns3_cmd_csq_done(hw))
			return 0;

		if (rte_atomic16_read(&hw->reset.disable_cmd)) {
			hns3_err(hw,
				 "Don't wait for reply because of disable_cmd");
			return -EBUSY;
		}

		if (is_reset_pending(hns)) {
			hns3_err(hw, "Don't wait for reply because of reset pending");
			return -EIO;
		}

		rte_delay_us(1);
		timeout++;
	} while (timeout < hw->cmq.tx_timeout);
	hns3_err(hw, "Wait for reply timeout");
	return -ETIME;
}

/*
 * hns3_cmd_send - send command to the command queue
 *
 * @param hw
 *   pointer to the hw struct
 * @param desc
 *   prefilled descriptor for describing the command
 * @param num
 *   the number of descriptors to be sent
 * @return
 *   - -EBUSY if the device is resetting
 *   - -EIO   if the cmd csq is corrupted (due to reset) or
 *            there is a reset pending
 *   - -ENOMEM/-ETIME/...(non-zero) on other errors
 *   - Zero   if the operation completed successfully
 *
 * Note: -EBUSY/-EIO are only used in the reset case.
 *
 * Note: this is the main send routine for the command queue; it
 * sends to the queue, cleans the queue, etc.
 */
int
hns3_cmd_send(struct hns3_hw *hw, struct hns3_cmd_desc *desc, int num)
{
	struct hns3_cmd_desc *desc_to_use;
	int handle = 0;
	int retval;
	uint32_t ntc;

	if (rte_atomic16_read(&hw->reset.disable_cmd))
		return -EBUSY;

	rte_spinlock_lock(&hw->cmq.csq.lock);

	/* Clean the command send queue */
	retval = hns3_cmd_csq_clean(hw);
	if (retval < 0) {
		rte_spinlock_unlock(&hw->cmq.csq.lock);
		return retval;
	}

	if (num > hns3_ring_space(&hw->cmq.csq)) {
		rte_spinlock_unlock(&hw->cmq.csq.lock);
		return -ENOMEM;
	}

	/*
	 * Record the location of the descriptors in the ring for this
	 * command, which hardware will use for its write back.
	 */
	ntc = hw->cmq.csq.next_to_use;

	while (handle < num) {
		desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
		*desc_to_use = desc[handle];
		(hw->cmq.csq.next_to_use)++;
		if (hw->cmq.csq.next_to_use == hw->cmq.csq.desc_num)
			hw->cmq.csq.next_to_use = 0;
		handle++;
	}

	/* Write to hardware */
	hns3_write_dev(hw, HNS3_CMDQ_TX_TAIL_REG, hw->cmq.csq.next_to_use);

	/*
	 * If the command is sync, wait for the firmware to write back.
	 * If multiple descriptors are to be sent, use the first one to
	 * check.
	 */
	if (HNS3_CMD_SEND_SYNC(rte_le_to_cpu_16(desc->flag))) {
		retval = hns3_cmd_poll_reply(hw);
		if (!retval)
			retval = hns3_cmd_get_hardware_reply(hw, desc, num,
							     ntc);
	}

	rte_spinlock_unlock(&hw->cmq.csq.lock);
	return retval;
}

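/*
 * Translate the capability bits reported by firmware in the query
 * version command into the driver's own hw->capability bits.
 */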
static void
hns3_parse_capability(struct hns3_hw *hw,
		      struct hns3_query_version_cmd *cmd)
{
	uint32_t caps = rte_le_to_cpu_32(cmd->caps[0]);

	if (hns3_get_bit(caps, HNS3_CAPS_UDP_GSO_B))
		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_UDP_GSO_B, 1);
	if (hns3_get_bit(caps, HNS3_CAPS_FD_QUEUE_REGION_B))
		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_FD_QUEUE_REGION_B,
			     1);
	if (hns3_get_bit(caps, HNS3_CAPS_PTP_B))
		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_PTP_B, 1);
	if (hns3_get_bit(caps, HNS3_CAPS_TX_PUSH_B))
		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_TX_PUSH_B, 1);
	if (hns3_get_bit(caps, HNS3_CAPS_PHY_IMP_B))
		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_COPPER_B, 1);
	if (hns3_get_bit(caps, HNS3_CAPS_TQP_TXRX_INDEP_B))
		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_INDEP_TXRX_B, 1);
	if (hns3_get_bit(caps, HNS3_CAPS_STASH_B))
		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_STASH_B, 1);
}

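/*
 * Query the firmware version and the capability set with a single
 * HNS3_OPC_QUERY_FW_VER command and cache both in the hw struct.
 */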
static int
hns3_cmd_query_firmware_version_and_capability(struct hns3_hw *hw)
{
	struct hns3_query_version_cmd *resp;
	struct hns3_cmd_desc desc;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_FW_VER, true);
	resp = (struct hns3_query_version_cmd *)desc.data;

	/* Send the query command and wait for the firmware reply */
	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		return ret;

	hw->fw_version = rte_le_to_cpu_32(resp->firmware);
	hns3_parse_capability(hw, resp);

	return 0;
}

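/*
 * One-time setup of the command queue: initialize the ring locks, clear
 * any residual register state, and allocate the CSQ and CRQ descriptor
 * rings.
 */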
int
hns3_cmd_init_queue(struct hns3_hw *hw)
{
	int ret;

	/* Set up the locks for the command queues */
	rte_spinlock_init(&hw->cmq.csq.lock);
	rte_spinlock_init(&hw->cmq.crq.lock);

	/*
	 * Clear all the command registers in case there are residual
	 * values left over.
	 */
	hns3_cmd_clear_regs(hw);

	/* Set up the number of entries used by the cmd queues */
	hw->cmq.csq.desc_num = HNS3_NIC_CMQ_DESC_NUM;
	hw->cmq.crq.desc_num = HNS3_NIC_CMQ_DESC_NUM;

	/* Set up the Tx write back timeout */
	hw->cmq.tx_timeout = HNS3_CMDQ_TX_TIMEOUT;

	/* Set up the queue rings */
	ret = hns3_alloc_cmd_queue(hw, HNS3_TYPE_CSQ);
	if (ret) {
		PMD_INIT_LOG(ERR, "CSQ ring setup error %d", ret);
		return ret;
	}

	ret = hns3_alloc_cmd_queue(hw, HNS3_TYPE_CRQ);
	if (ret) {
		PMD_INIT_LOG(ERR, "CRQ ring setup error %d", ret);
		goto err_crq;
	}

	return 0;

err_crq:
	hns3_free_cmd_desc(hw, &hw->cmq.csq);

	return ret;
}

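/*
 * (Re)initialize the command queue state and hardware registers, then
 * verify the channel by querying the firmware version and capabilities.
 * Command sending remains disabled if a new reset became pending in the
 * meantime.
 */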
int
hns3_cmd_init(struct hns3_hw *hw)
{
	uint32_t version;
	int ret;

	rte_spinlock_lock(&hw->cmq.csq.lock);
	rte_spinlock_lock(&hw->cmq.crq.lock);

	hw->cmq.csq.next_to_clean = 0;
	hw->cmq.csq.next_to_use = 0;
	hw->cmq.crq.next_to_clean = 0;
	hw->cmq.crq.next_to_use = 0;
	hw->mbx_resp.head = 0;
	hw->mbx_resp.tail = 0;
	hw->mbx_resp.lost = 0;
	hns3_cmd_init_regs(hw);

	rte_spinlock_unlock(&hw->cmq.crq.lock);
	rte_spinlock_unlock(&hw->cmq.csq.lock);

	/*
	 * Check if there is a new reset pending, because a higher level
	 * reset may happen while a lower level reset is being processed.
	 */
	if (is_reset_pending(HNS3_DEV_HW_TO_ADAPTER(hw))) {
		PMD_INIT_LOG(ERR, "New reset pending, keep disable cmd");
		ret = -EBUSY;
		goto err_cmd_init;
	}
	rte_atomic16_clear(&hw->reset.disable_cmd);

	ret = hns3_cmd_query_firmware_version_and_capability(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "firmware version query failed %d", ret);
		goto err_cmd_init;
	}

	version = hw->fw_version;
	PMD_INIT_LOG(INFO, "The firmware version is %u.%u.%u.%u",
		     hns3_get_field(version, HNS3_FW_VERSION_BYTE3_M,
				    HNS3_FW_VERSION_BYTE3_S),
		     hns3_get_field(version, HNS3_FW_VERSION_BYTE2_M,
				    HNS3_FW_VERSION_BYTE2_S),
		     hns3_get_field(version, HNS3_FW_VERSION_BYTE1_M,
				    HNS3_FW_VERSION_BYTE1_S),
		     hns3_get_field(version, HNS3_FW_VERSION_BYTE0_M,
				    HNS3_FW_VERSION_BYTE0_S));

	return 0;

err_cmd_init:
	rte_atomic16_set(&hw->reset.disable_cmd, 1);
	return ret;
}

static void
hns3_destroy_queue(struct hns3_hw *hw, struct hns3_cmq_ring *ring)
{
	rte_spinlock_lock(&ring->lock);

	hns3_free_cmd_desc(hw, ring);

	rte_spinlock_unlock(&ring->lock);
}

void
hns3_cmd_destroy_queue(struct hns3_hw *hw)
{
	hns3_destroy_queue(hw, &hw->cmq.csq);
	hns3_destroy_queue(hw, &hw->cmq.crq);
}

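/*
 * Disable further command submission and clear the command queue
 * registers. The descriptor rings themselves are released separately by
 * hns3_cmd_destroy_queue().
 */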
void
hns3_cmd_uninit(struct hns3_hw *hw)
{
	rte_spinlock_lock(&hw->cmq.csq.lock);
	rte_spinlock_lock(&hw->cmq.crq.lock);
	rte_atomic16_set(&hw->reset.disable_cmd, 1);
	hns3_cmd_clear_regs(hw);
	rte_spinlock_unlock(&hw->cmq.crq.lock);
	rte_spinlock_unlock(&hw->cmq.csq.lock);
}