net/hns3: add initialization
[dpdk.git] / drivers / net / hns3 / hns3_ethdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018-2019 Hisilicon Limited.
3  */
4
5 #include <errno.h>
6 #include <stdarg.h>
7 #include <stdbool.h>
8 #include <stdio.h>
9 #include <stdint.h>
10 #include <inttypes.h>
11 #include <unistd.h>
12 #include <rte_bus_pci.h>
13 #include <rte_common.h>
14 #include <rte_cycles.h>
15 #include <rte_dev.h>
16 #include <rte_eal.h>
17 #include <rte_ether.h>
18 #include <rte_ethdev_driver.h>
19 #include <rte_ethdev_pci.h>
20 #include <rte_io.h>
21 #include <rte_log.h>
22 #include <rte_pci.h>
23
24 #include "hns3_ethdev.h"
25 #include "hns3_logs.h"
26 #include "hns3_regs.h"
27
/*
 * Default burst size / queue count constants — presumably used when
 * reporting the recommended port configuration to applications; confirm
 * at the usage sites (not visible in this chunk).
 */
#define HNS3_DEFAULT_PORT_CONF_BURST_SIZE       32
#define HNS3_DEFAULT_PORT_CONF_QUEUES_NUM       1

/* Dynamic log type ids registered by this PMD (init path and data path). */
int hns3_logtype_init;
int hns3_logtype_driver;
33
34 static int
35 hns3_config_tso(struct hns3_hw *hw, unsigned int tso_mss_min,
36                 unsigned int tso_mss_max)
37 {
38         struct hns3_cfg_tso_status_cmd *req;
39         struct hns3_cmd_desc desc;
40         uint16_t tso_mss;
41
42         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TSO_GENERIC_CONFIG, false);
43
44         req = (struct hns3_cfg_tso_status_cmd *)desc.data;
45
46         tso_mss = 0;
47         hns3_set_field(tso_mss, HNS3_TSO_MSS_MIN_M, HNS3_TSO_MSS_MIN_S,
48                        tso_mss_min);
49         req->tso_mss_min = rte_cpu_to_le_16(tso_mss);
50
51         tso_mss = 0;
52         hns3_set_field(tso_mss, HNS3_TSO_MSS_MIN_M, HNS3_TSO_MSS_MIN_S,
53                        tso_mss_max);
54         req->tso_mss_max = rte_cpu_to_le_16(tso_mss);
55
56         return hns3_cmd_send(hw, &desc, 1);
57 }
58
59 int
60 hns3_config_gro(struct hns3_hw *hw, bool en)
61 {
62         struct hns3_cfg_gro_status_cmd *req;
63         struct hns3_cmd_desc desc;
64         int ret;
65
66         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_GRO_GENERIC_CONFIG, false);
67         req = (struct hns3_cfg_gro_status_cmd *)desc.data;
68
69         req->gro_en = rte_cpu_to_le_16(en ? 1 : 0);
70
71         ret = hns3_cmd_send(hw, &desc, 1);
72         if (ret)
73                 hns3_err(hw, "GRO hardware config cmd failed, ret = %d", ret);
74
75         return ret;
76 }
77
78 static int
79 hns3_set_umv_space(struct hns3_hw *hw, uint16_t space_size,
80                    uint16_t *allocated_size, bool is_alloc)
81 {
82         struct hns3_umv_spc_alc_cmd *req;
83         struct hns3_cmd_desc desc;
84         int ret;
85
86         req = (struct hns3_umv_spc_alc_cmd *)desc.data;
87         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_ALLOCATE, false);
88         hns3_set_bit(req->allocate, HNS3_UMV_SPC_ALC_B, is_alloc ? 0 : 1);
89         req->space_size = rte_cpu_to_le_32(space_size);
90
91         ret = hns3_cmd_send(hw, &desc, 1);
92         if (ret) {
93                 PMD_INIT_LOG(ERR, "%s umv space failed for cmd_send, ret =%d",
94                              is_alloc ? "allocate" : "free", ret);
95                 return ret;
96         }
97
98         if (is_alloc && allocated_size)
99                 *allocated_size = rte_le_to_cpu_32(desc.data[1]);
100
101         return 0;
102 }
103
104 static int
105 hns3_init_umv_space(struct hns3_hw *hw)
106 {
107         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
108         struct hns3_pf *pf = &hns->pf;
109         uint16_t allocated_size = 0;
110         int ret;
111
112         ret = hns3_set_umv_space(hw, pf->wanted_umv_size, &allocated_size,
113                                  true);
114         if (ret)
115                 return ret;
116
117         if (allocated_size < pf->wanted_umv_size)
118                 PMD_INIT_LOG(WARNING, "Alloc umv space failed, want %u, get %u",
119                              pf->wanted_umv_size, allocated_size);
120
121         pf->max_umv_size = (!!allocated_size) ? allocated_size :
122                                                 pf->wanted_umv_size;
123         pf->used_umv_size = 0;
124         return 0;
125 }
126
127 static int
128 hns3_uninit_umv_space(struct hns3_hw *hw)
129 {
130         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
131         struct hns3_pf *pf = &hns->pf;
132         int ret;
133
134         if (pf->max_umv_size == 0)
135                 return 0;
136
137         ret = hns3_set_umv_space(hw, pf->max_umv_size, NULL, false);
138         if (ret)
139                 return ret;
140
141         pf->max_umv_size = 0;
142
143         return 0;
144 }
145
146 static int
147 hns3_set_mac_mtu(struct hns3_hw *hw, uint16_t new_mps)
148 {
149         struct hns3_config_max_frm_size_cmd *req;
150         struct hns3_cmd_desc desc;
151
152         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_MAX_FRM_SIZE, false);
153
154         req = (struct hns3_config_max_frm_size_cmd *)desc.data;
155         req->max_frm_size = rte_cpu_to_le_16(new_mps);
156         req->min_frm_size = HNS3_MIN_FRAME_LEN;
157
158         return hns3_cmd_send(hw, &desc, 1);
159 }
160
161 static int
162 hns3_config_mtu(struct hns3_hw *hw, uint16_t mps)
163 {
164         int ret;
165
166         ret = hns3_set_mac_mtu(hw, mps);
167         if (ret) {
168                 hns3_err(hw, "Failed to set mtu, ret = %d", ret);
169                 return ret;
170         }
171
172         ret = hns3_buffer_alloc(hw);
173         if (ret) {
174                 hns3_err(hw, "Failed to allocate buffer, ret = %d", ret);
175                 return ret;
176         }
177
178         return 0;
179 }
180
181 static int
182 hns3_parse_func_status(struct hns3_hw *hw, struct hns3_func_status_cmd *status)
183 {
184         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
185         struct hns3_pf *pf = &hns->pf;
186
187         if (!(status->pf_state & HNS3_PF_STATE_DONE))
188                 return -EINVAL;
189
190         pf->is_main_pf = (status->pf_state & HNS3_PF_STATE_MAIN) ? true : false;
191
192         return 0;
193 }
194
195 static int
196 hns3_query_function_status(struct hns3_hw *hw)
197 {
198 #define HNS3_QUERY_MAX_CNT              10
199 #define HNS3_QUERY_SLEEP_MSCOEND        1
200         struct hns3_func_status_cmd *req;
201         struct hns3_cmd_desc desc;
202         int timeout = 0;
203         int ret;
204
205         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_FUNC_STATUS, true);
206         req = (struct hns3_func_status_cmd *)desc.data;
207
208         do {
209                 ret = hns3_cmd_send(hw, &desc, 1);
210                 if (ret) {
211                         PMD_INIT_LOG(ERR, "query function status failed %d",
212                                      ret);
213                         return ret;
214                 }
215
216                 /* Check pf reset is done */
217                 if (req->pf_state)
218                         break;
219
220                 rte_delay_ms(HNS3_QUERY_SLEEP_MSCOEND);
221         } while (timeout++ < HNS3_QUERY_MAX_CNT);
222
223         return hns3_parse_func_status(hw, req);
224 }
225
226 static int
227 hns3_query_pf_resource(struct hns3_hw *hw)
228 {
229         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
230         struct hns3_pf *pf = &hns->pf;
231         struct hns3_pf_res_cmd *req;
232         struct hns3_cmd_desc desc;
233         int ret;
234
235         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_PF_RSRC, true);
236         ret = hns3_cmd_send(hw, &desc, 1);
237         if (ret) {
238                 PMD_INIT_LOG(ERR, "query pf resource failed %d", ret);
239                 return ret;
240         }
241
242         req = (struct hns3_pf_res_cmd *)desc.data;
243         hw->total_tqps_num = rte_le_to_cpu_16(req->tqp_num);
244         pf->pkt_buf_size = rte_le_to_cpu_16(req->buf_size) << HNS3_BUF_UNIT_S;
245         hw->tqps_num = RTE_MIN(hw->total_tqps_num, HNS3_MAX_TQP_NUM_PER_FUNC);
246
247         if (req->tx_buf_size)
248                 pf->tx_buf_size =
249                     rte_le_to_cpu_16(req->tx_buf_size) << HNS3_BUF_UNIT_S;
250         else
251                 pf->tx_buf_size = HNS3_DEFAULT_TX_BUF;
252
253         pf->tx_buf_size = roundup(pf->tx_buf_size, HNS3_BUF_SIZE_UNIT);
254
255         if (req->dv_buf_size)
256                 pf->dv_buf_size =
257                     rte_le_to_cpu_16(req->dv_buf_size) << HNS3_BUF_UNIT_S;
258         else
259                 pf->dv_buf_size = HNS3_DEFAULT_DV;
260
261         pf->dv_buf_size = roundup(pf->dv_buf_size, HNS3_BUF_SIZE_UNIT);
262
263         hw->num_msi =
264             hns3_get_field(rte_le_to_cpu_16(req->pf_intr_vector_number),
265                            HNS3_PF_VEC_NUM_M, HNS3_PF_VEC_NUM_S);
266
267         return 0;
268 }
269
/*
 * Decode the board configuration from the GET_CFG_PARAM command
 * descriptors filled by hns3_get_board_cfg() into *cfg.
 */
static void
hns3_parse_cfg(struct hns3_cfg *cfg, struct hns3_cmd_desc *desc)
{
        struct hns3_cfg_param_cmd *req;
        uint64_t mac_addr_tmp_high;
        uint64_t mac_addr_tmp;
        uint32_t i;

        req = (struct hns3_cfg_param_cmd *)desc[0].data;

        /* get the configuration */
        cfg->vmdq_vport_num = hns3_get_field(rte_le_to_cpu_32(req->param[0]),
                                             HNS3_CFG_VMDQ_M, HNS3_CFG_VMDQ_S);
        cfg->tc_num = hns3_get_field(rte_le_to_cpu_32(req->param[0]),
                                     HNS3_CFG_TC_NUM_M, HNS3_CFG_TC_NUM_S);
        cfg->tqp_desc_num = hns3_get_field(rte_le_to_cpu_32(req->param[0]),
                                           HNS3_CFG_TQP_DESC_N_M,
                                           HNS3_CFG_TQP_DESC_N_S);

        cfg->phy_addr = hns3_get_field(rte_le_to_cpu_32(req->param[1]),
                                       HNS3_CFG_PHY_ADDR_M,
                                       HNS3_CFG_PHY_ADDR_S);
        cfg->media_type = hns3_get_field(rte_le_to_cpu_32(req->param[1]),
                                         HNS3_CFG_MEDIA_TP_M,
                                         HNS3_CFG_MEDIA_TP_S);
        cfg->rx_buf_len = hns3_get_field(rte_le_to_cpu_32(req->param[1]),
                                         HNS3_CFG_RX_BUF_LEN_M,
                                         HNS3_CFG_RX_BUF_LEN_S);
        /* get mac address: low 32 bits from param[2], high bits from param[3] */
        mac_addr_tmp = rte_le_to_cpu_32(req->param[2]);
        mac_addr_tmp_high = hns3_get_field(rte_le_to_cpu_32(req->param[3]),
                                           HNS3_CFG_MAC_ADDR_H_M,
                                           HNS3_CFG_MAC_ADDR_H_S);

        /*
         * The shift is split as (<< 31) << 1 instead of << 32 — presumably
         * inherited from code operating on 32-bit values where << 32 would
         * be undefined; on this uint64_t the result is the same.
         */
        mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;

        cfg->default_speed = hns3_get_field(rte_le_to_cpu_32(req->param[3]),
                                            HNS3_CFG_DEFAULT_SPEED_M,
                                            HNS3_CFG_DEFAULT_SPEED_S);
        cfg->rss_size_max = hns3_get_field(rte_le_to_cpu_32(req->param[3]),
                                           HNS3_CFG_RSS_SIZE_M,
                                           HNS3_CFG_RSS_SIZE_S);

        /* Unpack the 48-bit MAC value into the byte array, LSB first. */
        for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
                cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;

        /* The second descriptor carries NUMA map, speed ability, UMV space. */
        req = (struct hns3_cfg_param_cmd *)desc[1].data;
        cfg->numa_node_map = rte_le_to_cpu_32(req->param[0]);

        cfg->speed_ability = hns3_get_field(rte_le_to_cpu_32(req->param[1]),
                                            HNS3_CFG_SPEED_ABILITY_M,
                                            HNS3_CFG_SPEED_ABILITY_S);
        cfg->umv_space = hns3_get_field(rte_le_to_cpu_32(req->param[1]),
                                        HNS3_CFG_UMV_TBL_SPACE_M,
                                        HNS3_CFG_UMV_TBL_SPACE_S);
        /* Zero from flash means "use the per-PF default". */
        if (!cfg->umv_space)
                cfg->umv_space = HNS3_DEFAULT_UMV_SPACE_PER_PF;
}
328
329 /* hns3_get_board_cfg: query the static parameter from NCL_config file in flash
330  * @hw: pointer to struct hns3_hw
331  * @hcfg: the config structure to be getted
332  */
333 static int
334 hns3_get_board_cfg(struct hns3_hw *hw, struct hns3_cfg *hcfg)
335 {
336         struct hns3_cmd_desc desc[HNS3_PF_CFG_DESC_NUM];
337         struct hns3_cfg_param_cmd *req;
338         uint32_t offset;
339         uint32_t i;
340         int ret;
341
342         for (i = 0; i < HNS3_PF_CFG_DESC_NUM; i++) {
343                 offset = 0;
344                 req = (struct hns3_cfg_param_cmd *)desc[i].data;
345                 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_GET_CFG_PARAM,
346                                           true);
347                 hns3_set_field(offset, HNS3_CFG_OFFSET_M, HNS3_CFG_OFFSET_S,
348                                i * HNS3_CFG_RD_LEN_BYTES);
349                 /* Len should be divided by 4 when send to hardware */
350                 hns3_set_field(offset, HNS3_CFG_RD_LEN_M, HNS3_CFG_RD_LEN_S,
351                                HNS3_CFG_RD_LEN_BYTES / HNS3_CFG_RD_LEN_UNIT);
352                 req->offset = rte_cpu_to_le_32(offset);
353         }
354
355         ret = hns3_cmd_send(hw, desc, HNS3_PF_CFG_DESC_NUM);
356         if (ret) {
357                 PMD_INIT_LOG(ERR, "get config failed %d.", ret);
358                 return ret;
359         }
360
361         hns3_parse_cfg(hcfg, desc);
362
363         return 0;
364 }
365
366 static int
367 hns3_parse_speed(int speed_cmd, uint32_t *speed)
368 {
369         switch (speed_cmd) {
370         case HNS3_CFG_SPEED_10M:
371                 *speed = ETH_SPEED_NUM_10M;
372                 break;
373         case HNS3_CFG_SPEED_100M:
374                 *speed = ETH_SPEED_NUM_100M;
375                 break;
376         case HNS3_CFG_SPEED_1G:
377                 *speed = ETH_SPEED_NUM_1G;
378                 break;
379         case HNS3_CFG_SPEED_10G:
380                 *speed = ETH_SPEED_NUM_10G;
381                 break;
382         case HNS3_CFG_SPEED_25G:
383                 *speed = ETH_SPEED_NUM_25G;
384                 break;
385         case HNS3_CFG_SPEED_40G:
386                 *speed = ETH_SPEED_NUM_40G;
387                 break;
388         case HNS3_CFG_SPEED_50G:
389                 *speed = ETH_SPEED_NUM_50G;
390                 break;
391         case HNS3_CFG_SPEED_100G:
392                 *speed = ETH_SPEED_NUM_100G;
393                 break;
394         default:
395                 return -EINVAL;
396         }
397
398         return 0;
399 }
400
401 static int
402 hns3_get_board_configuration(struct hns3_hw *hw)
403 {
404         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
405         struct hns3_pf *pf = &hns->pf;
406         struct hns3_cfg cfg;
407         int ret;
408
409         ret = hns3_get_board_cfg(hw, &cfg);
410         if (ret) {
411                 PMD_INIT_LOG(ERR, "get board config failed %d", ret);
412                 return ret;
413         }
414
415         if (cfg.media_type == HNS3_MEDIA_TYPE_COPPER) {
416                 PMD_INIT_LOG(ERR, "media type is copper, not supported.");
417                 return -EOPNOTSUPP;
418         }
419
420         hw->mac.media_type = cfg.media_type;
421         hw->rss_size_max = cfg.rss_size_max;
422         hw->rx_buf_len = cfg.rx_buf_len;
423         memcpy(hw->mac.mac_addr, cfg.mac_addr, RTE_ETHER_ADDR_LEN);
424         hw->mac.phy_addr = cfg.phy_addr;
425         hw->mac.default_addr_setted = false;
426         hw->num_tx_desc = cfg.tqp_desc_num;
427         hw->num_rx_desc = cfg.tqp_desc_num;
428         hw->dcb_info.num_pg = 1;
429         hw->dcb_info.hw_pfc_map = 0;
430
431         ret = hns3_parse_speed(cfg.default_speed, &hw->mac.link_speed);
432         if (ret) {
433                 PMD_INIT_LOG(ERR, "Get wrong speed %d, ret = %d",
434                              cfg.default_speed, ret);
435                 return ret;
436         }
437
438         pf->tc_max = cfg.tc_num;
439         if (pf->tc_max > HNS3_MAX_TC_NUM || pf->tc_max < 1) {
440                 PMD_INIT_LOG(WARNING,
441                              "Get TC num(%u) from flash, set TC num to 1",
442                              pf->tc_max);
443                 pf->tc_max = 1;
444         }
445
446         /* Dev does not support DCB */
447         if (!hns3_dev_dcb_supported(hw)) {
448                 pf->tc_max = 1;
449                 pf->pfc_max = 0;
450         } else
451                 pf->pfc_max = pf->tc_max;
452
453         hw->dcb_info.num_tc = 1;
454         hw->alloc_rss_size = RTE_MIN(hw->rss_size_max,
455                                      hw->tqps_num / hw->dcb_info.num_tc);
456         hns3_set_bit(hw->hw_tc_map, 0, 1);
457         pf->tx_sch_mode = HNS3_FLAG_TC_BASE_SCH_MODE;
458
459         pf->wanted_umv_size = cfg.umv_space;
460
461         return ret;
462 }
463
464 static int
465 hns3_get_configuration(struct hns3_hw *hw)
466 {
467         int ret;
468
469         ret = hns3_query_function_status(hw);
470         if (ret) {
471                 PMD_INIT_LOG(ERR, "Failed to query function status: %d.", ret);
472                 return ret;
473         }
474
475         /* Get pf resource */
476         ret = hns3_query_pf_resource(hw);
477         if (ret) {
478                 PMD_INIT_LOG(ERR, "Failed to query pf resource: %d", ret);
479                 return ret;
480         }
481
482         ret = hns3_get_board_configuration(hw);
483         if (ret) {
484                 PMD_INIT_LOG(ERR, "Failed to get board configuration: %d", ret);
485                 return ret;
486         }
487
488         return 0;
489 }
490
491 static int
492 hns3_map_tqps_to_func(struct hns3_hw *hw, uint16_t func_id, uint16_t tqp_pid,
493                       uint16_t tqp_vid, bool is_pf)
494 {
495         struct hns3_tqp_map_cmd *req;
496         struct hns3_cmd_desc desc;
497         int ret;
498
499         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_SET_TQP_MAP, false);
500
501         req = (struct hns3_tqp_map_cmd *)desc.data;
502         req->tqp_id = rte_cpu_to_le_16(tqp_pid);
503         req->tqp_vf = func_id;
504         req->tqp_flag = 1 << HNS3_TQP_MAP_EN_B;
505         if (!is_pf)
506                 req->tqp_flag |= (1 << HNS3_TQP_MAP_TYPE_B);
507         req->tqp_vid = rte_cpu_to_le_16(tqp_vid);
508
509         ret = hns3_cmd_send(hw, &desc, 1);
510         if (ret)
511                 PMD_INIT_LOG(ERR, "TQP map failed %d", ret);
512
513         return ret;
514 }
515
516 static int
517 hns3_map_tqp(struct hns3_hw *hw)
518 {
519         uint16_t tqps_num = hw->total_tqps_num;
520         uint16_t func_id;
521         uint16_t tqp_id;
522         int num;
523         int ret;
524         int i;
525
526         /*
527          * In current version VF is not supported when PF is driven by DPDK
528          * driver, so we allocate tqps to PF as much as possible.
529          */
530         tqp_id = 0;
531         num = DIV_ROUND_UP(hw->total_tqps_num, HNS3_MAX_TQP_NUM_PER_FUNC);
532         for (func_id = 0; func_id < num; func_id++) {
533                 for (i = 0;
534                      i < HNS3_MAX_TQP_NUM_PER_FUNC && tqp_id < tqps_num; i++) {
535                         ret = hns3_map_tqps_to_func(hw, func_id, tqp_id++, i,
536                                                     true);
537                         if (ret)
538                                 return ret;
539                 }
540         }
541
542         return 0;
543 }
544
545 static int
546 hns3_cfg_mac_speed_dup_hw(struct hns3_hw *hw, uint32_t speed, uint8_t duplex)
547 {
548         struct hns3_config_mac_speed_dup_cmd *req;
549         struct hns3_cmd_desc desc;
550         int ret;
551
552         req = (struct hns3_config_mac_speed_dup_cmd *)desc.data;
553
554         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_SPEED_DUP, false);
555
556         hns3_set_bit(req->speed_dup, HNS3_CFG_DUPLEX_B, !!duplex ? 1 : 0);
557
558         switch (speed) {
559         case ETH_SPEED_NUM_10M:
560                 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
561                                HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_10M);
562                 break;
563         case ETH_SPEED_NUM_100M:
564                 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
565                                HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_100M);
566                 break;
567         case ETH_SPEED_NUM_1G:
568                 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
569                                HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_1G);
570                 break;
571         case ETH_SPEED_NUM_10G:
572                 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
573                                HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_10G);
574                 break;
575         case ETH_SPEED_NUM_25G:
576                 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
577                                HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_25G);
578                 break;
579         case ETH_SPEED_NUM_40G:
580                 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
581                                HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_40G);
582                 break;
583         case ETH_SPEED_NUM_50G:
584                 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
585                                HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_50G);
586                 break;
587         case ETH_SPEED_NUM_100G:
588                 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
589                                HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_100G);
590                 break;
591         default:
592                 PMD_INIT_LOG(ERR, "invalid speed (%u)", speed);
593                 return -EINVAL;
594         }
595
596         hns3_set_bit(req->mac_change_fec_en, HNS3_CFG_MAC_SPEED_CHANGE_EN_B, 1);
597
598         ret = hns3_cmd_send(hw, &desc, 1);
599         if (ret)
600                 PMD_INIT_LOG(ERR, "mac speed/duplex config cmd failed %d", ret);
601
602         return ret;
603 }
604
605 static int
606 hns3_tx_buffer_calc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)
607 {
608         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
609         struct hns3_pf *pf = &hns->pf;
610         struct hns3_priv_buf *priv;
611         uint32_t i, total_size;
612
613         total_size = pf->pkt_buf_size;
614
615         /* alloc tx buffer for all enabled tc */
616         for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
617                 priv = &buf_alloc->priv_buf[i];
618
619                 if (hw->hw_tc_map & BIT(i)) {
620                         if (total_size < pf->tx_buf_size)
621                                 return -ENOMEM;
622
623                         priv->tx_buf_size = pf->tx_buf_size;
624                 } else
625                         priv->tx_buf_size = 0;
626
627                 total_size -= priv->tx_buf_size;
628         }
629
630         return 0;
631 }
632
633 static int
634 hns3_tx_buffer_alloc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)
635 {
636 /* TX buffer size is unit by 128 byte */
637 #define HNS3_BUF_SIZE_UNIT_SHIFT        7
638 #define HNS3_BUF_SIZE_UPDATE_EN_MSK     BIT(15)
639         struct hns3_tx_buff_alloc_cmd *req;
640         struct hns3_cmd_desc desc;
641         uint32_t buf_size;
642         uint32_t i;
643         int ret;
644
645         req = (struct hns3_tx_buff_alloc_cmd *)desc.data;
646
647         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TX_BUFF_ALLOC, 0);
648         for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
649                 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
650
651                 buf_size = buf_size >> HNS3_BUF_SIZE_UNIT_SHIFT;
652                 req->tx_pkt_buff[i] = rte_cpu_to_le_16(buf_size |
653                                                 HNS3_BUF_SIZE_UPDATE_EN_MSK);
654         }
655
656         ret = hns3_cmd_send(hw, &desc, 1);
657         if (ret)
658                 PMD_INIT_LOG(ERR, "tx buffer alloc cmd failed %d", ret);
659
660         return ret;
661 }
662
663 static int
664 hns3_get_tc_num(struct hns3_hw *hw)
665 {
666         int cnt = 0;
667         uint8_t i;
668
669         for (i = 0; i < HNS3_MAX_TC_NUM; i++)
670                 if (hw->hw_tc_map & BIT(i))
671                         cnt++;
672         return cnt;
673 }
674
675 static uint32_t
676 hns3_get_rx_priv_buff_alloced(struct hns3_pkt_buf_alloc *buf_alloc)
677 {
678         struct hns3_priv_buf *priv;
679         uint32_t rx_priv = 0;
680         int i;
681
682         for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
683                 priv = &buf_alloc->priv_buf[i];
684                 if (priv->enable)
685                         rx_priv += priv->buf_size;
686         }
687         return rx_priv;
688 }
689
690 static uint32_t
691 hns3_get_tx_buff_alloced(struct hns3_pkt_buf_alloc *buf_alloc)
692 {
693         uint32_t total_tx_size = 0;
694         uint32_t i;
695
696         for (i = 0; i < HNS3_MAX_TC_NUM; i++)
697                 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
698
699         return total_tx_size;
700 }
701
702 /* Get the number of pfc enabled TCs, which have private buffer */
703 static int
704 hns3_get_pfc_priv_num(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)
705 {
706         struct hns3_priv_buf *priv;
707         int cnt = 0;
708         uint8_t i;
709
710         for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
711                 priv = &buf_alloc->priv_buf[i];
712                 if ((hw->dcb_info.hw_pfc_map & BIT(i)) && priv->enable)
713                         cnt++;
714         }
715
716         return cnt;
717 }
718
719 /* Get the number of pfc disabled TCs, which have private buffer */
720 static int
721 hns3_get_no_pfc_priv_num(struct hns3_hw *hw,
722                          struct hns3_pkt_buf_alloc *buf_alloc)
723 {
724         struct hns3_priv_buf *priv;
725         int cnt = 0;
726         uint8_t i;
727
728         for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
729                 priv = &buf_alloc->priv_buf[i];
730                 if (hw->hw_tc_map & BIT(i) &&
731                     !(hw->dcb_info.hw_pfc_map & BIT(i)) && priv->enable)
732                         cnt++;
733         }
734
735         return cnt;
736 }
737
/*
 * Check whether the remaining rx packet buffer (rx_all) can hold the
 * per-TC private buffers already recorded in buf_alloc plus a minimum
 * shared buffer. On success, fill in the shared buffer size and its
 * per-TC high/low waterline thresholds and return true; return false
 * when the layout does not fit.
 */
static bool
hns3_is_rx_buf_ok(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc,
                  uint32_t rx_all)
{
        uint32_t shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
        struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
        struct hns3_pf *pf = &hns->pf;
        uint32_t shared_buf, aligned_mps;
        uint32_t rx_priv;
        uint8_t tc_num;
        uint8_t i;

        tc_num = hns3_get_tc_num(hw);
        /* Align the max packet size up to the buffer granularity. */
        aligned_mps = roundup(pf->mps, HNS3_BUF_SIZE_UNIT);

        /* Minimum shared buffer differs for DCB vs non-DCB devices. */
        if (hns3_dev_dcb_supported(hw))
                shared_buf_min = HNS3_BUF_MUL_BY * aligned_mps +
                                        pf->dv_buf_size;
        else
                shared_buf_min = aligned_mps + HNS3_NON_DCB_ADDITIONAL_BUF
                                        + pf->dv_buf_size;

        /* One MPS per enabled TC, plus one extra. */
        shared_buf_tc = tc_num * aligned_mps + aligned_mps;
        shared_std = roundup(max_t(uint32_t, shared_buf_min, shared_buf_tc),
                             HNS3_BUF_SIZE_UNIT);

        rx_priv = hns3_get_rx_priv_buff_alloced(buf_alloc);
        if (rx_all < rx_priv + shared_std)
                return false;

        /* Everything left after the private buffers becomes shared. */
        shared_buf = rounddown(rx_all - rx_priv, HNS3_BUF_SIZE_UNIT);
        buf_alloc->s_buf.buf_size = shared_buf;
        if (hns3_dev_dcb_supported(hw)) {
                buf_alloc->s_buf.self.high = shared_buf - pf->dv_buf_size;
                buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
                        - roundup(aligned_mps / HNS3_BUF_DIV_BY,
                                  HNS3_BUF_SIZE_UNIT);
        } else {
                buf_alloc->s_buf.self.high =
                        aligned_mps + HNS3_NON_DCB_ADDITIONAL_BUF;
                buf_alloc->s_buf.self.low = aligned_mps;
        }

        if (hns3_dev_dcb_supported(hw)) {
                hi_thrd = shared_buf - pf->dv_buf_size;

                /* Keep headroom when only a few TCs share the buffer. */
                if (tc_num <= NEED_RESERVE_TC_NUM)
                        hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
                                        / BUF_MAX_PERCENT;

                if (tc_num)
                        hi_thrd = hi_thrd / tc_num;

                hi_thrd = max_t(uint32_t, hi_thrd,
                                HNS3_BUF_MUL_BY * aligned_mps);
                hi_thrd = rounddown(hi_thrd, HNS3_BUF_SIZE_UNIT);
                lo_thrd = hi_thrd - aligned_mps / HNS3_BUF_DIV_BY;
        } else {
                hi_thrd = aligned_mps + HNS3_NON_DCB_ADDITIONAL_BUF;
                lo_thrd = aligned_mps;
        }

        /* Same thresholds for every TC. */
        for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
                buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
                buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
        }

        return true;
}
807
/*
 * Compute per-TC rx private buffer sizes and waterlines ("max" selects
 * the larger of two sizing policies), then verify the remaining packet
 * buffer can also accommodate the shared buffer. Returns the result of
 * hns3_is_rx_buf_ok().
 */
static bool
hns3_rx_buf_calc_all(struct hns3_hw *hw, bool max,
                     struct hns3_pkt_buf_alloc *buf_alloc)
{
        struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
        struct hns3_pf *pf = &hns->pf;
        struct hns3_priv_buf *priv;
        uint32_t aligned_mps;
        uint32_t rx_all;
        uint8_t i;

        /* Rx gets whatever the tx buffers did not consume. */
        rx_all = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc);
        aligned_mps = roundup(pf->mps, HNS3_BUF_SIZE_UNIT);

        for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
                priv = &buf_alloc->priv_buf[i];

                /* Reset before (possibly) re-enabling below. */
                priv->enable = 0;
                priv->wl.low = 0;
                priv->wl.high = 0;
                priv->buf_size = 0;

                if (!(hw->hw_tc_map & BIT(i)))
                        continue;

                priv->enable = 1;
                /* PFC-enabled TCs get a non-zero low waterline. */
                if (hw->dcb_info.hw_pfc_map & BIT(i)) {
                        priv->wl.low = max ? aligned_mps : HNS3_BUF_SIZE_UNIT;
                        priv->wl.high = roundup(priv->wl.low + aligned_mps,
                                                HNS3_BUF_SIZE_UNIT);
                } else {
                        priv->wl.low = 0;
                        priv->wl.high = max ? (aligned_mps * HNS3_BUF_MUL_BY) :
                                        aligned_mps;
                }

                priv->buf_size = priv->wl.high + pf->dv_buf_size;
        }

        return hns3_is_rx_buf_ok(hw, buf_alloc, rx_all);
}
849
/*
 * Free the private buffers of PFC-disabled TCs, highest TC first, until
 * the rx buffer layout fits (or none are left to drop). Returns whether
 * the final layout fits.
 */
static bool
hns3_drop_nopfc_buf_till_fit(struct hns3_hw *hw,
                             struct hns3_pkt_buf_alloc *buf_alloc)
{
        struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
        struct hns3_pf *pf = &hns->pf;
        struct hns3_priv_buf *priv;
        int no_pfc_priv_num;
        uint32_t rx_all;
        uint8_t mask;
        int i;

        rx_all = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc);
        no_pfc_priv_num = hns3_get_no_pfc_priv_num(hw, buf_alloc);

        /* let the last to be cleared first */
        for (i = HNS3_MAX_TC_NUM - 1; i >= 0; i--) {
                priv = &buf_alloc->priv_buf[i];
                mask = BIT((uint8_t)i);

                /* Only enabled TCs without PFC are candidates to drop. */
                if (hw->hw_tc_map & mask &&
                    !(hw->dcb_info.hw_pfc_map & mask)) {
                        /* Clear the no pfc TC private buffer */
                        priv->wl.low = 0;
                        priv->wl.high = 0;
                        priv->buf_size = 0;
                        priv->enable = 0;
                        no_pfc_priv_num--;
                }

                /* Stop as soon as it fits or no candidates remain. */
                if (hns3_is_rx_buf_ok(hw, buf_alloc, rx_all) ||
                    no_pfc_priv_num == 0)
                        break;
        }

        return hns3_is_rx_buf_ok(hw, buf_alloc, rx_all);
}
887
888 static bool
889 hns3_drop_pfc_buf_till_fit(struct hns3_hw *hw,
890                            struct hns3_pkt_buf_alloc *buf_alloc)
891 {
892         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
893         struct hns3_pf *pf = &hns->pf;
894         struct hns3_priv_buf *priv;
895         uint32_t rx_all;
896         int pfc_priv_num;
897         uint8_t mask;
898         int i;
899
900         rx_all = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc);
901         pfc_priv_num = hns3_get_pfc_priv_num(hw, buf_alloc);
902
903         /* let the last to be cleared first */
904         for (i = HNS3_MAX_TC_NUM - 1; i >= 0; i--) {
905                 priv = &buf_alloc->priv_buf[i];
906                 mask = BIT((uint8_t)i);
907
908                 if (hw->hw_tc_map & mask &&
909                     hw->dcb_info.hw_pfc_map & mask) {
910                         /* Reduce the number of pfc TC with private buffer */
911                         priv->wl.low = 0;
912                         priv->enable = 0;
913                         priv->wl.high = 0;
914                         priv->buf_size = 0;
915                         pfc_priv_num--;
916                 }
917                 if (hns3_is_rx_buf_ok(hw, buf_alloc, rx_all) ||
918                     pfc_priv_num == 0)
919                         break;
920         }
921
922         return hns3_is_rx_buf_ok(hw, buf_alloc, rx_all);
923 }
924
925 static bool
926 hns3_only_alloc_priv_buff(struct hns3_hw *hw,
927                           struct hns3_pkt_buf_alloc *buf_alloc)
928 {
929 #define COMPENSATE_BUFFER       0x3C00
930 #define COMPENSATE_HALF_MPS_NUM 5
931 #define PRIV_WL_GAP             0x1800
932         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
933         struct hns3_pf *pf = &hns->pf;
934         uint32_t tc_num = hns3_get_tc_num(hw);
935         uint32_t half_mps = pf->mps >> 1;
936         struct hns3_priv_buf *priv;
937         uint32_t min_rx_priv;
938         uint32_t rx_priv;
939         uint8_t i;
940
941         rx_priv = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc);
942         if (tc_num)
943                 rx_priv = rx_priv / tc_num;
944
945         if (tc_num <= NEED_RESERVE_TC_NUM)
946                 rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
947
948         /*
949          * Minimum value of private buffer in rx direction (min_rx_priv) is
950          * equal to "DV + 2.5 * MPS + 15KB". Driver only allocates rx private
951          * buffer if rx_priv is greater than min_rx_priv.
952          */
953         min_rx_priv = pf->dv_buf_size + COMPENSATE_BUFFER +
954                         COMPENSATE_HALF_MPS_NUM * half_mps;
955         min_rx_priv = roundup(min_rx_priv, HNS3_BUF_SIZE_UNIT);
956         rx_priv = rounddown(rx_priv, HNS3_BUF_SIZE_UNIT);
957
958         if (rx_priv < min_rx_priv)
959                 return false;
960
961         for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
962                 priv = &buf_alloc->priv_buf[i];
963
964                 priv->enable = 0;
965                 priv->wl.low = 0;
966                 priv->wl.high = 0;
967                 priv->buf_size = 0;
968
969                 if (!(hw->hw_tc_map & BIT(i)))
970                         continue;
971
972                 priv->enable = 1;
973                 priv->buf_size = rx_priv;
974                 priv->wl.high = rx_priv - pf->dv_buf_size;
975                 priv->wl.low = priv->wl.high - PRIV_WL_GAP;
976         }
977
978         buf_alloc->s_buf.buf_size = 0;
979
980         return true;
981 }
982
983 /*
984  * hns3_rx_buffer_calc: calculate the rx private buffer size for all TCs
985  * @hw: pointer to struct hns3_hw
986  * @buf_alloc: pointer to buffer calculation data
987  * @return: 0: calculate sucessful, negative: fail
988  */
989 static int
990 hns3_rx_buffer_calc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)
991 {
992         /* When DCB is not supported, rx private buffer is not allocated. */
993         if (!hns3_dev_dcb_supported(hw)) {
994                 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
995                 struct hns3_pf *pf = &hns->pf;
996                 uint32_t rx_all = pf->pkt_buf_size;
997
998                 rx_all -= hns3_get_tx_buff_alloced(buf_alloc);
999                 if (!hns3_is_rx_buf_ok(hw, buf_alloc, rx_all))
1000                         return -ENOMEM;
1001
1002                 return 0;
1003         }
1004
1005         /*
1006          * Try to allocate privated packet buffer for all TCs without share
1007          * buffer.
1008          */
1009         if (hns3_only_alloc_priv_buff(hw, buf_alloc))
1010                 return 0;
1011
1012         /*
1013          * Try to allocate privated packet buffer for all TCs with share
1014          * buffer.
1015          */
1016         if (hns3_rx_buf_calc_all(hw, true, buf_alloc))
1017                 return 0;
1018
1019         /*
1020          * For different application scenes, the enabled port number, TC number
1021          * and no_drop TC number are different. In order to obtain the better
1022          * performance, software could allocate the buffer size and configure
1023          * the waterline by tring to decrease the private buffer size according
1024          * to the order, namely, waterline of valided tc, pfc disabled tc, pfc
1025          * enabled tc.
1026          */
1027         if (hns3_rx_buf_calc_all(hw, false, buf_alloc))
1028                 return 0;
1029
1030         if (hns3_drop_nopfc_buf_till_fit(hw, buf_alloc))
1031                 return 0;
1032
1033         if (hns3_drop_pfc_buf_till_fit(hw, buf_alloc))
1034                 return 0;
1035
1036         return -ENOMEM;
1037 }
1038
1039 static int
1040 hns3_rx_priv_buf_alloc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)
1041 {
1042         struct hns3_rx_priv_buff_cmd *req;
1043         struct hns3_cmd_desc desc;
1044         uint32_t buf_size;
1045         int ret;
1046         int i;
1047
1048         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RX_PRIV_BUFF_ALLOC, false);
1049         req = (struct hns3_rx_priv_buff_cmd *)desc.data;
1050
1051         /* Alloc private buffer TCs */
1052         for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
1053                 struct hns3_priv_buf *priv = &buf_alloc->priv_buf[i];
1054
1055                 req->buf_num[i] =
1056                         rte_cpu_to_le_16(priv->buf_size >> HNS3_BUF_UNIT_S);
1057                 req->buf_num[i] |= rte_cpu_to_le_16(1 << HNS3_TC0_PRI_BUF_EN_B);
1058         }
1059
1060         buf_size = buf_alloc->s_buf.buf_size;
1061         req->shared_buf = rte_cpu_to_le_16((buf_size >> HNS3_BUF_UNIT_S) |
1062                                            (1 << HNS3_TC0_PRI_BUF_EN_B));
1063
1064         ret = hns3_cmd_send(hw, &desc, 1);
1065         if (ret)
1066                 PMD_INIT_LOG(ERR, "rx private buffer alloc cmd failed %d", ret);
1067
1068         return ret;
1069 }
1070
1071 static int
1072 hns3_rx_priv_wl_config(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)
1073 {
1074 #define HNS3_RX_PRIV_WL_ALLOC_DESC_NUM 2
1075         struct hns3_rx_priv_wl_buf *req;
1076         struct hns3_priv_buf *priv;
1077         struct hns3_cmd_desc desc[HNS3_RX_PRIV_WL_ALLOC_DESC_NUM];
1078         int i, j;
1079         int ret;
1080
1081         for (i = 0; i < HNS3_RX_PRIV_WL_ALLOC_DESC_NUM; i++) {
1082                 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_RX_PRIV_WL_ALLOC,
1083                                           false);
1084                 req = (struct hns3_rx_priv_wl_buf *)desc[i].data;
1085
1086                 /* The first descriptor set the NEXT bit to 1 */
1087                 if (i == 0)
1088                         desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
1089                 else
1090                         desc[i].flag &= ~rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
1091
1092                 for (j = 0; j < HNS3_TC_NUM_ONE_DESC; j++) {
1093                         uint32_t idx = i * HNS3_TC_NUM_ONE_DESC + j;
1094
1095                         priv = &buf_alloc->priv_buf[idx];
1096                         req->tc_wl[j].high = rte_cpu_to_le_16(priv->wl.high >>
1097                                                         HNS3_BUF_UNIT_S);
1098                         req->tc_wl[j].high |=
1099                                 rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B));
1100                         req->tc_wl[j].low = rte_cpu_to_le_16(priv->wl.low >>
1101                                                         HNS3_BUF_UNIT_S);
1102                         req->tc_wl[j].low |=
1103                                 rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B));
1104                 }
1105         }
1106
1107         /* Send 2 descriptor at one time */
1108         ret = hns3_cmd_send(hw, desc, HNS3_RX_PRIV_WL_ALLOC_DESC_NUM);
1109         if (ret)
1110                 PMD_INIT_LOG(ERR, "rx private waterline config cmd failed %d",
1111                              ret);
1112         return ret;
1113 }
1114
1115 static int
1116 hns3_common_thrd_config(struct hns3_hw *hw,
1117                         struct hns3_pkt_buf_alloc *buf_alloc)
1118 {
1119 #define HNS3_RX_COM_THRD_ALLOC_DESC_NUM 2
1120         struct hns3_shared_buf *s_buf = &buf_alloc->s_buf;
1121         struct hns3_rx_com_thrd *req;
1122         struct hns3_cmd_desc desc[HNS3_RX_COM_THRD_ALLOC_DESC_NUM];
1123         struct hns3_tc_thrd *tc;
1124         int tc_idx;
1125         int i, j;
1126         int ret;
1127
1128         for (i = 0; i < HNS3_RX_COM_THRD_ALLOC_DESC_NUM; i++) {
1129                 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_RX_COM_THRD_ALLOC,
1130                                           false);
1131                 req = (struct hns3_rx_com_thrd *)&desc[i].data;
1132
1133                 /* The first descriptor set the NEXT bit to 1 */
1134                 if (i == 0)
1135                         desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
1136                 else
1137                         desc[i].flag &= ~rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
1138
1139                 for (j = 0; j < HNS3_TC_NUM_ONE_DESC; j++) {
1140                         tc_idx = i * HNS3_TC_NUM_ONE_DESC + j;
1141                         tc = &s_buf->tc_thrd[tc_idx];
1142
1143                         req->com_thrd[j].high =
1144                                 rte_cpu_to_le_16(tc->high >> HNS3_BUF_UNIT_S);
1145                         req->com_thrd[j].high |=
1146                                  rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B));
1147                         req->com_thrd[j].low =
1148                                 rte_cpu_to_le_16(tc->low >> HNS3_BUF_UNIT_S);
1149                         req->com_thrd[j].low |=
1150                                  rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B));
1151                 }
1152         }
1153
1154         /* Send 2 descriptors at one time */
1155         ret = hns3_cmd_send(hw, desc, HNS3_RX_COM_THRD_ALLOC_DESC_NUM);
1156         if (ret)
1157                 PMD_INIT_LOG(ERR, "common threshold config cmd failed %d", ret);
1158
1159         return ret;
1160 }
1161
1162 static int
1163 hns3_common_wl_config(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)
1164 {
1165         struct hns3_shared_buf *buf = &buf_alloc->s_buf;
1166         struct hns3_rx_com_wl *req;
1167         struct hns3_cmd_desc desc;
1168         int ret;
1169
1170         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RX_COM_WL_ALLOC, false);
1171
1172         req = (struct hns3_rx_com_wl *)desc.data;
1173         req->com_wl.high = rte_cpu_to_le_16(buf->self.high >> HNS3_BUF_UNIT_S);
1174         req->com_wl.high |= rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B));
1175
1176         req->com_wl.low = rte_cpu_to_le_16(buf->self.low >> HNS3_BUF_UNIT_S);
1177         req->com_wl.low |= rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B));
1178
1179         ret = hns3_cmd_send(hw, &desc, 1);
1180         if (ret)
1181                 PMD_INIT_LOG(ERR, "common waterline config cmd failed %d", ret);
1182
1183         return ret;
1184 }
1185
1186 int
1187 hns3_buffer_alloc(struct hns3_hw *hw)
1188 {
1189         struct hns3_pkt_buf_alloc pkt_buf;
1190         int ret;
1191
1192         memset(&pkt_buf, 0, sizeof(pkt_buf));
1193         ret = hns3_tx_buffer_calc(hw, &pkt_buf);
1194         if (ret) {
1195                 PMD_INIT_LOG(ERR,
1196                              "could not calc tx buffer size for all TCs %d",
1197                              ret);
1198                 return ret;
1199         }
1200
1201         ret = hns3_tx_buffer_alloc(hw, &pkt_buf);
1202         if (ret) {
1203                 PMD_INIT_LOG(ERR, "could not alloc tx buffers %d", ret);
1204                 return ret;
1205         }
1206
1207         ret = hns3_rx_buffer_calc(hw, &pkt_buf);
1208         if (ret) {
1209                 PMD_INIT_LOG(ERR,
1210                              "could not calc rx priv buffer size for all TCs %d",
1211                              ret);
1212                 return ret;
1213         }
1214
1215         ret = hns3_rx_priv_buf_alloc(hw, &pkt_buf);
1216         if (ret) {
1217                 PMD_INIT_LOG(ERR, "could not alloc rx priv buffer %d", ret);
1218                 return ret;
1219         }
1220
1221         if (hns3_dev_dcb_supported(hw)) {
1222                 ret = hns3_rx_priv_wl_config(hw, &pkt_buf);
1223                 if (ret) {
1224                         PMD_INIT_LOG(ERR,
1225                                      "could not configure rx private waterline %d",
1226                                      ret);
1227                         return ret;
1228                 }
1229
1230                 ret = hns3_common_thrd_config(hw, &pkt_buf);
1231                 if (ret) {
1232                         PMD_INIT_LOG(ERR,
1233                                      "could not configure common threshold %d",
1234                                      ret);
1235                         return ret;
1236                 }
1237         }
1238
1239         ret = hns3_common_wl_config(hw, &pkt_buf);
1240         if (ret)
1241                 PMD_INIT_LOG(ERR, "could not configure common waterline %d",
1242                              ret);
1243
1244         return ret;
1245 }
1246
/*
 * Initialize the MAC: configure speed/duplex in hardware and program the
 * maximum packet size (pf->mps) as the hardware MTU.
 * Returns 0 on success, a negative error code otherwise.
 */
static int
hns3_mac_init(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_mac *mac = &hw->mac;
	struct hns3_pf *pf = &hns->pf;
	int ret;

	/* Assume SFP query is supported until proven otherwise. */
	pf->support_sfp_query = true;
	mac->link_duplex = ETH_LINK_FULL_DUPLEX;
	ret = hns3_cfg_mac_speed_dup_hw(hw, mac->link_speed, mac->link_duplex);
	if (ret) {
		PMD_INIT_LOG(ERR, "Config mac speed dup fail ret = %d", ret);
		return ret;
	}

	/* Software link state starts as down. */
	mac->link_status = ETH_LINK_DOWN;

	return hns3_config_mtu(hw, pf->mps);
}
1267
1268 static int
1269 hns3_get_mac_ethertype_cmd_status(uint16_t cmdq_resp, uint8_t resp_code)
1270 {
1271 #define HNS3_ETHERTYPE_SUCCESS_ADD              0
1272 #define HNS3_ETHERTYPE_ALREADY_ADD              1
1273 #define HNS3_ETHERTYPE_MGR_TBL_OVERFLOW         2
1274 #define HNS3_ETHERTYPE_KEY_CONFLICT             3
1275         int return_status;
1276
1277         if (cmdq_resp) {
1278                 PMD_INIT_LOG(ERR,
1279                              "cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
1280                              cmdq_resp);
1281                 return -EIO;
1282         }
1283
1284         switch (resp_code) {
1285         case HNS3_ETHERTYPE_SUCCESS_ADD:
1286         case HNS3_ETHERTYPE_ALREADY_ADD:
1287                 return_status = 0;
1288                 break;
1289         case HNS3_ETHERTYPE_MGR_TBL_OVERFLOW:
1290                 PMD_INIT_LOG(ERR,
1291                              "add mac ethertype failed for manager table overflow.");
1292                 return_status = -EIO;
1293                 break;
1294         case HNS3_ETHERTYPE_KEY_CONFLICT:
1295                 PMD_INIT_LOG(ERR, "add mac ethertype failed for key conflict.");
1296                 return_status = -EIO;
1297                 break;
1298         default:
1299                 PMD_INIT_LOG(ERR,
1300                              "add mac ethertype failed for undefined, code=%d.",
1301                              resp_code);
1302                 return_status = -EIO;
1303         }
1304
1305         return return_status;
1306 }
1307
1308 static int
1309 hns3_add_mgr_tbl(struct hns3_hw *hw,
1310                  const struct hns3_mac_mgr_tbl_entry_cmd *req)
1311 {
1312         struct hns3_cmd_desc desc;
1313         uint8_t resp_code;
1314         uint16_t retval;
1315         int ret;
1316
1317         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_ETHTYPE_ADD, false);
1318         memcpy(desc.data, req, sizeof(struct hns3_mac_mgr_tbl_entry_cmd));
1319
1320         ret = hns3_cmd_send(hw, &desc, 1);
1321         if (ret) {
1322                 PMD_INIT_LOG(ERR,
1323                              "add mac ethertype failed for cmd_send, ret =%d.",
1324                              ret);
1325                 return ret;
1326         }
1327
1328         resp_code = (rte_le_to_cpu_32(desc.data[0]) >> 8) & 0xff;
1329         retval = rte_le_to_cpu_16(desc.retval);
1330
1331         return hns3_get_mac_ethertype_cmd_status(retval, resp_code);
1332 }
1333
1334 static void
1335 hns3_prepare_mgr_tbl(struct hns3_mac_mgr_tbl_entry_cmd *mgr_table,
1336                      int *table_item_num)
1337 {
1338         struct hns3_mac_mgr_tbl_entry_cmd *tbl;
1339
1340         /*
1341          * In current version, we add one item in management table as below:
1342          * 0x0180C200000E -- LLDP MC address
1343          */
1344         tbl = mgr_table;
1345         tbl->flags = HNS3_MAC_MGR_MASK_VLAN_B;
1346         tbl->ethter_type = rte_cpu_to_le_16(HNS3_MAC_ETHERTYPE_LLDP);
1347         tbl->mac_addr_hi32 = rte_cpu_to_le_32(htonl(0x0180C200));
1348         tbl->mac_addr_lo16 = rte_cpu_to_le_16(htons(0x000E));
1349         tbl->i_port_bitmap = 0x1;
1350         *table_item_num = 1;
1351 }
1352
1353 static int
1354 hns3_init_mgr_tbl(struct hns3_hw *hw)
1355 {
1356 #define HNS_MAC_MGR_TBL_MAX_SIZE        16
1357         struct hns3_mac_mgr_tbl_entry_cmd mgr_table[HNS_MAC_MGR_TBL_MAX_SIZE];
1358         int table_item_num;
1359         int ret;
1360         int i;
1361
1362         memset(mgr_table, 0, sizeof(mgr_table));
1363         hns3_prepare_mgr_tbl(mgr_table, &table_item_num);
1364         for (i = 0; i < table_item_num; i++) {
1365                 ret = hns3_add_mgr_tbl(hw, &mgr_table[i]);
1366                 if (ret) {
1367                         PMD_INIT_LOG(ERR, "add mac ethertype failed, ret =%d",
1368                                      ret);
1369                         return ret;
1370                 }
1371         }
1372
1373         return 0;
1374 }
1375
1376 static void
1377 hns3_promisc_param_init(struct hns3_promisc_param *param, bool en_uc,
1378                         bool en_mc, bool en_bc, int vport_id)
1379 {
1380         if (!param)
1381                 return;
1382
1383         memset(param, 0, sizeof(struct hns3_promisc_param));
1384         if (en_uc)
1385                 param->enable = HNS3_PROMISC_EN_UC;
1386         if (en_mc)
1387                 param->enable |= HNS3_PROMISC_EN_MC;
1388         if (en_bc)
1389                 param->enable |= HNS3_PROMISC_EN_BC;
1390         param->vf_id = vport_id;
1391 }
1392
1393 static int
1394 hns3_cmd_set_promisc_mode(struct hns3_hw *hw, struct hns3_promisc_param *param)
1395 {
1396         struct hns3_promisc_cfg_cmd *req;
1397         struct hns3_cmd_desc desc;
1398         int ret;
1399
1400         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_PROMISC_MODE, false);
1401
1402         req = (struct hns3_promisc_cfg_cmd *)desc.data;
1403         req->vf_id = param->vf_id;
1404         req->flag = (param->enable << HNS3_PROMISC_EN_B) |
1405             HNS3_PROMISC_TX_EN_B | HNS3_PROMISC_RX_EN_B;
1406
1407         ret = hns3_cmd_send(hw, &desc, 1);
1408         if (ret)
1409                 PMD_INIT_LOG(ERR, "Set promisc mode fail, status is %d", ret);
1410
1411         return ret;
1412 }
1413
1414 static int
1415 hns3_set_promisc_mode(struct hns3_hw *hw, bool en_uc_pmc, bool en_mc_pmc)
1416 {
1417         struct hns3_promisc_param param;
1418         bool en_bc_pmc = true;
1419         uint8_t vf_id;
1420         int ret;
1421
1422         /*
1423          * In current version VF is not supported when PF is driven by DPDK
1424          * driver, the PF-related vf_id is 0, just need to configure parameters
1425          * for vf_id 0.
1426          */
1427         vf_id = 0;
1428
1429         hns3_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc, vf_id);
1430         ret = hns3_cmd_set_promisc_mode(hw, &param);
1431         if (ret)
1432                 return ret;
1433
1434         return 0;
1435 }
1436
/*
 * Bring up the hardware features needed by the PF: TQP mapping, unicast
 * MAC VLAN (UMV) space, MAC, manager table, promiscuous mode, TSO and
 * GRO. On failure after UMV init, the UMV space is released again.
 * Returns 0 on success, a negative error code otherwise.
 */
static int
hns3_init_hardware(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	int ret;

	ret = hns3_map_tqp(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to map tqp: %d", ret);
		return ret;
	}

	ret = hns3_init_umv_space(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to init umv space: %d", ret);
		return ret;
	}

	ret = hns3_mac_init(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to init MAC: %d", ret);
		goto err_mac_init;
	}

	ret = hns3_init_mgr_tbl(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to init manager table: %d", ret);
		goto err_mac_init;
	}

	/* Promiscuous mode starts fully disabled. */
	ret = hns3_set_promisc_mode(hw, false, false);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to set promisc mode: %d", ret);
		goto err_mac_init;
	}

	ret = hns3_config_tso(hw, HNS3_TSO_MSS_MIN, HNS3_TSO_MSS_MAX);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to config tso: %d", ret);
		goto err_mac_init;
	}

	/* GRO starts disabled. */
	ret = hns3_config_gro(hw, false);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to config gro: %d", ret);
		goto err_mac_init;
	}
	return 0;

err_mac_init:
	hns3_uninit_umv_space(hw);
	return ret;
}
1490
/*
 * Initialize the PF: map the register BAR, bring up the firmware command
 * queue, read the device configuration and initialize the hardware.
 * On failure, everything initialized so far is torn down in reverse
 * order via the goto cleanup chain.
 * Returns 0 on success, a negative error code otherwise.
 */
static int
hns3_init_pf(struct rte_eth_dev *eth_dev)
{
	struct rte_device *dev = eth_dev->device;
	struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev);
	struct hns3_adapter *hns = eth_dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	int ret;

	PMD_INIT_FUNC_TRACE();

	/* Get hardware io base address from pcie BAR2 IO space */
	hw->io_base = pci_dev->mem_resource[2].addr;

	/* Firmware command queue initialize */
	ret = hns3_cmd_init_queue(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to init cmd queue: %d", ret);
		goto err_cmd_init_queue;
	}

	/* Firmware command initialize */
	ret = hns3_cmd_init(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to init cmd: %d", ret);
		goto err_cmd_init;
	}

	/* Get configuration */
	ret = hns3_get_configuration(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to fetch configuration: %d", ret);
		goto err_get_config;
	}

	ret = hns3_init_hardware(hns);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to init hardware: %d", ret);
		goto err_get_config;
	}

	return 0;

err_get_config:
	hns3_cmd_uninit(hw);

err_cmd_init:
	hns3_cmd_destroy_queue(hw);

err_cmd_init_queue:
	hw->io_base = NULL;

	return ret;
}
1545
/*
 * Tear down the PF in reverse order of hns3_init_pf(): release the UMV
 * space, uninitialize the firmware command layer and queue, and drop the
 * mapped register base.
 */
static void
hns3_uninit_pf(struct rte_eth_dev *eth_dev)
{
	struct hns3_adapter *hns = eth_dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;

	PMD_INIT_FUNC_TRACE();

	hns3_uninit_umv_space(hw);
	hns3_cmd_uninit(hw);
	hns3_cmd_destroy_queue(hw);
	hw->io_base = NULL;
}
1559
/*
 * dev_close callback: mark the adapter as closing, release all PF
 * resources, then mark it closed.
 */
static void
hns3_dev_close(struct rte_eth_dev *eth_dev)
{
	struct hns3_adapter *hns = eth_dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;

	hw->adapter_state = HNS3_NIC_CLOSING;
	hns3_uninit_pf(eth_dev);
	hw->adapter_state = HNS3_NIC_CLOSED;
}
1570
/* ethdev callbacks; only dev_close is implemented at this stage. */
static const struct eth_dev_ops hns3_eth_dev_ops = {
	.dev_close          = hns3_dev_close,
};
1574
/*
 * ethdev init callback: set up the ops table, initialize the PF and
 * allocate the MAC address array. Only the primary process performs the
 * hardware initialization; secondary processes just attach the ops.
 * Returns 0 on success, a negative error code otherwise.
 */
static int
hns3_dev_init(struct rte_eth_dev *eth_dev)
{
	struct hns3_adapter *hns = eth_dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	int ret;

	PMD_INIT_FUNC_TRACE();

	eth_dev->dev_ops = &hns3_eth_dev_ops;
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	hw->adapter_state = HNS3_NIC_UNINITIALIZED;
	hns->is_vf = false;
	hw->data = eth_dev->data;

	/*
	 * Set default max packet size according to the mtu
	 * default value in DPDK frame.
	 */
	hns->pf.mps = hw->data->mtu + HNS3_ETH_OVERHEAD;

	ret = hns3_init_pf(eth_dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to init pf: %d", ret);
		goto err_init_pf;
	}

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("hns3-mac",
					       sizeof(struct rte_ether_addr) *
					       HNS3_UC_MACADDR_NUM, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate %zx bytes needed "
			     "to store MAC addresses",
			     sizeof(struct rte_ether_addr) *
			     HNS3_UC_MACADDR_NUM);
		ret = -ENOMEM;
		goto err_rte_zmalloc;
	}

	/* Expose the hardware default MAC as address 0. */
	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.mac_addr,
			    &eth_dev->data->mac_addrs[0]);

	hw->adapter_state = HNS3_NIC_INITIALIZED;
	/*
	 * Pass the information to the rte_eth_dev_close() that it should also
	 * release the private port resources.
	 */
	eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;

	hns3_info(hw, "hns3 dev initialization successful!");
	return 0;

err_rte_zmalloc:
	hns3_uninit_pf(eth_dev);

err_init_pf:
	/* Detach all entry points so the half-initialized port is inert. */
	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;
	eth_dev->tx_pkt_prepare = NULL;
	return ret;
}
1640
1641 static int
1642 hns3_dev_uninit(struct rte_eth_dev *eth_dev)
1643 {
1644         struct hns3_adapter *hns = eth_dev->data->dev_private;
1645         struct hns3_hw *hw = &hns->hw;
1646
1647         PMD_INIT_FUNC_TRACE();
1648
1649         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1650                 return -EPERM;
1651
1652         eth_dev->dev_ops = NULL;
1653         eth_dev->rx_pkt_burst = NULL;
1654         eth_dev->tx_pkt_burst = NULL;
1655         eth_dev->tx_pkt_prepare = NULL;
1656         if (hw->adapter_state < HNS3_NIC_CLOSING)
1657                 hns3_dev_close(eth_dev);
1658
1659         hw->adapter_state = HNS3_NIC_REMOVED;
1660         return 0;
1661 }
1662
/* PCI probe callback: create one ethdev per matched device. */
static int
eth_hns3_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		   struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
					     sizeof(struct hns3_adapter),
					     hns3_dev_init);
}
1671
/* PCI remove callback: destroy the ethdev bound to the device. */
static int
eth_hns3_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, hns3_dev_uninit);
}
1677
/* PCI device IDs handled by this PMD; terminated by a zero sentinel. */
static const struct rte_pci_id pci_id_hns3_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_GE) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_25GE) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_25GE_RDMA) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_50GE_RDMA) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_100G_RDMA_MACSEC) },
	{ .vendor_id = 0, /* sentinel */ },
};
1686
/* PCI driver definition tying the ID table to the probe/remove hooks. */
static struct rte_pci_driver rte_hns3_pmd = {
	.id_table = pci_id_hns3_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = eth_hns3_pci_probe,
	.remove = eth_hns3_pci_remove,
};
1693
/*
 * Register the PMD with the PCI bus, publish its device ID table, and
 * declare the kernel modules it can bind against.
 */
RTE_PMD_REGISTER_PCI(net_hns3, rte_hns3_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_hns3, pci_id_hns3_map);
RTE_PMD_REGISTER_KMOD_DEP(net_hns3, "* igb_uio | vfio-pci");
1697
/*
 * Constructor: register the init and driver log types and default both
 * to NOTICE level. A negative return from rte_log_register means the
 * type could not be registered, in which case the level is left alone.
 */
RTE_INIT(hns3_init_log)
{
	hns3_logtype_init = rte_log_register("pmd.net.hns3.init");
	if (hns3_logtype_init >= 0)
		rte_log_set_level(hns3_logtype_init, RTE_LOG_NOTICE);
	hns3_logtype_driver = rte_log_register("pmd.net.hns3.driver");
	if (hns3_logtype_driver >= 0)
		rte_log_set_level(hns3_logtype_driver, RTE_LOG_NOTICE);
}