net/bnxt: add ring group alloc/free
drivers/net/bnxt/bnxt_hwrm.c
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) Broadcom Limited.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Broadcom Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_version.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_rxq.h"
#include "bnxt_ring.h"
#include "bnxt_txq.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"

/*
 * Upper bound on poll iterations while waiting for a command response;
 * with the 600us delay used in bnxt_hwrm_send_message_locked() this
 * allows roughly 1.2 seconds per command.
 */
#define HWRM_CMD_TIMEOUT                2000

/*
 * HWRM Functions (sent to HWRM)
 * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
 * fails (i.e., the channel times out), and a positive non-zero HWRM error
 * code if the ChiMP rejects the command.
 */
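/*
 * A typical caller can therefore triage failures like this (sketch only;
 * most callers simply propagate rc):
 *
 *      rc = bnxt_hwrm_func_reset(bp);
 *      if (rc < 0) {
 *              // the HWRM channel timed out
 *      } else if (rc > 0) {
 *              // the ChiMP rejected the command; rc is the HWRM error code
 *      }
 */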

static int bnxt_hwrm_send_message_locked(struct bnxt *bp, void *msg,
                                        uint32_t msg_len)
{
        unsigned int i;
        struct input *req = msg;
        struct output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t *data = msg;
        uint8_t *bar;
        uint8_t *valid;

        /* Write request msg to hwrm channel */
        for (i = 0; i < msg_len; i += 4) {
                bar = (uint8_t *)bp->bar0 + i;
                *(volatile uint32_t *)bar = *data;
                data++;
        }

        /* Zero the rest of the request space */
        for (; i < bp->max_req_len; i += 4) {
                bar = (uint8_t *)bp->bar0 + i;
                *(volatile uint32_t *)bar = 0;
        }

        /* Ring channel doorbell */
        bar = (uint8_t *)bp->bar0 + 0x100;
        *(volatile uint32_t *)bar = 1;

        /* Poll for the valid bit */
        for (i = 0; i < HWRM_CMD_TIMEOUT; i++) {
                /* Sanity check on the resp->resp_len */
                rte_rmb();
                if (resp->resp_len && resp->resp_len <=
                                bp->max_resp_len) {
                        /* Last byte of resp contains the valid key */
                        valid = (uint8_t *)resp + resp->resp_len - 1;
                        if (*valid == HWRM_RESP_VALID_KEY)
                                break;
                }
                rte_delay_us(600);
        }

        if (i >= HWRM_CMD_TIMEOUT) {
                RTE_LOG(ERR, PMD, "Error sending msg %x\n",
                        req->req_type);
                goto err_ret;
        }
        return 0;

err_ret:
        return -1;
}

static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg, uint32_t msg_len)
{
        int rc;

        rte_spinlock_lock(&bp->hwrm_lock);
        rc = bnxt_hwrm_send_message_locked(bp, msg, msg_len);
        rte_spinlock_unlock(&bp->hwrm_lock);
        return rc;
}

#define HWRM_PREP(req, type, cr, resp) \
        memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
        req.req_type = rte_cpu_to_le_16(HWRM_##type); \
        req.cmpl_ring = rte_cpu_to_le_16(cr); \
        req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
        req.target_id = rte_cpu_to_le_16(0xffff); \
        req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr)
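
/*
 * Note: HWRM_PREP expands to several statements and is not wrapped in
 * do { } while (0), so it must only appear as a standalone statement,
 * never as the unbraced body of an if/else.
 */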

#define HWRM_CHECK_RESULT \
        { \
                if (rc) { \
                        RTE_LOG(ERR, PMD, "%s failed rc:%d\n", \
                                __func__, rc); \
                        return rc; \
                } \
                if (resp->error_code) { \
                        rc = rte_le_to_cpu_16(resp->error_code); \
                        RTE_LOG(ERR, PMD, "%s error %d\n", __func__, rc); \
                        return rc; \
                } \
        }
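
/*
 * Taken together, a typical command wrapper below follows this shape
 * (a sketch; "foo" is illustrative, not a real HWRM command):
 *
 *      struct hwrm_foo_input req = {.req_type = 0 };
 *      struct hwrm_foo_output *resp = bp->hwrm_cmd_resp_addr;
 *
 *      HWRM_PREP(req, FOO, -1, resp);
 *      ... fill request fields, little-endian ...
 *      rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 *      HWRM_CHECK_RESULT;
 *      ... read outputs from *resp before issuing the next command ...
 */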

int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, CFA_L2_SET_RX_MASK, -1, resp);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
        req.mask = 0;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t mask = 0;

        HWRM_PREP(req, CFA_L2_SET_RX_MASK, -1, resp);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

        /* FIXME add multicast flag, when multicast adding options is supported
         * by ethtool.
         */
        if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
                mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
        if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
                mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
        req.mask = rte_cpu_to_le_32(HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST |
                                    HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST |
                                    mask);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

int bnxt_hwrm_clear_filter(struct bnxt *bp,
                           struct bnxt_filter_info *filter)
{
        int rc = 0;
        struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, CFA_L2_FILTER_FREE, -1, resp);

        req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        filter->fw_l2_filter_id = -1;

        return 0;
}

int bnxt_hwrm_set_filter(struct bnxt *bp,
                         struct bnxt_vnic_info *vnic,
                         struct bnxt_filter_info *filter)
{
        int rc = 0;
        struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t enables = 0;

        HWRM_PREP(req, CFA_L2_FILTER_ALLOC, -1, resp);

        req.flags = rte_cpu_to_le_32(filter->flags);

        enables = filter->enables |
              HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
        req.dst_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
                memcpy(req.l2_addr, filter->l2_addr,
                       ETHER_ADDR_LEN);
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
                memcpy(req.l2_addr_mask, filter->l2_addr_mask,
                       ETHER_ADDR_LEN);
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
                req.l2_ovlan = filter->l2_ovlan;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
                req.l2_ovlan_mask = filter->l2_ovlan_mask;

        req.enables = rte_cpu_to_le_32(enables);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);

        return rc;
}

int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, void *fwd_cmd)
{
        int rc;
        struct hwrm_exec_fwd_resp_input req = {.req_type = 0 };
        struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, EXEC_FWD_RESP, -1, resp);

        memcpy(req.encap_request, fwd_cmd,
               sizeof(req.encap_request));

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_func_qcaps_input req = {.req_type = 0 };
        struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, FUNC_QCAPS, -1, resp);

        req.fid = rte_cpu_to_le_16(0xffff);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
        if (BNXT_PF(bp)) {
                struct bnxt_pf_info *pf = &bp->pf;

                pf->fw_fid = rte_le_to_cpu_32(resp->fid);
                pf->port_id = resp->port_id;
                memcpy(pf->mac_addr, resp->perm_mac_address, ETHER_ADDR_LEN);
                pf->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
                pf->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
                pf->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
                pf->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
                pf->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
                pf->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
                pf->first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
                pf->max_vfs = rte_le_to_cpu_16(resp->max_vfs);
        } else {
                struct bnxt_vf_info *vf = &bp->vf;

                vf->fw_fid = rte_le_to_cpu_32(resp->fid);
                memcpy(vf->mac_addr, &resp->perm_mac_address, ETHER_ADDR_LEN);
                vf->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
                vf->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
                vf->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
                vf->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
                vf->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
                vf->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
        }

        return rc;
}

int bnxt_hwrm_func_reset(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_func_reset_input req = {.req_type = 0 };
        struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, FUNC_RESET, -1, resp);

        req.enables = rte_cpu_to_le_32(0);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

int bnxt_hwrm_func_driver_register(struct bnxt *bp, uint32_t flags,
                                   uint32_t *vf_req_fwd)
{
        int rc;
        struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
        struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;

        if (bp->flags & BNXT_FLAG_REGISTERED)
                return 0;

        HWRM_PREP(req, FUNC_DRV_RGTR, -1, resp);
        req.flags = flags;
        req.enables = HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER;
        req.ver_maj = RTE_VER_YEAR;
        req.ver_min = RTE_VER_MONTH;
        req.ver_upd = RTE_VER_MINOR;

        memcpy(req.vf_req_fwd, vf_req_fwd, sizeof(req.vf_req_fwd));

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        bp->flags |= BNXT_FLAG_REGISTERED;

        return rc;
}

int bnxt_hwrm_ver_get(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_ver_get_input req = {.req_type = 0 };
        struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t my_version;
        uint32_t fw_version;
        uint16_t max_resp_len;
        char type[RTE_MEMZONE_NAMESIZE];

        HWRM_PREP(req, VER_GET, -1, resp);

        req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
        req.hwrm_intf_min = HWRM_VERSION_MINOR;
        req.hwrm_intf_upd = HWRM_VERSION_UPDATE;

        /*
         * Hold the lock since we may be adjusting the response pointers.
         */
        rte_spinlock_lock(&bp->hwrm_lock);
        rc = bnxt_hwrm_send_message_locked(bp, &req, sizeof(req));

        /*
         * Check the result inline (rather than via HWRM_CHECK_RESULT) so
         * that an error path does not return with hwrm_lock still held.
         */
        if (rc) {
                RTE_LOG(ERR, PMD, "%s failed rc:%d\n", __func__, rc);
                goto error;
        }
        if (resp->error_code) {
                rc = rte_le_to_cpu_16(resp->error_code);
                RTE_LOG(ERR, PMD, "%s error %d\n", __func__, rc);
                goto error;
        }

        RTE_LOG(INFO, PMD, "%d.%d.%d:%d.%d.%d\n",
                resp->hwrm_intf_maj, resp->hwrm_intf_min,
                resp->hwrm_intf_upd,
                resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld);

        my_version = HWRM_VERSION_MAJOR << 16;
        my_version |= HWRM_VERSION_MINOR << 8;
        my_version |= HWRM_VERSION_UPDATE;

        fw_version = resp->hwrm_intf_maj << 16;
        fw_version |= resp->hwrm_intf_min << 8;
        fw_version |= resp->hwrm_intf_upd;

        if (resp->hwrm_intf_maj != HWRM_VERSION_MAJOR) {
                RTE_LOG(ERR, PMD, "Unsupported firmware API version\n");
                rc = -EINVAL;
                goto error;
        }

        if (my_version != fw_version) {
                RTE_LOG(INFO, PMD, "BNXT Driver/HWRM API mismatch.\n");
                if (my_version < fw_version) {
                        RTE_LOG(INFO, PMD,
                                "Firmware API version is newer than driver.\n");
                        RTE_LOG(INFO, PMD,
                                "The driver may be missing features.\n");
                } else {
                        RTE_LOG(INFO, PMD,
                                "Firmware API version is older than driver.\n");
                        RTE_LOG(INFO, PMD,
                                "Not all driver features may be functional.\n");
                }
        }

        if (bp->max_req_len > resp->max_req_win_len) {
                RTE_LOG(ERR, PMD, "Unsupported request length\n");
                rc = -EINVAL;
        }
        bp->max_req_len = resp->max_req_win_len;
        max_resp_len = resp->max_resp_len;
        if (bp->max_resp_len != max_resp_len) {
                sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x",
                        bp->pdev->addr.domain, bp->pdev->addr.bus,
                        bp->pdev->addr.devid, bp->pdev->addr.function);

                rte_free(bp->hwrm_cmd_resp_addr);

                bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
                if (bp->hwrm_cmd_resp_addr == NULL) {
                        rc = -ENOMEM;
                        goto error;
                }
                bp->hwrm_cmd_resp_dma_addr =
                        rte_malloc_virt2phy(bp->hwrm_cmd_resp_addr);
                bp->max_resp_len = max_resp_len;
        }

error:
        rte_spinlock_unlock(&bp->hwrm_lock);
        return rc;
}

int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
{
        int rc;
        struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
        struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;

        if (!(bp->flags & BNXT_FLAG_REGISTERED))
                return 0;

        HWRM_PREP(req, FUNC_DRV_UNRGTR, -1, resp);
        req.flags = flags;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        bp->flags &= ~BNXT_FLAG_REGISTERED;

        return rc;
}

static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
{
        int rc = 0;
        struct hwrm_port_phy_cfg_input req = {.req_type = 0};
        struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, PORT_PHY_CFG, -1, resp);

        req.flags = conf->phy_flags;
        if (conf->link_up) {
                req.force_link_speed = conf->link_speed;
                /*
                 * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
                 * any auto mode, even "none".
                 */
                if (conf->auto_mode == HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE) {
                        req.flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
                } else {
                        req.auto_mode = conf->auto_mode;
                        req.enables |=
                                HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
                        req.auto_link_speed_mask = conf->auto_link_speed_mask;
                        req.enables |=
                           HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
                        req.auto_link_speed = conf->auto_link_speed;
                        req.enables |=
                                HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED;
                }
                req.auto_duplex = conf->duplex;
                req.enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
                req.auto_pause = conf->auto_pause;
                /* Set force_pause if there is no auto or if there is a force */
                if (req.auto_pause)
                        req.enables |=
                                HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
                else
                        req.enables |=
                                HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;
                req.force_pause = conf->force_pause;
                if (req.force_pause)
                        req.enables |=
                                HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;
        } else {
                req.flags &= ~HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
                req.flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DOWN;
                req.force_link_speed = 0;
        }

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
        struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, QUEUE_QPORTCFG, -1, resp);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

#define GET_QUEUE_INFO(x) \
        bp->cos_queue[x].id = resp->queue_id##x; \
        bp->cos_queue[x].profile = resp->queue_id##x##_service_profile

        GET_QUEUE_INFO(0);
        GET_QUEUE_INFO(1);
        GET_QUEUE_INFO(2);
        GET_QUEUE_INFO(3);
        GET_QUEUE_INFO(4);
        GET_QUEUE_INFO(5);
        GET_QUEUE_INFO(6);
        GET_QUEUE_INFO(7);

        return rc;
}

int bnxt_hwrm_ring_alloc(struct bnxt *bp,
                         struct bnxt_ring *ring,
                         uint32_t ring_type, uint32_t map_index,
                         uint32_t stats_ctx_id)
{
        int rc = 0;
        struct hwrm_ring_alloc_input req = {.req_type = 0 };
        struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_ALLOC, -1, resp);

        req.enables = rte_cpu_to_le_32(0);

        req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
        req.fbo = rte_cpu_to_le_32(0);
        /* Association of ring index with doorbell index */
        req.logical_id = rte_cpu_to_le_16(map_index);

        switch (ring_type) {
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
                req.queue_id = bp->cos_queue[0].id;
                /* FALLTHROUGH: TX rings share the remaining setup with RX */
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
                req.ring_type = ring_type;
                req.cmpl_ring_id =
                    rte_cpu_to_le_16(bp->grp_info[map_index].cp_fw_ring_id);
                req.length = rte_cpu_to_le_32(ring->ring_size);
                req.stat_ctx_id = rte_cpu_to_le_16(stats_ctx_id);
                req.enables = rte_cpu_to_le_32(rte_le_to_cpu_32(req.enables) |
                        HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID);
                break;
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_CMPL:
                req.ring_type = ring_type;
                req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_POLL;
                req.length = rte_cpu_to_le_32(ring->ring_size);
                break;
        default:
                RTE_LOG(ERR, PMD, "hwrm alloc invalid ring type %d\n",
                        ring_type);
                return -1;
        }

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        if (rc || resp->error_code) {
                if (rc == 0 && resp->error_code)
                        rc = rte_le_to_cpu_16(resp->error_code);
                switch (ring_type) {
                case HWRM_RING_FREE_INPUT_RING_TYPE_CMPL:
                        RTE_LOG(ERR, PMD,
                                "hwrm_ring_alloc cp failed. rc:%d\n", rc);
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
                        RTE_LOG(ERR, PMD,
                                "hwrm_ring_alloc rx failed. rc:%d\n", rc);
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
                        RTE_LOG(ERR, PMD,
                                "hwrm_ring_alloc tx failed. rc:%d\n", rc);
                        return rc;
                default:
                        RTE_LOG(ERR, PMD, "Invalid ring. rc:%d\n", rc);
                        return rc;
                }
        }

        ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
        return rc;
}

int bnxt_hwrm_ring_free(struct bnxt *bp,
                        struct bnxt_ring *ring, uint32_t ring_type)
{
        int rc;
        struct hwrm_ring_free_input req = {.req_type = 0 };
        struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_FREE, -1, resp);

        req.ring_type = ring_type;
        req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        if (rc || resp->error_code) {
                if (rc == 0 && resp->error_code)
                        rc = rte_le_to_cpu_16(resp->error_code);

                switch (ring_type) {
                case HWRM_RING_FREE_INPUT_RING_TYPE_CMPL:
                        RTE_LOG(ERR, PMD, "hwrm_ring_free cp failed. rc:%d\n",
                                rc);
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
                        RTE_LOG(ERR, PMD, "hwrm_ring_free rx failed. rc:%d\n",
                                rc);
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
                        RTE_LOG(ERR, PMD, "hwrm_ring_free tx failed. rc:%d\n",
                                rc);
                        return rc;
                default:
                        RTE_LOG(ERR, PMD, "Invalid ring, rc:%d\n", rc);
                        return rc;
                }
        }
        return 0;
}
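
/*
 * A ring group bundles the rings that back a single Rx queue. In the
 * alloc request below, going by the grp_info field names: cr is the
 * completion ring, rr the receive ring, ar the aggregation ring, and
 * sc the statistics context to associate with the group.
 */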

int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
{
        int rc = 0;
        struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
        struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_GRP_ALLOC, -1, resp);

        req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
        req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
        req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
        req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        bp->grp_info[idx].fw_grp_id =
            rte_le_to_cpu_16(resp->ring_group_id);

        return rc;
}

int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
{
        int rc;
        struct hwrm_ring_grp_free_input req = {.req_type = 0 };
        struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_GRP_FREE, -1, resp);

        req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
        return rc;
}

int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
{
        int rc = 0;
        struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
        struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, STAT_CTX_CLR_STATS, -1, resp);

        if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
                return rc;

        req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
        req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp,
                             struct bnxt_cp_ring_info *cpr, unsigned int idx)
{
        int rc;
        struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
        struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, STAT_CTX_ALLOC, -1, resp);

        req.update_period_ms = rte_cpu_to_le_32(1000);

        req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++);
        req.stats_dma_addr =
            rte_cpu_to_le_64(cpr->hw_stats_map);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);
        bp->grp_info[idx].fw_stats_ctx = cpr->hw_stats_ctx_id;

        return rc;
}

int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0, i, j;
        struct hwrm_vnic_alloc_input req = {.req_type = 0 };
        struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        /* map ring groups to this vnic */
        for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++) {
                if (bp->grp_info[i].fw_grp_id == (uint16_t)HWRM_NA_SIGNATURE) {
                        RTE_LOG(ERR, PMD,
                                "Not enough ring groups avail:%x req:%x\n", j,
                                (vnic->end_grp_id - vnic->start_grp_id) + 1);
                        break;
                }
                vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
        }

        vnic->fw_rss_cos_lb_ctx = (uint16_t)HWRM_NA_SIGNATURE;
        vnic->ctx_is_rss_cos_lb = HW_CONTEXT_NONE;

        HWRM_PREP(req, VNIC_ALLOC, -1, resp);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
        return rc;
}

int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_cfg_input req = {.req_type = 0 };
        struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_CFG, -1, resp);

        /* Only RSS is supported for now; TBD: COS & LB */
        req.enables =
            rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP |
                             HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE |
                             HWRM_VNIC_CFG_INPUT_ENABLES_MRU);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
        req.dflt_ring_grp =
                rte_cpu_to_le_16(bp->grp_info[vnic->start_grp_id].fw_grp_id);
        req.rss_rule = rte_cpu_to_le_16(vnic->fw_rss_cos_lb_ctx);
        req.cos_rule = rte_cpu_to_le_16(0xffff);
        req.lb_rule = rte_cpu_to_le_16(0xffff);
        req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
                                   ETHER_CRC_LEN + VLAN_TAG_SIZE);
        if (vnic->func_default)
                req.flags = 1;
        if (vnic->vlan_strip)
                req.flags |=
                    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
        struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
                                                bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC, -1, resp);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        vnic->fw_rss_cos_lb_ctx = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);

        return rc;
}

int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
        struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
                                                bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE, -1, resp);

        req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->fw_rss_cos_lb_ctx);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        vnic->fw_rss_cos_lb_ctx = INVALID_HW_RING_ID;

        return rc;
}

int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_free_input req = {.req_type = 0 };
        struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;

        if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
                return rc;

        HWRM_PREP(req, VNIC_FREE, -1, resp);

        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        vnic->fw_vnic_id = INVALID_HW_RING_ID;
        return rc;
}

int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
                           struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
        struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_RSS_CFG, -1, resp);

        req.hash_type = rte_cpu_to_le_32(vnic->hash_type);

        req.ring_grp_tbl_addr =
            rte_cpu_to_le_64(vnic->rss_table_dma_addr);
        req.hash_key_tbl_addr =
            rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
        req.rss_ctx_idx = rte_cpu_to_le_16(vnic->fw_rss_cos_lb_ctx);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

/*
 * HWRM utility functions
 */

int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
{
        unsigned int i;
        int rc = 0;

        for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
                struct bnxt_tx_queue *txq;
                struct bnxt_rx_queue *rxq;
                struct bnxt_cp_ring_info *cpr;

                if (i >= bp->rx_cp_nr_rings) {
                        txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
                        cpr = txq->cp_ring;
                } else {
                        rxq = bp->rx_queues[i];
                        cpr = rxq->cp_ring;
                }

                rc = bnxt_hwrm_stat_clear(bp, cpr);
                if (rc)
                        return rc;
        }
        return 0;
}

int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
{
        unsigned int i;
        int rc = 0;

        for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
                struct bnxt_tx_queue *txq;
                struct bnxt_rx_queue *rxq;
                struct bnxt_cp_ring_info *cpr;
                unsigned int idx = i + 1;

                if (i >= bp->rx_cp_nr_rings) {
                        txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
                        cpr = txq->cp_ring;
                } else {
                        rxq = bp->rx_queues[i];
                        cpr = rxq->cp_ring;
                }

                rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, idx);

                if (rc)
                        return rc;
        }
        return rc;
}

int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
{
        uint16_t i;
        uint32_t rc = 0;

        for (i = 0; i < bp->rx_cp_nr_rings; i++) {
                unsigned int idx = i + 1;

                if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID) {
                        RTE_LOG(ERR, PMD,
                                "Attempt to free invalid ring group %d\n",
                                idx);
                        continue;
                }

                rc = bnxt_hwrm_ring_grp_free(bp, idx);

                if (rc)
                        return rc;
        }
        return rc;
}

int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
{
        uint16_t i;
        uint32_t rc = 0;

        for (i = 0; i < bp->rx_cp_nr_rings; i++) {
                unsigned int idx = i + 1;

                if (bp->grp_info[idx].cp_fw_ring_id == INVALID_HW_RING_ID ||
                    bp->grp_info[idx].rx_fw_ring_id == INVALID_HW_RING_ID)
                        continue;

                rc = bnxt_hwrm_ring_grp_alloc(bp, idx);

                if (rc)
                        return rc;
        }
        return rc;
}

void bnxt_free_hwrm_resources(struct bnxt *bp)
{
        /* Release the HWRM command response buffer */
        rte_free(bp->hwrm_cmd_resp_addr);
        bp->hwrm_cmd_resp_addr = NULL;
        bp->hwrm_cmd_resp_dma_addr = 0;
}
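
/*
 * Note: bnxt_hwrm_ver_get() re-allocates this response buffer once the
 * firmware reports its actual max_resp_len, so the sizes set here are
 * just the initial, pre-VER_GET defaults.
 */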

int bnxt_alloc_hwrm_resources(struct bnxt *bp)
{
        struct rte_pci_device *pdev = bp->pdev;
        char type[RTE_MEMZONE_NAMESIZE];

        sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x", pdev->addr.domain,
                pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
        bp->max_req_len = HWRM_MAX_REQ_LEN;
        bp->max_resp_len = HWRM_MAX_RESP_LEN;
        bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
        if (bp->hwrm_cmd_resp_addr == NULL)
                return -ENOMEM;
        bp->hwrm_cmd_resp_dma_addr =
                rte_malloc_virt2phy(bp->hwrm_cmd_resp_addr);
        rte_spinlock_init(&bp->hwrm_lock);

        return 0;
}

static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
{
        uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;

        if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
                return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;

        switch (conf_link_speed) {
        case ETH_LINK_SPEED_10M_HD:
        case ETH_LINK_SPEED_100M_HD:
                return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
        }
        return hw_link_duplex;
}

static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
{
        uint16_t eth_link_speed = 0;

        if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
                return ETH_LINK_SPEED_AUTONEG;

        switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
        case ETH_LINK_SPEED_100M:
        case ETH_LINK_SPEED_100M_HD:
                eth_link_speed =
                        HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
                break;
        case ETH_LINK_SPEED_1G:
                eth_link_speed =
                        HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
                break;
        case ETH_LINK_SPEED_2_5G:
                eth_link_speed =
                        HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
                break;
        case ETH_LINK_SPEED_10G:
                eth_link_speed =
                        HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
                break;
        case ETH_LINK_SPEED_20G:
                eth_link_speed =
                        HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
                break;
        case ETH_LINK_SPEED_25G:
                eth_link_speed =
                        HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
                break;
        case ETH_LINK_SPEED_40G:
                eth_link_speed =
                        HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
                break;
        case ETH_LINK_SPEED_50G:
                eth_link_speed =
                        HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
                break;
        default:
                RTE_LOG(ERR, PMD,
                        "Unsupported link speed %d; default to AUTO\n",
                        conf_link_speed);
                break;
        }
        return eth_link_speed;
}

#define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
                ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
                ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
                ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G)

static int bnxt_valid_link_speed(uint32_t link_speed, uint8_t port_id)
{
        uint32_t one_speed;

        if (link_speed == ETH_LINK_SPEED_AUTONEG)
                return 0;

        if (link_speed & ETH_LINK_SPEED_FIXED) {
                one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;

                if (one_speed & (one_speed - 1)) {
                        RTE_LOG(ERR, PMD,
                                "Invalid advertised speeds (%u) for port %u\n",
                                link_speed, port_id);
                        return -EINVAL;
                }
                if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
                        RTE_LOG(ERR, PMD,
                                "Unsupported advertised speed (%u) for port %u\n",
                                link_speed, port_id);
                        return -EINVAL;
                }
        } else {
                if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
                        RTE_LOG(ERR, PMD,
                                "Unsupported advertised speeds (%u) for port %u\n",
                                link_speed, port_id);
                        return -EINVAL;
                }
        }
        return 0;
}

static uint16_t bnxt_parse_eth_link_speed_mask(uint32_t link_speed)
{
        uint16_t ret = 0;

        if (link_speed == ETH_LINK_SPEED_AUTONEG)
                link_speed = BNXT_SUPPORTED_SPEEDS;

        if (link_speed & ETH_LINK_SPEED_100M)
                ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
        if (link_speed & ETH_LINK_SPEED_100M_HD)
                ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
        if (link_speed & ETH_LINK_SPEED_1G)
                ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
        if (link_speed & ETH_LINK_SPEED_2_5G)
                ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
        if (link_speed & ETH_LINK_SPEED_10G)
                ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
        if (link_speed & ETH_LINK_SPEED_20G)
                ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
        if (link_speed & ETH_LINK_SPEED_25G)
                ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
        if (link_speed & ETH_LINK_SPEED_40G)
                ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
        if (link_speed & ETH_LINK_SPEED_50G)
                ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
        return ret;
}
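
/*
 * For example, link_speeds = ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G
 * yields MASK_10GB | MASK_25GB, restricting autonegotiation to those
 * two speeds, while ETH_LINK_SPEED_AUTONEG (0) expands to every speed
 * in BNXT_SUPPORTED_SPEEDS.
 */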

int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
{
        int rc = 0;
        struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
        struct bnxt_link_info link_req;
        uint16_t speed;

        rc = bnxt_valid_link_speed(dev_conf->link_speeds,
                        bp->eth_dev->data->port_id);
        if (rc)
                goto error;

        memset(&link_req, 0, sizeof(link_req));
        speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
        link_req.link_up = link_up;
        if (speed == 0) {
                link_req.phy_flags =
                                HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
                link_req.auto_mode =
                                HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ONE_OR_BELOW;
                link_req.auto_link_speed_mask =
                        bnxt_parse_eth_link_speed_mask(dev_conf->link_speeds);
                link_req.auto_link_speed =
                                HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_50GB;
        } else {
                link_req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE;
                link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE |
                        HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
                link_req.link_speed = speed;
        }
        link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
        link_req.auto_pause = bp->link_info.auto_pause;
        link_req.force_pause = bp->link_info.force_pause;

        rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
        if (rc) {
                RTE_LOG(ERR, PMD,
                        "Set link config failed with rc %d\n", rc);
        }

error:
        return rc;
}