net/bnxt: add HWRM function reset command
[dpdk.git] / drivers/net/bnxt/bnxt_hwrm.c
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) Broadcom Limited.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Broadcom Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_version.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_rxq.h"
#include "bnxt_txq.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"

#define HWRM_CMD_TIMEOUT		2000

/*
 * HWRM Functions (sent to HWRM)
 * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
 * fails (i.e. a timeout), or a positive, non-zero HWRM error code if the
 * ChiMP rejects the command.  See the illustrative wrapper sketch after the
 * helper macros below.
 */

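/*
 * Low-level transport: write the request into the HWRM channel in BAR0 one
 * 32-bit word at a time, zero the remainder of the maximum request length,
 * ring the channel doorbell and then poll the DMA response buffer until the
 * valid byte shows up or HWRM_CMD_TIMEOUT polls have elapsed.  Callers are
 * expected to hold bp->hwrm_lock (see bnxt_hwrm_send_message() below).
 */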
static int bnxt_hwrm_send_message_locked(struct bnxt *bp, void *msg,
					 uint32_t msg_len)
{
	unsigned int i;
	struct input *req = msg;
	struct output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t *data = msg;
	uint8_t *bar;
	uint8_t *valid;

	/* Write request msg to hwrm channel */
	for (i = 0; i < msg_len; i += 4) {
		bar = (uint8_t *)bp->bar0 + i;
		*(volatile uint32_t *)bar = *data;
		data++;
	}

	/* Zero the rest of the request space */
	for (; i < bp->max_req_len; i += 4) {
		bar = (uint8_t *)bp->bar0 + i;
		*(volatile uint32_t *)bar = 0;
	}

	/* Ring channel doorbell */
	bar = (uint8_t *)bp->bar0 + 0x100;
	*(volatile uint32_t *)bar = 1;

	/* Poll for the valid bit */
	for (i = 0; i < HWRM_CMD_TIMEOUT; i++) {
		/* Sanity check on the resp->resp_len */
		rte_rmb();
		if (resp->resp_len && resp->resp_len <= bp->max_resp_len) {
			/* Last byte of resp contains the valid key */
			valid = (uint8_t *)resp + resp->resp_len - 1;
			if (*valid == HWRM_RESP_VALID_KEY)
				break;
		}
		rte_delay_us(600);
	}

	if (i >= HWRM_CMD_TIMEOUT) {
		RTE_LOG(ERR, PMD, "Error sending msg %x\n", req->req_type);
		goto err_ret;
	}
	return 0;

err_ret:
	return -1;
}

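/* Serialize HWRM commands: take the HWRM lock around the locked variant. */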
static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg, uint32_t msg_len)
{
	int rc;

	rte_spinlock_lock(&bp->hwrm_lock);
	rc = bnxt_hwrm_send_message_locked(bp, msg, msg_len);
	rte_spinlock_unlock(&bp->hwrm_lock);
	return rc;
}

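/*
 * HWRM_PREP() zeroes the shared response buffer and fills in the common
 * input header: request type, completion ring, sequence id, target function
 * and the DMA address the response should be written to.
 */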
#define HWRM_PREP(req, type, cr, resp) \
	memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
	req.req_type = rte_cpu_to_le_16(HWRM_##type); \
	req.cmpl_ring = rte_cpu_to_le_16(cr); \
	req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
	req.target_id = rte_cpu_to_le_16(0xffff); \
	req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr)

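/*
 * HWRM_CHECK_RESULT bails out of the calling function with the transport
 * error (negative rc) or with the error code the firmware put into the
 * response (positive rc), matching the convention described at the top of
 * this file.
 */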
#define HWRM_CHECK_RESULT \
	{ \
		if (rc) { \
			RTE_LOG(ERR, PMD, "%s failed rc:%d\n", \
				__func__, rc); \
			return rc; \
		} \
		if (resp->error_code) { \
			rc = rte_le_to_cpu_16(resp->error_code); \
			RTE_LOG(ERR, PMD, "%s error %d\n", __func__, rc); \
			return rc; \
		} \
	}

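/*
 * Every command wrapper below follows the same shape.  As an illustrative
 * sketch only (hwrm_foo_* is a placeholder, not a real HWRM structure):
 *
 *	int bnxt_hwrm_foo(struct bnxt *bp)
 *	{
 *		int rc;
 *		struct hwrm_foo_input req = {.req_type = 0 };
 *		struct hwrm_foo_output *resp = bp->hwrm_cmd_resp_addr;
 *
 *		HWRM_PREP(req, FOO, -1, resp);
 *		... fill command-specific request fields ...
 *		rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 *		HWRM_CHECK_RESULT;
 *		... read command-specific response fields ...
 *		return rc;
 *	}
 */
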
int bnxt_hwrm_clear_filter(struct bnxt *bp,
			   struct bnxt_filter_info *filter)
{
	int rc = 0;
	struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, CFA_L2_FILTER_FREE, -1, resp);

	req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	filter->fw_l2_filter_id = -1;

	return 0;
}

int bnxt_hwrm_set_filter(struct bnxt *bp,
			 struct bnxt_vnic_info *vnic,
			 struct bnxt_filter_info *filter)
{
	int rc = 0;
	struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t enables = 0;

	HWRM_PREP(req, CFA_L2_FILTER_ALLOC, -1, resp);

	req.flags = rte_cpu_to_le_32(filter->flags);

	enables = filter->enables |
		  HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
	req.dst_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

	if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
		memcpy(req.l2_addr, filter->l2_addr, ETHER_ADDR_LEN);
	if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
		memcpy(req.l2_addr_mask, filter->l2_addr_mask, ETHER_ADDR_LEN);
	if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
		req.l2_ovlan = filter->l2_ovlan;
	if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
		req.l2_ovlan_mask = filter->l2_ovlan_mask;

	req.enables = rte_cpu_to_le_32(enables);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);

	return rc;
}

int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, void *fwd_cmd)
{
	int rc;
	struct hwrm_exec_fwd_resp_input req = {.req_type = 0 };
	struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, EXEC_FWD_RESP, -1, resp);

	memcpy(req.encap_request, fwd_cmd, sizeof(req.encap_request));

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}

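/*
 * Query the function's capabilities and resource limits (rings, VNICs, L2
 * contexts, MAC address, ...) and cache them in the PF or VF info block.
 */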
int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_func_qcaps_input req = {.req_type = 0 };
	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_QCAPS, -1, resp);

	req.fid = rte_cpu_to_le_16(0xffff);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	if (BNXT_PF(bp)) {
		struct bnxt_pf_info *pf = &bp->pf;

		pf->fw_fid = rte_le_to_cpu_32(resp->fid);
		pf->port_id = resp->port_id;
		memcpy(pf->mac_addr, resp->perm_mac_address, ETHER_ADDR_LEN);
		pf->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
		pf->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
		pf->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
		pf->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
		pf->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
		pf->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
		pf->first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
		pf->max_vfs = rte_le_to_cpu_16(resp->max_vfs);
	} else {
		struct bnxt_vf_info *vf = &bp->vf;

		vf->fw_fid = rte_le_to_cpu_32(resp->fid);
		memcpy(vf->mac_addr, &resp->perm_mac_address, ETHER_ADDR_LEN);
		vf->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
		vf->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
		vf->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
		vf->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
		vf->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
		vf->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
	}

	return rc;
}

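/*
 * Ask the firmware (HWRM) to reset this function.  No optional request
 * fields are enabled.
 */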
int bnxt_hwrm_func_reset(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_func_reset_input req = {.req_type = 0 };
	struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_RESET, -1, resp);

	req.enables = rte_cpu_to_le_32(0);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}

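/*
 * Register the driver with the firmware, advertising the DPDK version it
 * was built against and the bitmap of VF requests to forward to it.  Does
 * nothing if the driver is already registered.
 */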
int bnxt_hwrm_func_driver_register(struct bnxt *bp, uint32_t flags,
				   uint32_t *vf_req_fwd)
{
	int rc;
	struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
	struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;

	if (bp->flags & BNXT_FLAG_REGISTERED)
		return 0;

	HWRM_PREP(req, FUNC_DRV_RGTR, -1, resp);
	req.flags = flags;
	req.enables = HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER;
	req.ver_maj = RTE_VER_YEAR;
	req.ver_min = RTE_VER_MONTH;
	req.ver_upd = RTE_VER_MINOR;

	memcpy(req.vf_req_fwd, vf_req_fwd, sizeof(req.vf_req_fwd));

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	bp->flags |= BNXT_FLAG_REGISTERED;

	return rc;
}

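/*
 * Query the HWRM interface and firmware versions, warn on driver/firmware
 * API mismatches, and grow the response buffer if the firmware may return
 * responses larger than the one allocated at probe time.
 */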
int bnxt_hwrm_ver_get(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_ver_get_input req = {.req_type = 0 };
	struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t my_version;
	uint32_t fw_version;
	uint16_t max_resp_len;
	char type[RTE_MEMZONE_NAMESIZE];

	HWRM_PREP(req, VER_GET, -1, resp);

	req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
	req.hwrm_intf_min = HWRM_VERSION_MINOR;
	req.hwrm_intf_upd = HWRM_VERSION_UPDATE;

	/*
	 * Hold the lock since we may be adjusting the response pointers.
	 */
	rte_spinlock_lock(&bp->hwrm_lock);
	rc = bnxt_hwrm_send_message_locked(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	RTE_LOG(INFO, PMD, "%d.%d.%d:%d.%d.%d\n",
		resp->hwrm_intf_maj, resp->hwrm_intf_min,
		resp->hwrm_intf_upd,
		resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld);

	my_version = HWRM_VERSION_MAJOR << 16;
	my_version |= HWRM_VERSION_MINOR << 8;
	my_version |= HWRM_VERSION_UPDATE;

	fw_version = resp->hwrm_intf_maj << 16;
	fw_version |= resp->hwrm_intf_min << 8;
	fw_version |= resp->hwrm_intf_upd;

	if (resp->hwrm_intf_maj != HWRM_VERSION_MAJOR) {
		RTE_LOG(ERR, PMD, "Unsupported firmware API version\n");
		rc = -EINVAL;
		goto error;
	}

	if (my_version != fw_version) {
		RTE_LOG(INFO, PMD, "BNXT Driver/HWRM API mismatch.\n");
		if (my_version < fw_version) {
			RTE_LOG(INFO, PMD,
				"Firmware API version is newer than driver.\n");
			RTE_LOG(INFO, PMD,
				"The driver may be missing features.\n");
		} else {
			RTE_LOG(INFO, PMD,
				"Firmware API version is older than driver.\n");
			RTE_LOG(INFO, PMD,
				"Not all driver features may be functional.\n");
		}
	}

	if (bp->max_req_len > resp->max_req_win_len) {
		RTE_LOG(ERR, PMD, "Unsupported request length\n");
		rc = -EINVAL;
	}
	bp->max_req_len = resp->max_req_win_len;
	max_resp_len = resp->max_resp_len;
	if (bp->max_resp_len != max_resp_len) {
		sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x",
			bp->pdev->addr.domain, bp->pdev->addr.bus,
			bp->pdev->addr.devid, bp->pdev->addr.function);

		rte_free(bp->hwrm_cmd_resp_addr);

		bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
		if (bp->hwrm_cmd_resp_addr == NULL) {
			rc = -ENOMEM;
			goto error;
		}
		bp->hwrm_cmd_resp_dma_addr =
			rte_malloc_virt2phy(bp->hwrm_cmd_resp_addr);
		bp->max_resp_len = max_resp_len;
	}

error:
	rte_spinlock_unlock(&bp->hwrm_lock);
	return rc;
}

int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
{
	int rc;
	struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
	struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;

	if (!(bp->flags & BNXT_FLAG_REGISTERED))
		return 0;

	HWRM_PREP(req, FUNC_DRV_UNRGTR, -1, resp);
	req.flags = flags;

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	bp->flags &= ~BNXT_FLAG_REGISTERED;

	return rc;
}

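/*
 * Program the PHY: when bringing the link up, apply either a forced speed or
 * the autoneg parameters plus pause settings from conf; otherwise force the
 * link down.
 */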
static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
{
	int rc = 0;
	struct hwrm_port_phy_cfg_input req = {.req_type = 0};
	struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, PORT_PHY_CFG, -1, resp);

	req.flags = conf->phy_flags;
	if (conf->link_up) {
		req.force_link_speed = conf->link_speed;
		/*
		 * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
		 * any auto mode, even "none".
		 */
		if (req.auto_mode == HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE) {
			req.flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
		} else {
			req.auto_mode = conf->auto_mode;
			req.enables |=
				HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
			req.auto_link_speed_mask = conf->auto_link_speed_mask;
			req.enables |=
			   HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
			req.auto_link_speed = conf->auto_link_speed;
			req.enables |=
				HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED;
		}
		req.auto_duplex = conf->duplex;
		req.enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
		req.auto_pause = conf->auto_pause;
		/* Set force_pause if there is no auto or if there is a force */
		if (req.auto_pause)
			req.enables |=
				HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
		else
			req.enables |=
				HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;
		req.force_pause = conf->force_pause;
		if (req.force_pause)
			req.enables |=
				HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;
	} else {
		req.flags &= ~HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
		req.flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DOWN;
		req.force_link_speed = 0;
	}

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}

int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
	struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, QUEUE_QPORTCFG, -1, resp);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

#define GET_QUEUE_INFO(x) \
	bp->cos_queue[x].id = resp->queue_id##x; \
	bp->cos_queue[x].profile = resp->queue_id##x##_service_profile

	GET_QUEUE_INFO(0);
	GET_QUEUE_INFO(1);
	GET_QUEUE_INFO(2);
	GET_QUEUE_INFO(3);
	GET_QUEUE_INFO(4);
	GET_QUEUE_INFO(5);
	GET_QUEUE_INFO(6);
	GET_QUEUE_INFO(7);

	return rc;
}

int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
{
	int rc = 0;
	struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
	struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, STAT_CTX_CLR_STATS, -1, resp);

	if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
		return rc;

	req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
	req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}

/*
 * HWRM utility functions
 */

int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
{
	unsigned int i;
	int rc = 0;

	for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
		struct bnxt_tx_queue *txq;
		struct bnxt_rx_queue *rxq;
		struct bnxt_cp_ring_info *cpr;

		if (i >= bp->rx_cp_nr_rings) {
			txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
			cpr = txq->cp_ring;
		} else {
			rxq = bp->rx_queues[i];
			cpr = rxq->cp_ring;
		}

		rc = bnxt_hwrm_stat_clear(bp, cpr);
		if (rc)
			return rc;
	}
	return 0;
}

void bnxt_free_hwrm_resources(struct bnxt *bp)
{
	/* Release the response buffer */
	rte_free(bp->hwrm_cmd_resp_addr);
	bp->hwrm_cmd_resp_addr = NULL;
	bp->hwrm_cmd_resp_dma_addr = 0;
}

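/*
 * Allocate the DMA-able buffer that HWRM responses are written into and
 * initialize the lock that serializes access to the HWRM channel.
 */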
int bnxt_alloc_hwrm_resources(struct bnxt *bp)
{
	struct rte_pci_device *pdev = bp->pdev;
	char type[RTE_MEMZONE_NAMESIZE];

	sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x", pdev->addr.domain,
		pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
	bp->max_req_len = HWRM_MAX_REQ_LEN;
	bp->max_resp_len = HWRM_MAX_RESP_LEN;
	bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
	if (bp->hwrm_cmd_resp_addr == NULL)
		return -ENOMEM;
	bp->hwrm_cmd_resp_dma_addr =
		rte_malloc_virt2phy(bp->hwrm_cmd_resp_addr);
	rte_spinlock_init(&bp->hwrm_lock);

	return 0;
}

static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
{
	uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;

	if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
		return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;

	switch (conf_link_speed) {
	case ETH_LINK_SPEED_10M_HD:
	case ETH_LINK_SPEED_100M_HD:
		return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
	}
	return hw_link_duplex;
}

static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
{
	uint16_t eth_link_speed = 0;

	if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
		return ETH_LINK_SPEED_AUTONEG;

	switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
	case ETH_LINK_SPEED_100M:
	case ETH_LINK_SPEED_100M_HD:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
		break;
	case ETH_LINK_SPEED_1G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
		break;
	case ETH_LINK_SPEED_2_5G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
		break;
	case ETH_LINK_SPEED_10G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
		break;
	case ETH_LINK_SPEED_20G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
		break;
	case ETH_LINK_SPEED_25G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
		break;
	case ETH_LINK_SPEED_40G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
		break;
	case ETH_LINK_SPEED_50G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
		break;
	default:
		RTE_LOG(ERR, PMD,
			"Unsupported link speed %d; default to AUTO\n",
			conf_link_speed);
		break;
	}
	return eth_link_speed;
}

#define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
		ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
		ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
		ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G)

static int bnxt_valid_link_speed(uint32_t link_speed, uint8_t port_id)
{
	uint32_t one_speed;

	if (link_speed == ETH_LINK_SPEED_AUTONEG)
		return 0;

	if (link_speed & ETH_LINK_SPEED_FIXED) {
		one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;

		if (one_speed & (one_speed - 1)) {
			RTE_LOG(ERR, PMD,
				"Invalid advertised speeds (%u) for port %u\n",
				link_speed, port_id);
			return -EINVAL;
		}
		if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
			RTE_LOG(ERR, PMD,
				"Unsupported advertised speed (%u) for port %u\n",
				link_speed, port_id);
			return -EINVAL;
		}
	} else {
		if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
			RTE_LOG(ERR, PMD,
				"Unsupported advertised speeds (%u) for port %u\n",
				link_speed, port_id);
			return -EINVAL;
		}
	}
	return 0;
}

static uint16_t bnxt_parse_eth_link_speed_mask(uint32_t link_speed)
{
	uint16_t ret = 0;

	if (link_speed == ETH_LINK_SPEED_AUTONEG)
		link_speed = BNXT_SUPPORTED_SPEEDS;

	if (link_speed & ETH_LINK_SPEED_100M)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
	if (link_speed & ETH_LINK_SPEED_100M_HD)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
	if (link_speed & ETH_LINK_SPEED_1G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
	if (link_speed & ETH_LINK_SPEED_2_5G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
	if (link_speed & ETH_LINK_SPEED_10G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
	if (link_speed & ETH_LINK_SPEED_20G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
	if (link_speed & ETH_LINK_SPEED_25G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
	if (link_speed & ETH_LINK_SPEED_40G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
	if (link_speed & ETH_LINK_SPEED_50G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
	return ret;
}

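/*
 * Validate the speeds requested via dev_conf->link_speeds and translate them
 * into a PORT_PHY_CFG request: autonegotiation over the requested speed mask
 * when no fixed speed is given, otherwise a forced speed with a PHY reset.
 */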
int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
{
	int rc = 0;
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	struct bnxt_link_info link_req;
	uint16_t speed;

	rc = bnxt_valid_link_speed(dev_conf->link_speeds,
			bp->eth_dev->data->port_id);
	if (rc)
		goto error;

	memset(&link_req, 0, sizeof(link_req));
	speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
	link_req.link_up = link_up;
	if (speed == 0) {
		link_req.phy_flags =
				HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
		link_req.auto_mode =
				HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ONE_OR_BELOW;
		link_req.auto_link_speed_mask =
			bnxt_parse_eth_link_speed_mask(dev_conf->link_speeds);
		link_req.auto_link_speed =
				HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_50GB;
	} else {
		link_req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE;
		link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE |
			HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
		link_req.link_speed = speed;
	}
	link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
	link_req.auto_pause = bp->link_info.auto_pause;
	link_req.force_pause = bp->link_info.force_pause;

	rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
	if (rc) {
		RTE_LOG(ERR, PMD,
			"Set link config failed with rc %d\n", rc);
	}

error:
	return rc;
}