net/bnxt: handle VF/PF initialization appropriately
[dpdk.git] / drivers / net / bnxt / bnxt_hwrm.c
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) Broadcom Limited.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Broadcom Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <unistd.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_io.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_version.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_ring.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"

#define HWRM_CMD_TIMEOUT                2000
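/*
 * Note: the poll loop in bnxt_hwrm_send_message_locked() below delays 600us
 * per iteration, so this corresponds to a worst-case wait of roughly
 * 2000 * 600us = 1.2 seconds per HWRM command.
 */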

struct bnxt_plcmodes_cfg {
        uint32_t        flags;
        uint16_t        jumbo_thresh;
        uint16_t        hds_offset;
        uint16_t        hds_threshold;
};

static int page_getenum(size_t size)
{
        if (size <= 1 << 4)
                return 4;
        if (size <= 1 << 12)
                return 12;
        if (size <= 1 << 13)
                return 13;
        if (size <= 1 << 16)
                return 16;
        if (size <= 1 << 21)
                return 21;
        if (size <= 1 << 22)
                return 22;
        if (size <= 1 << 30)
                return 30;
        RTE_LOG(ERR, PMD, "Page size %zu out of range\n", size);
        return sizeof(void *) * 8 - 1;
}

static int page_roundup(size_t size)
{
        return 1 << page_getenum(size);
}
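/*
 * For example, page_getenum(4096) returns 12, and page_roundup(6000)
 * returns 8192: the smallest supported power-of-two size >= 6000.
 */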

/*
 * HWRM Functions (sent to HWRM)
 * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
 * fails (i.e. on a timeout), and a positive non-zero HWRM error code if the
 * command was rejected by the ChiMP firmware.
 */

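/*
 * Transport summary: the request is written to the HWRM channel 32 bits at
 * a time starting at BAR0 offset 0, the remainder of the request window is
 * zeroed out to max_req_len, and the doorbell at BAR0 + 0x100 is rung.
 * Completion is then detected by polling the last byte of the response
 * buffer for HWRM_RESP_VALID_KEY.
 */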
static int bnxt_hwrm_send_message_locked(struct bnxt *bp, void *msg,
                                        uint32_t msg_len)
{
        unsigned int i;
        struct input *req = msg;
        struct output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t *data = msg;
        uint8_t *bar;
        uint8_t *valid;
        uint16_t resp_len;

        /* Write request msg to hwrm channel */
        for (i = 0; i < msg_len; i += 4) {
                bar = (uint8_t *)bp->bar0 + i;
                rte_write32(*data, bar);
                data++;
        }

        /* Zero the rest of the request space */
        for (; i < bp->max_req_len; i += 4) {
                bar = (uint8_t *)bp->bar0 + i;
                rte_write32(0, bar);
        }

        /* Ring channel doorbell */
        bar = (uint8_t *)bp->bar0 + 0x100;
        rte_write32(1, bar);

        /* Poll for the valid bit */
        for (i = 0; i < HWRM_CMD_TIMEOUT; i++) {
                /* Sanity check on the resp->resp_len */
                rte_rmb();
                resp_len = rte_le_to_cpu_16(resp->resp_len);
                if (resp_len && resp_len <= bp->max_resp_len) {
                        /* Last byte of resp contains the valid key */
                        valid = (uint8_t *)resp + resp_len - 1;
                        if (*valid == HWRM_RESP_VALID_KEY)
                                break;
                }
                rte_delay_us(600);
        }

        if (i >= HWRM_CMD_TIMEOUT) {
                RTE_LOG(ERR, PMD, "Error sending msg %x\n",
                        req->req_type);
                return -1;
        }
        return 0;
}

static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg, uint32_t msg_len)
{
        int rc;

        rte_spinlock_lock(&bp->hwrm_lock);
        rc = bnxt_hwrm_send_message_locked(bp, msg, msg_len);
        rte_spinlock_unlock(&bp->hwrm_lock);
        return rc;
}

#define HWRM_PREP(req, type, cr, resp) \
        memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
        req.req_type = rte_cpu_to_le_16(HWRM_##type); \
        req.cmpl_ring = rte_cpu_to_le_16(cr); \
        req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
        req.target_id = rte_cpu_to_le_16(0xffff); \
        req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr)

#define HWRM_CHECK_RESULT \
        { \
                if (rc) { \
                        RTE_LOG(ERR, PMD, "%s failed rc:%d\n", \
                                __func__, rc); \
                        return rc; \
                } \
                if (resp->error_code) { \
                        rc = rte_le_to_cpu_16(resp->error_code); \
                        RTE_LOG(ERR, PMD, "%s error %d\n", __func__, rc); \
                        return rc; \
                } \
        }
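/*
 * Illustrative usage of the two macros above (the pattern every
 * bnxt_hwrm_*() function in this file follows, here with VNIC_FREE):
 *
 *	struct hwrm_vnic_free_input req = {.req_type = 0 };
 *	struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;
 *
 *	HWRM_PREP(req, VNIC_FREE, -1, resp);
 *	... fill in request-specific fields ...
 *	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 *	HWRM_CHECK_RESULT;
 */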

int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, CFA_L2_SET_RX_MASK, -1, resp);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
        req.mask = 0;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t mask = 0;

        HWRM_PREP(req, CFA_L2_SET_RX_MASK, -1, resp);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

        /* FIXME add multicast flag, when multicast adding options is supported
         * by ethtool.
         */
        if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
        if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
        req.mask = rte_cpu_to_le_32(HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST |
                                    mask);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

int bnxt_hwrm_clear_filter(struct bnxt *bp,
                           struct bnxt_filter_info *filter)
{
        int rc = 0;
        struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, CFA_L2_FILTER_FREE, -1, resp);

        req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        filter->fw_l2_filter_id = -1;

        return 0;
}

int bnxt_hwrm_set_filter(struct bnxt *bp,
                         struct bnxt_vnic_info *vnic,
                         struct bnxt_filter_info *filter)
{
        int rc = 0;
        struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t enables = 0;

        HWRM_PREP(req, CFA_L2_FILTER_ALLOC, -1, resp);

        req.flags = rte_cpu_to_le_32(filter->flags);

        enables = filter->enables |
              HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
        req.dst_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
                memcpy(req.l2_addr, filter->l2_addr,
                       ETHER_ADDR_LEN);
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
                memcpy(req.l2_addr_mask, filter->l2_addr_mask,
                       ETHER_ADDR_LEN);
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
                req.l2_ovlan = filter->l2_ovlan;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
                req.l2_ovlan_mask = filter->l2_ovlan_mask;

        req.enables = rte_cpu_to_le_32(enables);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);

        return rc;
}

int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_func_qcaps_input req = {.req_type = 0 };
        struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
        uint16_t new_max_vfs;
        int i;

        HWRM_PREP(req, FUNC_QCAPS, -1, resp);

        req.fid = rte_cpu_to_le_16(0xffff);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
        if (BNXT_PF(bp)) {
                bp->pf.port_id = resp->port_id;
                bp->pf.first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
                new_max_vfs = bp->pdev->max_vfs;
                if (new_max_vfs != bp->pf.max_vfs) {
                        if (bp->pf.vf_info)
                                rte_free(bp->pf.vf_info);
                        bp->pf.vf_info = rte_malloc("bnxt_vf_info",
                            sizeof(bp->pf.vf_info[0]) * new_max_vfs, 0);
                        if (bp->pf.vf_info == NULL) {
                                RTE_LOG(ERR, PMD,
                                        "Failed to alloc vf info\n");
                                return -ENOMEM;
                        }
                        bp->pf.max_vfs = new_max_vfs;
                        for (i = 0; i < new_max_vfs; i++) {
                                bp->pf.vf_info[i].fid = bp->pf.first_vf_id + i;
                                bp->pf.vf_info[i].vlan_table =
                                        rte_zmalloc("VF VLAN table",
                                                    getpagesize(),
                                                    getpagesize());
                                if (bp->pf.vf_info[i].vlan_table == NULL)
                                        RTE_LOG(ERR, PMD,
                                        "Fail to alloc VLAN table for VF %d\n",
                                        i);
                                else
                                        rte_mem_lock_page(
                                                bp->pf.vf_info[i].vlan_table);
                                STAILQ_INIT(&bp->pf.vf_info[i].filter);
                        }
                }
        }

        bp->fw_fid = rte_le_to_cpu_32(resp->fid);
        memcpy(bp->dflt_mac_addr, &resp->mac_address, ETHER_ADDR_LEN);
        bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
        bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
        bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
        bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
        bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
        /* TODO: For now, do not support VMDq/RFS on VFs. */
        if (BNXT_PF(bp)) {
                if (bp->pf.max_vfs)
                        bp->max_vnics = 1;
                else
                        bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
        } else {
                bp->max_vnics = 1;
        }
        bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
        if (BNXT_PF(bp))
                bp->pf.total_vnics = rte_le_to_cpu_16(resp->max_vnics);

        return rc;
}

int bnxt_hwrm_func_reset(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_func_reset_input req = {.req_type = 0 };
        struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, FUNC_RESET, -1, resp);

        req.enables = rte_cpu_to_le_32(0);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

int bnxt_hwrm_func_driver_register(struct bnxt *bp)
{
        int rc;
        struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
        struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;

        if (bp->flags & BNXT_FLAG_REGISTERED)
                return 0;

        HWRM_PREP(req, FUNC_DRV_RGTR, -1, resp);
        req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
                        HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
        req.ver_maj = RTE_VER_YEAR;
        req.ver_min = RTE_VER_MONTH;
        req.ver_upd = RTE_VER_MINOR;

        if (BNXT_PF(bp)) {
                req.enables |= rte_cpu_to_le_32(
                        HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_INPUT_FWD);
                memcpy(req.vf_req_fwd, bp->pf.vf_req_fwd,
                       RTE_MIN(sizeof(req.vf_req_fwd),
                               sizeof(bp->pf.vf_req_fwd)));
        }

        /*
         * Forward all async events to the driver.  The memset must precede
         * setting bit 0 explicitly, or it would overwrite it.
         */
        memset(req.async_event_fwd, 0xff, sizeof(req.async_event_fwd));
        req.async_event_fwd[0] |= rte_cpu_to_le_32(0x1);   /* TODO: Use MACRO */

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        bp->flags |= BNXT_FLAG_REGISTERED;

        return rc;
}

int bnxt_hwrm_ver_get(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_ver_get_input req = {.req_type = 0 };
        struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t my_version;
        uint32_t fw_version;
        uint16_t max_resp_len;
        char type[RTE_MEMZONE_NAMESIZE];

        HWRM_PREP(req, VER_GET, -1, resp);

        req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
        req.hwrm_intf_min = HWRM_VERSION_MINOR;
        req.hwrm_intf_upd = HWRM_VERSION_UPDATE;

        /*
         * Hold the lock since we may be adjusting the response pointers.
         */
        rte_spinlock_lock(&bp->hwrm_lock);
        rc = bnxt_hwrm_send_message_locked(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        RTE_LOG(INFO, PMD, "%d.%d.%d:%d.%d.%d\n",
                resp->hwrm_intf_maj, resp->hwrm_intf_min,
                resp->hwrm_intf_upd,
                resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld);
        bp->fw_ver = (resp->hwrm_fw_maj << 24) | (resp->hwrm_fw_min << 16) |
                        (resp->hwrm_fw_bld << 8) | resp->hwrm_fw_rsvd;
        RTE_LOG(INFO, PMD, "Driver HWRM version: %d.%d.%d\n",
                HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);

        my_version = HWRM_VERSION_MAJOR << 16;
        my_version |= HWRM_VERSION_MINOR << 8;
        my_version |= HWRM_VERSION_UPDATE;

        fw_version = resp->hwrm_intf_maj << 16;
        fw_version |= resp->hwrm_intf_min << 8;
        fw_version |= resp->hwrm_intf_upd;

        if (resp->hwrm_intf_maj != HWRM_VERSION_MAJOR) {
                RTE_LOG(ERR, PMD, "Unsupported firmware API version\n");
                rc = -EINVAL;
                goto error;
        }

        if (my_version != fw_version) {
                RTE_LOG(INFO, PMD, "BNXT Driver/HWRM API mismatch.\n");
                if (my_version < fw_version) {
                        RTE_LOG(INFO, PMD,
                                "Firmware API version is newer than driver.\n");
                        RTE_LOG(INFO, PMD,
                                "The driver may be missing features.\n");
                } else {
                        RTE_LOG(INFO, PMD,
                                "Firmware API version is older than driver.\n");
                        RTE_LOG(INFO, PMD,
                                "Not all driver features may be functional.\n");
                }
        }

        if (bp->max_req_len > rte_le_to_cpu_16(resp->max_req_win_len)) {
                RTE_LOG(ERR, PMD, "Unsupported request length\n");
                rc = -EINVAL;
        }
        bp->max_req_len = rte_le_to_cpu_16(resp->max_req_win_len);
        max_resp_len = rte_le_to_cpu_16(resp->max_resp_len);
        if (bp->max_resp_len != max_resp_len) {
                sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x",
                        bp->pdev->addr.domain, bp->pdev->addr.bus,
                        bp->pdev->addr.devid, bp->pdev->addr.function);

                rte_free(bp->hwrm_cmd_resp_addr);

                bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
                if (bp->hwrm_cmd_resp_addr == NULL) {
                        rc = -ENOMEM;
                        goto error;
                }
                bp->hwrm_cmd_resp_dma_addr =
                        rte_malloc_virt2phy(bp->hwrm_cmd_resp_addr);
                bp->max_resp_len = max_resp_len;
        }

error:
        rte_spinlock_unlock(&bp->hwrm_lock);
        return rc;
}
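/*
 * Version packing example: an interface version of 1.5.1 packs as
 * (1 << 16) | (5 << 8) | 1 = 0x010501, so the my_version/fw_version
 * comparison above orders versions major-first, then minor, then update.
 */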

int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
{
        int rc;
        struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
        struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;

        if (!(bp->flags & BNXT_FLAG_REGISTERED))
                return 0;

        HWRM_PREP(req, FUNC_DRV_UNRGTR, -1, resp);
        req.flags = rte_cpu_to_le_32(flags);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        bp->flags &= ~BNXT_FLAG_REGISTERED;

        return rc;
}

static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
{
        int rc = 0;
        struct hwrm_port_phy_cfg_input req = {0};
        struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t enables = 0;

        HWRM_PREP(req, PORT_PHY_CFG, -1, resp);

        if (conf->link_up) {
                req.flags = rte_cpu_to_le_32(conf->phy_flags);
                req.force_link_speed = rte_cpu_to_le_16(conf->link_speed);
                /*
                 * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
                 * any auto mode, even "none".
                 */
                if (!conf->link_speed) {
                        req.auto_mode |= conf->auto_mode;
                        enables = HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
                        req.auto_link_speed_mask = conf->auto_link_speed_mask;
                        enables |=
                           HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
                        req.auto_link_speed = bp->link_info.auto_link_speed;
                        enables |=
                                HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED;
                }
                req.auto_duplex = conf->duplex;
                enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
                req.auto_pause = conf->auto_pause;
                req.force_pause = conf->force_pause;
                /* Set force_pause if there is no auto or if there is a force */
                if (req.auto_pause && !req.force_pause)
                        enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
                else
                        enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;

                req.enables = rte_cpu_to_le_32(enables);
        } else {
                req.flags =
                rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN);
                RTE_LOG(INFO, PMD, "Force Link Down\n");
        }

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
                                   struct bnxt_link_info *link_info)
{
        int rc = 0;
        struct hwrm_port_phy_qcfg_input req = {0};
        struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, PORT_PHY_QCFG, -1, resp);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        link_info->phy_link_status = resp->link;
        if (link_info->phy_link_status !=
            HWRM_PORT_PHY_QCFG_OUTPUT_LINK_NO_LINK) {
                link_info->link_up = 1;
                link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
        } else {
                link_info->link_up = 0;
                link_info->link_speed = 0;
        }
        link_info->duplex = resp->duplex;
        link_info->pause = resp->pause;
        link_info->auto_pause = resp->auto_pause;
        link_info->force_pause = resp->force_pause;
        link_info->auto_mode = resp->auto_mode;

        link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
        link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
        link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
        link_info->phy_ver[0] = resp->phy_maj;
        link_info->phy_ver[1] = resp->phy_min;
        link_info->phy_ver[2] = resp->phy_bld;

        return rc;
}

int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
        struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, QUEUE_QPORTCFG, -1, resp);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

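/*
 * Token pasting: GET_QUEUE_INFO(0) expands to
 *	bp->cos_queue[0].id = resp->queue_id0;
 *	bp->cos_queue[0].profile = resp->queue_id0_service_profile;
 * covering the eight individually named queue fields in the response.
 */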
#define GET_QUEUE_INFO(x) \
        bp->cos_queue[x].id = resp->queue_id##x; \
        bp->cos_queue[x].profile = resp->queue_id##x##_service_profile

        GET_QUEUE_INFO(0);
        GET_QUEUE_INFO(1);
        GET_QUEUE_INFO(2);
        GET_QUEUE_INFO(3);
        GET_QUEUE_INFO(4);
        GET_QUEUE_INFO(5);
        GET_QUEUE_INFO(6);
        GET_QUEUE_INFO(7);

        return rc;
}

int bnxt_hwrm_ring_alloc(struct bnxt *bp,
                         struct bnxt_ring *ring,
                         uint32_t ring_type, uint32_t map_index,
                         uint32_t stats_ctx_id)
{
        int rc = 0;
        struct hwrm_ring_alloc_input req = {.req_type = 0 };
        struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_ALLOC, -1, resp);

        req.enables = rte_cpu_to_le_32(0);

        req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
        req.fbo = rte_cpu_to_le_32(0);
        /* Association of ring index with doorbell index */
        req.logical_id = rte_cpu_to_le_16(map_index);

        switch (ring_type) {
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
                req.queue_id = bp->cos_queue[0].id;
                /* FALLTHROUGH */
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
                req.ring_type = ring_type;
                req.cmpl_ring_id =
                    rte_cpu_to_le_16(bp->grp_info[map_index].cp_fw_ring_id);
                req.length = rte_cpu_to_le_32(ring->ring_size);
                req.stat_ctx_id = rte_cpu_to_le_16(stats_ctx_id);
                req.enables = rte_cpu_to_le_32(rte_le_to_cpu_32(req.enables) |
                        HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID);
                break;
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
                req.ring_type = ring_type;
                /*
                 * TODO: Some HWRM versions crash with
                 * HWRM_RING_ALLOC_INPUT_INT_MODE_POLL
                 */
                req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
                req.length = rte_cpu_to_le_32(ring->ring_size);
                break;
        default:
                RTE_LOG(ERR, PMD, "hwrm alloc invalid ring type %d\n",
                        ring_type);
                return -1;
        }

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        if (rc || resp->error_code) {
                if (rc == 0 && resp->error_code)
                        rc = rte_le_to_cpu_16(resp->error_code);
                switch (ring_type) {
                case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
                        RTE_LOG(ERR, PMD,
                                "hwrm_ring_alloc cp failed. rc:%d\n", rc);
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
                        RTE_LOG(ERR, PMD,
                                "hwrm_ring_alloc rx failed. rc:%d\n", rc);
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
                        RTE_LOG(ERR, PMD,
                                "hwrm_ring_alloc tx failed. rc:%d\n", rc);
                        return rc;
                default:
                        RTE_LOG(ERR, PMD, "Invalid ring. rc:%d\n", rc);
                        return rc;
                }
        }

        ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
        return rc;
}

int bnxt_hwrm_ring_free(struct bnxt *bp,
                        struct bnxt_ring *ring, uint32_t ring_type)
{
        int rc;
        struct hwrm_ring_free_input req = {.req_type = 0 };
        struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_FREE, -1, resp);

        req.ring_type = ring_type;
        req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        if (rc || resp->error_code) {
                if (rc == 0 && resp->error_code)
                        rc = rte_le_to_cpu_16(resp->error_code);

                switch (ring_type) {
                case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
                        RTE_LOG(ERR, PMD, "hwrm_ring_free cp failed. rc:%d\n",
                                rc);
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
                        RTE_LOG(ERR, PMD, "hwrm_ring_free rx failed. rc:%d\n",
                                rc);
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
                        RTE_LOG(ERR, PMD, "hwrm_ring_free tx failed. rc:%d\n",
                                rc);
                        return rc;
                default:
                        RTE_LOG(ERR, PMD, "Invalid ring, rc:%d\n", rc);
                        return rc;
                }
        }
        return 0;
}

int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
{
        int rc = 0;
        struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
        struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_GRP_ALLOC, -1, resp);

        req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
        req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
        req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
        req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        bp->grp_info[idx].fw_grp_id =
            rte_le_to_cpu_16(resp->ring_group_id);

        return rc;
}

int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
{
        int rc;
        struct hwrm_ring_grp_free_input req = {.req_type = 0 };
        struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_GRP_FREE, -1, resp);

        req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
        return rc;
}

int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
{
        int rc = 0;
        struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
        struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;

        if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
                return rc;

        HWRM_PREP(req, STAT_CTX_CLR_STATS, -1, resp);

        req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp,
                             struct bnxt_cp_ring_info *cpr, unsigned int idx)
{
        int rc;
        struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
        struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, STAT_CTX_ALLOC, -1, resp);

        req.update_period_ms = rte_cpu_to_le_32(1000);

        req.stats_dma_addr =
            rte_cpu_to_le_64(cpr->hw_stats_map);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);
        bp->grp_info[idx].fw_stats_ctx = cpr->hw_stats_ctx_id;

        return rc;
}

int bnxt_hwrm_stat_ctx_free(struct bnxt *bp,
                            struct bnxt_cp_ring_info *cpr, unsigned int idx)
{
        int rc;
        struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
        struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, STAT_CTX_FREE, -1, resp);

        req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
        bp->grp_info[idx].fw_stats_ctx = cpr->hw_stats_ctx_id;

        return rc;
}

int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0, i, j;
        struct hwrm_vnic_alloc_input req = { 0 };
        struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        /* map ring groups to this vnic */
        for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++) {
                if (bp->grp_info[i].fw_grp_id == (uint16_t)HWRM_NA_SIGNATURE) {
                        RTE_LOG(ERR, PMD,
                                "Not enough ring groups avail:%x req:%x\n", j,
                                (vnic->end_grp_id - vnic->start_grp_id) + 1);
                        break;
                }
                vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
        }
        vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
        vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
        vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
        vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
        vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +
                                ETHER_CRC_LEN + VLAN_TAG_SIZE;
        HWRM_PREP(req, VNIC_ALLOC, -1, resp);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
        return rc;
}

static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
                                        struct bnxt_vnic_info *vnic,
                                        struct bnxt_plcmodes_cfg *pmode)
{
        int rc = 0;
        struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 };
        struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_PLCMODES_QCFG, -1, resp);

        req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        pmode->flags = rte_le_to_cpu_32(resp->flags);
        /* dflt_vnic bit doesn't exist in the _cfg command */
        pmode->flags &= ~(HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC);
        pmode->jumbo_thresh = rte_le_to_cpu_16(resp->jumbo_thresh);
        pmode->hds_offset = rte_le_to_cpu_16(resp->hds_offset);
        pmode->hds_threshold = rte_le_to_cpu_16(resp->hds_threshold);

        return rc;
}

static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
                                       struct bnxt_vnic_info *vnic,
                                       struct bnxt_plcmodes_cfg *pmode)
{
        int rc = 0;
        struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
        struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_PLCMODES_CFG, -1, resp);

        req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
        req.flags = rte_cpu_to_le_32(pmode->flags);
        req.jumbo_thresh = rte_cpu_to_le_16(pmode->jumbo_thresh);
        req.hds_offset = rte_cpu_to_le_16(pmode->hds_offset);
        req.hds_threshold = rte_cpu_to_le_16(pmode->hds_threshold);
        req.enables = rte_cpu_to_le_32(
            HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID |
            HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID |
            HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID
        );

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_cfg_input req = {.req_type = 0 };
        struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t ctx_enable_flag = HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
        struct bnxt_plcmodes_cfg pmodes;

        rc = bnxt_hwrm_vnic_plcmodes_qcfg(bp, vnic, &pmodes);
        if (rc)
                return rc;

        HWRM_PREP(req, VNIC_CFG, -1, resp);

        /* Only RSS support for now TBD: COS & LB */
        req.enables =
            rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP |
                             HWRM_VNIC_CFG_INPUT_ENABLES_MRU);
        /* Accumulate all valid rule enables; RSS is enabled by default */
        if (vnic->lb_rule != 0xffff)
                ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE;
        if (vnic->cos_rule != 0xffff)
                ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE;
        if (vnic->rss_rule != 0xffff)
                ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
        req.enables |= rte_cpu_to_le_32(ctx_enable_flag);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
        req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);
        req.rss_rule = rte_cpu_to_le_16(vnic->rss_rule);
        req.cos_rule = rte_cpu_to_le_16(vnic->cos_rule);
        req.lb_rule = rte_cpu_to_le_16(vnic->lb_rule);
        req.mru = rte_cpu_to_le_16(vnic->mru);
        if (vnic->func_default)
                req.flags |=
                    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
        if (vnic->vlan_strip)
                req.flags |=
                    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
        if (vnic->bd_stall)
                req.flags |=
                    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
        if (vnic->roce_dual)
                req.flags |= rte_cpu_to_le_32(
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE);
        if (vnic->roce_only)
                req.flags |= rte_cpu_to_le_32(
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE);
        if (vnic->rss_dflt_cr)
                req.flags |= rte_cpu_to_le_32(
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        rc = bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes);

        return rc;
}

int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
                int16_t fw_vf_id)
{
        int rc = 0;
        struct hwrm_vnic_qcfg_input req = {.req_type = 0 };
        struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_QCFG, -1, resp);

        req.enables =
                rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
        req.vf_id = rte_cpu_to_le_16(fw_vf_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        vnic->dflt_ring_grp = rte_le_to_cpu_16(resp->dflt_ring_grp);
        vnic->rss_rule = rte_le_to_cpu_16(resp->rss_rule);
        vnic->cos_rule = rte_le_to_cpu_16(resp->cos_rule);
        vnic->lb_rule = rte_le_to_cpu_16(resp->lb_rule);
        vnic->mru = rte_le_to_cpu_16(resp->mru);
        vnic->func_default = rte_le_to_cpu_32(
                        resp->flags) & HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT;
        vnic->vlan_strip = rte_le_to_cpu_32(resp->flags) &
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE;
        vnic->bd_stall = rte_le_to_cpu_32(resp->flags) &
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE;
        vnic->roce_dual = rte_le_to_cpu_32(resp->flags) &
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE;
        vnic->roce_only = rte_le_to_cpu_32(resp->flags) &
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE;
        vnic->rss_dflt_cr = rte_le_to_cpu_32(resp->flags) &
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE;

        return rc;
}

int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
        struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
                                                bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC, -1, resp);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        vnic->rss_rule = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);

        return rc;
}

int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
        struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
                                                bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE, -1, resp);

        req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->rss_rule);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        vnic->rss_rule = INVALID_HW_RING_ID;

        return rc;
}

int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_free_input req = {.req_type = 0 };
        struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;

        if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
                return rc;

        HWRM_PREP(req, VNIC_FREE, -1, resp);

        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        vnic->fw_vnic_id = INVALID_HW_RING_ID;
        return rc;
}

int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
                           struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
        struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_RSS_CFG, -1, resp);

        req.hash_type = rte_cpu_to_le_32(vnic->hash_type);

        req.ring_grp_tbl_addr =
            rte_cpu_to_le_64(vnic->rss_table_dma_addr);
        req.hash_key_tbl_addr =
            rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
        req.rss_ctx_idx = rte_cpu_to_le_16(vnic->rss_rule);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
{
        struct hwrm_func_cfg_input req = {0};
        struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        int rc;

        HWRM_PREP(req, FUNC_CFG, -1, resp);

        req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
        req.enables = rte_cpu_to_le_32(
                        HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
        memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
        req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
        HWRM_CHECK_RESULT;

        bp->pf.vf_info[vf].random_mac = false;

        return rc;
}

/*
 * HWRM utility functions
 */
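/*
 * Ring/group indexing convention used throughout the helpers below:
 * index 0 is reserved for the default completion ring, Rx rings occupy
 * indices 1..rx_cp_nr_rings, and Tx rings follow from rx_cp_nr_rings + 1
 * onward (hence the "idx = i + 1" offsets).
 */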

int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
{
        unsigned int i;
        int rc = 0;

        for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
                struct bnxt_tx_queue *txq;
                struct bnxt_rx_queue *rxq;
                struct bnxt_cp_ring_info *cpr;

                if (i >= bp->rx_cp_nr_rings) {
                        txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
                        cpr = txq->cp_ring;
                } else {
                        rxq = bp->rx_queues[i];
                        cpr = rxq->cp_ring;
                }

                rc = bnxt_hwrm_stat_clear(bp, cpr);
                if (rc)
                        return rc;
        }
        return 0;
}

int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
{
        int rc;
        unsigned int i;
        struct bnxt_cp_ring_info *cpr;

        for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
                unsigned int idx = i + 1;

                if (i >= bp->rx_cp_nr_rings)
                        cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
                else
                        cpr = bp->rx_queues[i]->cp_ring;
                if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
                        rc = bnxt_hwrm_stat_ctx_free(bp, cpr, idx);
                        if (rc)
                                return rc;
                }
        }
        return 0;
}

int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
{
        unsigned int i;
        int rc = 0;

        for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
                struct bnxt_tx_queue *txq;
                struct bnxt_rx_queue *rxq;
                struct bnxt_cp_ring_info *cpr;
                unsigned int idx = i + 1;

                if (i >= bp->rx_cp_nr_rings) {
                        txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
                        cpr = txq->cp_ring;
                } else {
                        rxq = bp->rx_queues[i];
                        cpr = rxq->cp_ring;
                }

                rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, idx);

                if (rc)
                        return rc;
        }
        return rc;
}

int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
{
        uint16_t i;
        int rc = 0;

        for (i = 0; i < bp->rx_cp_nr_rings; i++) {
                unsigned int idx = i + 1;

                if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID) {
                        RTE_LOG(ERR, PMD,
                                "Attempt to free invalid ring group %d\n",
                                idx);
                        continue;
                }

                rc = bnxt_hwrm_ring_grp_free(bp, idx);

                if (rc)
                        return rc;
        }
        return rc;
}

static void bnxt_free_cp_ring(struct bnxt *bp,
                              struct bnxt_cp_ring_info *cpr, unsigned int idx)
{
        struct bnxt_ring *cp_ring = cpr->cp_ring_struct;

        bnxt_hwrm_ring_free(bp, cp_ring,
                        HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL);
        cp_ring->fw_ring_id = INVALID_HW_RING_ID;
        bp->grp_info[idx].cp_fw_ring_id = INVALID_HW_RING_ID;
        memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
                        sizeof(*cpr->cp_desc_ring));
        cpr->cp_raw_cons = 0;
}

int bnxt_free_all_hwrm_rings(struct bnxt *bp)
{
        unsigned int i;
        int rc = 0;

        for (i = 0; i < bp->tx_cp_nr_rings; i++) {
                struct bnxt_tx_queue *txq = bp->tx_queues[i];
                struct bnxt_tx_ring_info *txr = txq->tx_ring;
                struct bnxt_ring *ring = txr->tx_ring_struct;
                struct bnxt_cp_ring_info *cpr = txq->cp_ring;
                unsigned int idx = bp->rx_cp_nr_rings + i + 1;

                if (ring->fw_ring_id != INVALID_HW_RING_ID) {
                        bnxt_hwrm_ring_free(bp, ring,
                                        HWRM_RING_FREE_INPUT_RING_TYPE_TX);
                        ring->fw_ring_id = INVALID_HW_RING_ID;
                        memset(txr->tx_desc_ring, 0,
                                        txr->tx_ring_struct->ring_size *
                                        sizeof(*txr->tx_desc_ring));
                        memset(txr->tx_buf_ring, 0,
                                        txr->tx_ring_struct->ring_size *
                                        sizeof(*txr->tx_buf_ring));
                        txr->tx_prod = 0;
                        txr->tx_cons = 0;
                }
                if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
                        bnxt_free_cp_ring(bp, cpr, idx);
        }

        for (i = 0; i < bp->rx_cp_nr_rings; i++) {
                struct bnxt_rx_queue *rxq = bp->rx_queues[i];
                struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
                struct bnxt_ring *ring = rxr->rx_ring_struct;
                struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
                unsigned int idx = i + 1;

                if (ring->fw_ring_id != INVALID_HW_RING_ID) {
                        bnxt_hwrm_ring_free(bp, ring,
                                        HWRM_RING_FREE_INPUT_RING_TYPE_RX);
                        ring->fw_ring_id = INVALID_HW_RING_ID;
                        bp->grp_info[idx].rx_fw_ring_id = INVALID_HW_RING_ID;
                        memset(rxr->rx_desc_ring, 0,
                                        rxr->rx_ring_struct->ring_size *
                                        sizeof(*rxr->rx_desc_ring));
                        memset(rxr->rx_buf_ring, 0,
                                        rxr->rx_ring_struct->ring_size *
                                        sizeof(*rxr->rx_buf_ring));
                        rxr->rx_prod = 0;
                }
                if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
                        bnxt_free_cp_ring(bp, cpr, idx);
        }

        /* Default completion ring */
        {
                struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;

                if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
                        bnxt_free_cp_ring(bp, cpr, 0);
        }

        return rc;
}

int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
{
        uint16_t i;
        int rc = 0;

        for (i = 0; i < bp->rx_cp_nr_rings; i++) {
                unsigned int idx = i + 1;

                if (bp->grp_info[idx].cp_fw_ring_id == INVALID_HW_RING_ID ||
                    bp->grp_info[idx].rx_fw_ring_id == INVALID_HW_RING_ID)
                        continue;

                rc = bnxt_hwrm_ring_grp_alloc(bp, idx);

                if (rc)
                        return rc;
        }
        return rc;
}

void bnxt_free_hwrm_resources(struct bnxt *bp)
{
        /* Release the HWRM command response buffer */
        rte_free(bp->hwrm_cmd_resp_addr);
        bp->hwrm_cmd_resp_addr = NULL;
        bp->hwrm_cmd_resp_dma_addr = 0;
}

int bnxt_alloc_hwrm_resources(struct bnxt *bp)
{
        struct rte_pci_device *pdev = bp->pdev;
        char type[RTE_MEMZONE_NAMESIZE];

        sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x", pdev->addr.domain,
                pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
        bp->max_req_len = HWRM_MAX_REQ_LEN;
        bp->max_resp_len = HWRM_MAX_RESP_LEN;
        bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
        if (bp->hwrm_cmd_resp_addr == NULL)
                return -ENOMEM;
        bp->hwrm_cmd_resp_dma_addr =
                rte_malloc_virt2phy(bp->hwrm_cmd_resp_addr);
        rte_spinlock_init(&bp->hwrm_lock);

        return 0;
}
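/*
 * Note: the response buffer is handed to the device by physical address
 * (rte_malloc_virt2phy() above), which relies on rte_malloc() returning
 * DMA-able memory from the hugepage-backed heap.
 */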

int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        struct bnxt_filter_info *filter;
        int rc = 0;

        STAILQ_FOREACH(filter, &vnic->filter, next) {
                rc = bnxt_hwrm_clear_filter(bp, filter);
                if (rc)
                        break;
        }
        return rc;
}

int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        struct bnxt_filter_info *filter;
        int rc = 0;

        STAILQ_FOREACH(filter, &vnic->filter, next) {
                rc = bnxt_hwrm_set_filter(bp, vnic, filter);
                if (rc)
                        break;
        }
        return rc;
}

void bnxt_free_all_hwrm_resources(struct bnxt *bp)
{
        struct bnxt_vnic_info *vnic;
        unsigned int i;

        if (bp->vnic_info == NULL)
                return;

        vnic = &bp->vnic_info[0];
        if (BNXT_PF(bp))
                bnxt_hwrm_cfa_l2_clear_rx_mask(bp, vnic);

        /* VNIC resources */
        for (i = 0; i < bp->nr_vnics; i++) {
                struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

                bnxt_clear_hwrm_vnic_filters(bp, vnic);

                bnxt_hwrm_vnic_ctx_free(bp, vnic);
                bnxt_hwrm_vnic_free(bp, vnic);
        }
        /* Ring resources */
        bnxt_free_all_hwrm_rings(bp);
        bnxt_free_all_hwrm_ring_grps(bp);
        bnxt_free_all_hwrm_stat_ctxs(bp);
}
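/*
 * Teardown order above mirrors allocation in reverse: per-VNIC filters and
 * RSS/COS/LB contexts go first, then the VNICs themselves, and finally the
 * rings, ring groups and statistics contexts they referenced.
 */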

static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
{
        uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;

        if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
                return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;

        switch (conf_link_speed) {
        case ETH_LINK_SPEED_10M_HD:
        case ETH_LINK_SPEED_100M_HD:
                return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
        }
        return hw_link_duplex;
}

1443 static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
1444 {
1445         uint16_t eth_link_speed = 0;
1446
1447         if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
1448                 return ETH_LINK_SPEED_AUTONEG;
1449
1450         switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
1451         case ETH_LINK_SPEED_100M:
1452         case ETH_LINK_SPEED_100M_HD:
1453                 eth_link_speed =
1454                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
1455                 break;
1456         case ETH_LINK_SPEED_1G:
1457                 eth_link_speed =
1458                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
1459                 break;
1460         case ETH_LINK_SPEED_2_5G:
1461                 eth_link_speed =
1462                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
1463                 break;
1464         case ETH_LINK_SPEED_10G:
1465                 eth_link_speed =
1466                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
1467                 break;
1468         case ETH_LINK_SPEED_20G:
1469                 eth_link_speed =
1470                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
1471                 break;
1472         case ETH_LINK_SPEED_25G:
1473                 eth_link_speed =
1474                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
1475                 break;
1476         case ETH_LINK_SPEED_40G:
1477                 eth_link_speed =
1478                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
1479                 break;
1480         case ETH_LINK_SPEED_50G:
1481                 eth_link_speed =
1482                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
1483                 break;
1484         default:
1485                 RTE_LOG(ERR, PMD,
1486                         "Unsupported link speed %u; default to AUTO\n",
1487                         conf_link_speed);
1488                 break;
1489         }
1490         return eth_link_speed;
1491 }
1492
1493 #define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
1494                 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
1495                 ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
1496                 ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G)
1497
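/*
 * Validate the requested link speed: a fixed speed must be exactly one
 * supported speed, and an autoneg mask must contain at least one
 * supported speed.  Returns 0 on success, -EINVAL otherwise.
 */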
1498 static int bnxt_valid_link_speed(uint32_t link_speed, uint8_t port_id)
1499 {
1500         uint32_t one_speed;
1501
1502         if (link_speed == ETH_LINK_SPEED_AUTONEG)
1503                 return 0;
1504
1505         if (link_speed & ETH_LINK_SPEED_FIXED) {
1506                 one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
1507
1508                 if (one_speed & (one_speed - 1)) {
1509                         RTE_LOG(ERR, PMD,
1510                                 "Invalid advertised speeds (%u) for port %u\n",
1511                                 link_speed, port_id);
1512                         return -EINVAL;
1513                 }
1514                 if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
1515                         RTE_LOG(ERR, PMD,
1516                                 "Unsupported advertised speed (%u) for port %u\n",
1517                                 link_speed, port_id);
1518                         return -EINVAL;
1519                 }
1520         } else {
1521                 if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
1522                         RTE_LOG(ERR, PMD,
1523                                 "Unsupported advertised speeds (%u) for port %u\n",
1524                                 link_speed, port_id);
1525                         return -EINVAL;
1526                 }
1527         }
1528         return 0;
1529 }
1530
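/*
 * Convert an rte_eth advertised-speeds bitmap into the HWRM auto link
 * speed mask; ETH_LINK_SPEED_AUTONEG advertises every supported speed.
 */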
1531 static uint16_t bnxt_parse_eth_link_speed_mask(uint32_t link_speed)
1532 {
1533         uint16_t ret = 0;
1534
1535         if (link_speed == ETH_LINK_SPEED_AUTONEG)
1536                 link_speed = BNXT_SUPPORTED_SPEEDS;
1537
1538         if (link_speed & ETH_LINK_SPEED_100M)
1539                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
1540         if (link_speed & ETH_LINK_SPEED_100M_HD)
1541                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
1542         if (link_speed & ETH_LINK_SPEED_1G)
1543                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
1544         if (link_speed & ETH_LINK_SPEED_2_5G)
1545                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
1546         if (link_speed & ETH_LINK_SPEED_10G)
1547                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
1548         if (link_speed & ETH_LINK_SPEED_20G)
1549                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
1550         if (link_speed & ETH_LINK_SPEED_25G)
1551                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
1552         if (link_speed & ETH_LINK_SPEED_40G)
1553                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
1554         if (link_speed & ETH_LINK_SPEED_50G)
1555                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
1556         return ret;
1557 }
1558
1559 static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
1560 {
1561         uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;
1562
1563         switch (hw_link_speed) {
1564         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
1565                 eth_link_speed = ETH_SPEED_NUM_100M;
1566                 break;
1567         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
1568                 eth_link_speed = ETH_SPEED_NUM_1G;
1569                 break;
1570         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
1571                 eth_link_speed = ETH_SPEED_NUM_2_5G;
1572                 break;
1573         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
1574                 eth_link_speed = ETH_SPEED_NUM_10G;
1575                 break;
1576         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
1577                 eth_link_speed = ETH_SPEED_NUM_20G;
1578                 break;
1579         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
1580                 eth_link_speed = ETH_SPEED_NUM_25G;
1581                 break;
1582         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
1583                 eth_link_speed = ETH_SPEED_NUM_40G;
1584                 break;
1585         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
1586                 eth_link_speed = ETH_SPEED_NUM_50G;
1587                 break;
1588         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
1589         default:
1590                 RTE_LOG(ERR, PMD, "HWRM link speed %d not defined\n",
1591                         hw_link_speed);
1592                 break;
1593         }
1594         return eth_link_speed;
1595 }
1596
1597 static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
1598 {
1599         uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;
1600
1601         switch (hw_link_duplex) {
1602         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
1603         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
1604                 eth_link_duplex = ETH_LINK_FULL_DUPLEX;
1605                 break;
1606         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
1607                 eth_link_duplex = ETH_LINK_HALF_DUPLEX;
1608                 break;
1609         default:
1610                 RTE_LOG(ERR, PMD, "HWRM link duplex %d not defined\n",
1611                         hw_link_duplex);
1612                 break;
1613         }
1614         return eth_link_duplex;
1615 }
1616
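/*
 * Query the PHY state via HWRM_PORT_PHY_QCFG and translate it into the
 * rte_eth_link representation expected by the ethdev layer.
 */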
1617 int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
1618 {
1619         int rc = 0;
1620         struct bnxt_link_info *link_info = &bp->link_info;
1621
1622         rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
1623         if (rc) {
1624                 RTE_LOG(ERR, PMD,
1625                         "Get link config failed with rc %d\n", rc);
1626                 goto exit;
1627         }
1628         if (link_info->link_up)
1629                 link->link_speed =
1630                         bnxt_parse_hw_link_speed(link_info->link_speed);
1631         else
1632                 link->link_speed = ETH_SPEED_NUM_NONE;
1633         link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
1634         link->link_status = link_info->link_up;
1635         link->link_autoneg = link_info->auto_mode ==
1636                 HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
1637                 ETH_LINK_SPEED_FIXED : ETH_LINK_SPEED_AUTONEG;
1638 exit:
1639         return rc;
1640 }
1641
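/*
 * Program the PHY from dev_conf->link_speeds: force a single speed when
 * one is requested, otherwise (re)start autoneg with the advertised
 * speed mask.  NPAR PFs and VFs do not own the PHY, so this is a no-op
 * for them.
 */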
1642 int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
1643 {
1644         int rc = 0;
1645         struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
1646         struct bnxt_link_info link_req;
1647         uint16_t speed;
1648
1649         if (BNXT_NPAR_PF(bp) || BNXT_VF(bp))
1650                 return 0;
1651
1652         rc = bnxt_valid_link_speed(dev_conf->link_speeds,
1653                         bp->eth_dev->data->port_id);
1654         if (rc)
1655                 goto error;
1656
1657         memset(&link_req, 0, sizeof(link_req));
1658         link_req.link_up = link_up;
1659         if (!link_up)
1660                 goto port_phy_cfg;
1661
1662         speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
1663         link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
1664         if (speed == 0) {
1665                 link_req.phy_flags |=
1666                                 HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
1667                 link_req.auto_mode =
1668                                 HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
1669                 link_req.auto_link_speed_mask =
1670                         bnxt_parse_eth_link_speed_mask(dev_conf->link_speeds);
1671         } else {
1672                 link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
1673                 link_req.link_speed = speed;
1674                 RTE_LOG(INFO, PMD, "Set Link Speed %x\n", speed);
1675         }
1676         link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
1677         link_req.auto_pause = bp->link_info.auto_pause;
1678         link_req.force_pause = bp->link_info.force_pause;
1679
1680 port_phy_cfg:
1681         rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
1682         if (rc) {
1683                 RTE_LOG(ERR, PMD,
1684                         "Set link config failed with rc %d\n", rc);
1685         }
1686
1687         rte_delay_ms(BNXT_LINK_WAIT_INTERVAL);
1688 error:
1689         return rc;
1690 }
1691
1692 /* JIRA 22088 */
1693 int bnxt_hwrm_func_qcfg(struct bnxt *bp)
1694 {
1695         struct hwrm_func_qcfg_input req = {0};
1696         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1697         int rc = 0;
1698
1699         HWRM_PREP(req, FUNC_QCFG, -1, resp);
1700         req.fid = rte_cpu_to_le_16(0xffff);
1701
1702         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1703
1704         HWRM_CHECK_RESULT;
1705
1706         /* Hard Coded.. 0xfff VLAN ID mask */
1707         bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
1708
1709         switch (resp->port_partition_type) {
1710         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
1711         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
1712         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
1713                 bp->port_partition_type = resp->port_partition_type;
1714                 break;
1715         default:
1716                 bp->port_partition_type = 0;
1717                 break;
1718         }
1719
1720         return rc;
1721 }
1722
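/*
 * Fake a FUNC_QCAPS response from a FUNC_CFG request; used as a fallback
 * when querying a VF's actual allocation fails.
 */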
1723 static void copy_func_cfg_to_qcaps(struct hwrm_func_cfg_input *fcfg,
1724                                    struct hwrm_func_qcaps_output *qcaps)
1725 {
1726         qcaps->max_rsscos_ctx = fcfg->num_rsscos_ctxs;
1727         memcpy(qcaps->mac_address, fcfg->dflt_mac_addr,
1728                sizeof(qcaps->mac_address));
1729         qcaps->max_l2_ctxs = fcfg->num_l2_ctxs;
1730         qcaps->max_rx_rings = fcfg->num_rx_rings;
1731         qcaps->max_tx_rings = fcfg->num_tx_rings;
1732         qcaps->max_cmpl_rings = fcfg->num_cmpl_rings;
1733         qcaps->max_stat_ctx = fcfg->num_stat_ctxs;
1734         qcaps->max_vfs = 0;
1735         qcaps->first_vf_id = 0;
1736         qcaps->max_vnics = fcfg->num_vnics;
1737         qcaps->max_decap_records = 0;
1738         qcaps->max_encap_records = 0;
1739         qcaps->max_tx_wm_flows = 0;
1740         qcaps->max_tx_em_flows = 0;
1741         qcaps->max_rx_wm_flows = 0;
1742         qcaps->max_rx_em_flows = 0;
1743         qcaps->max_flow_id = 0;
1744         qcaps->max_mcast_filters = fcfg->num_mcast_filters;
1745         qcaps->max_sp_tx_rings = 0;
1746         qcaps->max_hw_ring_grps = fcfg->num_hw_ring_grps;
1747 }
1748
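/*
 * Issue HWRM_FUNC_CFG for the PF itself (fid 0xffff), claiming all known
 * maximum resources but only the requested number of TX rings.
 */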
1749 static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
1750 {
1751         struct hwrm_func_cfg_input req = {0};
1752         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1753         int rc;
1754
1755         req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
1756                         HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
1757                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
1758                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
1759                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
1760                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
1761                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
1762                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
1763                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
1764                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
1765         req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
1766         req.mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
1767                                    ETHER_CRC_LEN + VLAN_TAG_SIZE);
1768         req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
1769                                    ETHER_CRC_LEN + VLAN_TAG_SIZE);
1770         req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
1771         req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx);
1772         req.num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings);
1773         req.num_tx_rings = rte_cpu_to_le_16(tx_rings);
1774         req.num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings);
1775         req.num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx);
1776         req.num_vnics = rte_cpu_to_le_16(bp->max_vnics);
1777         req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps);
1778         req.fid = rte_cpu_to_le_16(0xffff);
1779
1780         HWRM_PREP(req, FUNC_CFG, -1, resp);
1781
1782         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1783         HWRM_CHECK_RESULT;
1784
1785         return rc;
1786 }
1787
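/*
 * Build a FUNC_CFG request that splits the PF's resources evenly between
 * the PF and its VFs; hence the (num_vfs + 1) divisor on each field.
 */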
1788 static void populate_vf_func_cfg_req(struct bnxt *bp,
1789                                      struct hwrm_func_cfg_input *req,
1790                                      int num_vfs)
1791 {
1792         req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
1793                         HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
1794                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
1795                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
1796                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
1797                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
1798                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
1799                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
1800                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
1801                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
1802
1803         req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
1804                                     ETHER_CRC_LEN + VLAN_TAG_SIZE);
1805         req->mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
1806                                     ETHER_CRC_LEN + VLAN_TAG_SIZE);
1807         req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx /
1808                                                 (num_vfs + 1));
1809         req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
1810         req->num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings /
1811                                                (num_vfs + 1));
1812         req->num_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
1813         req->num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
1814         req->num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
1815         /* TODO: For now, do not support VMDq/RFS on VFs. */
1816         req->num_vnics = rte_cpu_to_le_16(1);
1817         req->num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
1818                                                  (num_vfs + 1));
1819 }
1820
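/*
 * If firmware reports an all-zero default MAC for the VF, generate a
 * random address and remember that we did; otherwise keep the firmware
 * assigned MAC.
 */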
1821 static void add_random_mac_if_needed(struct bnxt *bp,
1822                                      struct hwrm_func_cfg_input *cfg_req,
1823                                      int vf)
1824 {
1825         struct ether_addr mac;
1826
1827         if (bnxt_hwrm_func_qcfg_vf_default_mac(bp, vf, &mac))
1828                 return;
1829
1830         if (memcmp(mac.addr_bytes, "\x00\x00\x00\x00\x00\x00", 6) == 0) {
1831                 cfg_req->enables |=
1832                 rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
1833                 eth_random_addr(cfg_req->dflt_mac_addr);
1834                 bp->pf.vf_info[vf].random_mac = true;
1835         } else {
1836                 memcpy(cfg_req->dflt_mac_addr, mac.addr_bytes, ETHER_ADDR_LEN);
1837         }
1838 }
1839
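/*
 * Query the resources actually granted to a VF and subtract them from
 * the PF's remaining pool, falling back to the values requested in the
 * FUNC_CFG request if the query fails.
 */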
1840 static void reserve_resources_from_vf(struct bnxt *bp,
1841                                       struct hwrm_func_cfg_input *cfg_req,
1842                                       int vf)
1843 {
1844         struct hwrm_func_qcaps_input req = {0};
1845         struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
1846         int rc;
1847
1848         /* Get the actual allocated values now */
1849         HWRM_PREP(req, FUNC_QCAPS, -1, resp);
1850         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
1851         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1852
1853         if (rc) {
1854                 RTE_LOG(ERR, PMD, "hwrm_func_qcaps failed rc:%d\n", rc);
1855                 copy_func_cfg_to_qcaps(cfg_req, resp);
1856         } else if (resp->error_code) {
1857                 rc = rte_le_to_cpu_16(resp->error_code);
1858                 RTE_LOG(ERR, PMD, "hwrm_func_qcaps error %d\n", rc);
1859                 copy_func_cfg_to_qcaps(cfg_req, resp);
1860         }
1861
1862         bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->max_rsscos_ctx);
1863         bp->max_stat_ctx -= rte_le_to_cpu_16(resp->max_stat_ctx);
1864         bp->max_cp_rings -= rte_le_to_cpu_16(resp->max_cmpl_rings);
1865         bp->max_tx_rings -= rte_le_to_cpu_16(resp->max_tx_rings);
1866         bp->max_rx_rings -= rte_le_to_cpu_16(resp->max_rx_rings);
1867         bp->max_l2_ctx -= rte_le_to_cpu_16(resp->max_l2_ctxs);
1868         /*
1869          * TODO: VMDq is not yet supported on VFs, so max_vnics is always
1870          * forced to 1 here and there is nothing to subtract.
1871          */
1872         /* bp->max_vnics -= rte_le_to_cpu_16(resp->max_vnics); */
1873         bp->max_ring_grps -= rte_le_to_cpu_16(resp->max_hw_ring_grps);
1874 }
1875
1876 static int update_pf_resource_max(struct bnxt *bp)
1877 {
1878         struct hwrm_func_qcfg_input req = {0};
1879         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1880         int rc;
1881
1882         /* And copy the allocated numbers into the pf struct */
1883         HWRM_PREP(req, FUNC_QCFG, -1, resp);
1884         req.fid = rte_cpu_to_le_16(0xffff);
1885         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1886         HWRM_CHECK_RESULT;
1887
1888         /* Only TX ring value reflects actual allocation? TODO */
1889         bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
1890         bp->pf.evb_mode = resp->evb_mode;
1891
1892         return rc;
1893 }
1894
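/*
 * PF-only initialization: no VFs are created, so standard TX ring mode
 * is disabled and the PF claims all available resources.
 */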
1895 int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
1896 {
1897         int rc;
1898
1899         if (!BNXT_PF(bp)) {
1900                 RTE_LOG(ERR, PMD, "Attempt to allocate PF resources on a VF!\n");
1901                 return -1;
1902         }
1903
1904         rc = bnxt_hwrm_func_qcaps(bp);
1905         if (rc)
1906                 return rc;
1907
1908         bp->pf.func_cfg_flags &=
1909                 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
1910                   HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
1911         bp->pf.func_cfg_flags |=
1912                 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
1913         rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
1914         return rc;
1915 }
1916
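/*
 * Enable num_vfs VFs: shrink the PF to one TX ring so enough rings remain
 * for the VFs, register the request-forwarding buffer (which is sized
 * from pf.active_vfs, set below), configure each VF while deducting its
 * resources, then re-expand the PF to whatever is left.
 */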
1917 int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
1918 {
1919         struct hwrm_func_cfg_input req = {0};
1920         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1921         int i;
1922         size_t sz;
1923         int rc = 0;
1924         size_t req_buf_sz;
1925
1926         if (!BNXT_PF(bp)) {
1927                 RTE_LOG(ERR, PMD, "Attempt to allocate VFs on a VF!\n");
1928                 return -1;
1929         }
1930
1931         rc = bnxt_hwrm_func_qcaps(bp);
1932
1933         if (rc)
1934                 return rc;
1935
1936         bp->pf.active_vfs = num_vfs;
1937
1938         /*
1939          * First, configure the PF to only use one TX ring.  This ensures that
1940          * there are enough rings for all VFs.
1941          *
1942          * If we don't do this, when we call func_alloc() later, we will lock
1943          * extra rings to the PF that won't be available during func_cfg() of
1944          * the VFs.
1945          *
1946          * This has been fixed with firmware versions above 20.6.54
1947          */
1948         bp->pf.func_cfg_flags &=
1949                 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
1950                   HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
1951         bp->pf.func_cfg_flags |=
1952                 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
1953         rc = bnxt_hwrm_pf_func_cfg(bp, 1);
1954         if (rc)
1955                 return rc;
1956
1957         /*
1958          * Now, create and register a buffer to hold forwarded VF requests
1959          */
1960         req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;
1961         bp->pf.vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
1962                 page_roundup(req_buf_sz));
1963         if (bp->pf.vf_req_buf == NULL) {
1964                 rc = -ENOMEM;
1965                 goto error_free;
1966         }
1967         for (sz = 0; sz < req_buf_sz; sz += getpagesize())
1968                 rte_mem_lock_page(((char *)bp->pf.vf_req_buf) + sz);
1969         for (i = 0; i < num_vfs; i++)
1970                 bp->pf.vf_info[i].req_buf = ((char *)bp->pf.vf_req_buf) +
1971                                         (i * HWRM_MAX_REQ_LEN);
1972
1973         rc = bnxt_hwrm_func_buf_rgtr(bp);
1974         if (rc)
1975                 goto error_free;
1976
1977         populate_vf_func_cfg_req(bp, &req, num_vfs);
1978
1979         bp->pf.active_vfs = 0;
1980         for (i = 0; i < num_vfs; i++) {
1981                 add_random_mac_if_needed(bp, &req, i);
1982
1983                 HWRM_PREP(req, FUNC_CFG, -1, resp);
1984                 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[i].func_cfg_flags);
1985                 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[i].fid);
1986                 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1987
1988                 /* Clear enable flag for next pass */
1989                 req.enables &= ~rte_cpu_to_le_32(
1990                                 HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
1991
1992                 if (rc || resp->error_code) {
1993                         RTE_LOG(ERR, PMD,
1994                                 "Failed to initialize VF %d\n", i);
1995                         RTE_LOG(ERR, PMD,
1996                                 "Not all VFs available. (%d, %d)\n",
1997                                 rc, resp->error_code);
1998                         break;
1999                 }
2000
2001                 reserve_resources_from_vf(bp, &req, i);
2002                 bp->pf.active_vfs++;
2003         }
2004
2005         /*
2006          * Now configure the PF to use "the rest" of the resources.
2007          * We use STD_TX_RING_MODE here, which limits the number of TX
2008          * rings; this allows QoS to function properly.  Without it, the
2009          * PF rings would break the bandwidth settings.
2010          */
2011         rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
2012         if (rc)
2013                 goto error_free;
2014
2015         rc = update_pf_resource_max(bp);
2016         if (rc)
2017                 goto error_free;
2018
2019         return rc;
2020
2021 error_free:
2022         bnxt_hwrm_func_buf_unrgtr(bp);
2023         return rc;
2024 }
2025
2026
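/*
 * Register the VF request-forwarding buffer with the firmware so that
 * HWRM commands issued by VFs are passed up to the PF driver.
 */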
2027 int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
2028 {
2029         int rc = 0;
2030         struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
2031         struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
2032
2033         HWRM_PREP(req, FUNC_BUF_RGTR, -1, resp);
2034
2035         req.req_buf_num_pages = rte_cpu_to_le_16(1);
2036         req.req_buf_page_size = rte_cpu_to_le_16(
2037                          page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));
2038         req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
2039         req.req_buf_page_addr[0] =
2040                 rte_cpu_to_le_64(rte_mem_virt2phy(bp->pf.vf_req_buf));
2041         if (req.req_buf_page_addr[0] == RTE_BAD_PHYS_ADDR) {
2042                 RTE_LOG(ERR, PMD,
2043                         "unable to map buffer address to physical memory\n");
2044                 return -ENOMEM;
2045         }
2046
2047         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2048
2049         HWRM_CHECK_RESULT;
2050
2051         return rc;
2052 }
2053
2054 int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp)
2055 {
2056         int rc = 0;
2057         struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 };
2058         struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
2059
2060         HWRM_PREP(req, FUNC_BUF_UNRGTR, -1, resp);
2061
2062         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2063
2064         HWRM_CHECK_RESULT;
2065
2066         return rc;
2067 }
2068
2069 int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
2070 {
2071         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2072         struct hwrm_func_cfg_input req = {0};
2073         int rc;
2074
2075         HWRM_PREP(req, FUNC_CFG, -1, resp);
2076         req.fid = rte_cpu_to_le_16(0xffff);
2077         req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
2078         req.enables = rte_cpu_to_le_32(
2079                         HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
2080         req.async_event_cr = rte_cpu_to_le_16(
2081                         bp->def_cp_ring->cp_ring_struct->fw_ring_id);
2082         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2083         HWRM_CHECK_RESULT;
2084
2085         return rc;
2086 }
2087
2088 int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
2089 {
2090         struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2091         struct hwrm_func_vf_cfg_input req = {0};
2092         int rc;
2093
2094         HWRM_PREP(req, FUNC_VF_CFG, -1, resp);
2095         req.enables = rte_cpu_to_le_32(
2096                         HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
2097         req.async_event_cr = rte_cpu_to_le_16(
2098                         bp->def_cp_ring->cp_ring_struct->fw_ring_id);
2099         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2100         HWRM_CHECK_RESULT;
2101
2102         return rc;
2103 }
2104
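/*
 * Reject a forwarded VF command: bounce the encapsulated request back to
 * the firmware with a rejection for the originating function.
 */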
2105 int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
2106                               void *encaped, size_t ec_size)
2107 {
2108         int rc = 0;
2109         struct hwrm_reject_fwd_resp_input req = {.req_type = 0};
2110         struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
2111
2112         if (ec_size > sizeof(req.encap_request))
2113                 return -1;
2114
2115         HWRM_PREP(req, REJECT_FWD_RESP, -1, resp);
2116
2117         req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
2118         memcpy(req.encap_request, encaped, ec_size);
2119
2120         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2121
2122         HWRM_CHECK_RESULT;
2123
2124         return rc;
2125 }
2126
2127 int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
2128                                        struct ether_addr *mac)
2129 {
2130         struct hwrm_func_qcfg_input req = {0};
2131         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2132         int rc;
2133
2134         HWRM_PREP(req, FUNC_QCFG, -1, resp);
2135         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2136         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2137
2138         HWRM_CHECK_RESULT;
2139
2140         memcpy(mac->addr_bytes, resp->mac_address, ETHER_ADDR_LEN);
2141         return rc;
2142 }
2143
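/*
 * Approve a forwarded VF command: hand the encapsulated request back to
 * the firmware for execution on behalf of the originating function.
 */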
2144 int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
2145                             void *encaped, size_t ec_size)
2146 {
2147         int rc = 0;
2148         struct hwrm_exec_fwd_resp_input req = {.req_type = 0};
2149         struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
2150
2151         if (ec_size > sizeof(req.encap_request))
2152                 return -1;
2153
2154         HWRM_PREP(req, EXEC_FWD_RESP, -1, resp);
2155
2156         req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
2157         memcpy(req.encap_request, encaped, ec_size);
2158
2159         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2160
2161         HWRM_CHECK_RESULT;
2162
2163         return rc;
2164 }