net/bnxt: refactor to properly allocate resources for PF/VF
[dpdk.git] drivers/net/bnxt/bnxt_hwrm.c
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) Broadcom Limited.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Broadcom Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <unistd.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_version.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_ring.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"

#include <rte_io.h>

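/*
 * Command completion is polled for up to HWRM_CMD_TIMEOUT iterations with a
 * 600us delay per iteration (see bnxt_hwrm_send_message_locked() below),
 * i.e. roughly 1.2 seconds end to end.
 */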
#define HWRM_CMD_TIMEOUT                2000

struct bnxt_plcmodes_cfg {
        uint32_t        flags;
        uint16_t        jumbo_thresh;
        uint16_t        hds_offset;
        uint16_t        hds_threshold;
};

static int page_getenum(size_t size)
{
        if (size <= 1 << 4)
                return 4;
        if (size <= 1 << 12)
                return 12;
        if (size <= 1 << 13)
                return 13;
        if (size <= 1 << 16)
                return 16;
        if (size <= 1 << 21)
                return 21;
        if (size <= 1 << 22)
                return 22;
        if (size <= 1 << 30)
                return 30;
        RTE_LOG(ERR, PMD, "Page size %zu out of range\n", size);
        return sizeof(void *) * 8 - 1;
}

static int page_roundup(size_t size)
{
        return 1 << page_getenum(size);
}
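
/*
 * Illustrative examples for the helpers above: page_getenum(3000) returns 12,
 * the smallest supported power-of-two exponent whose page fits 3000 bytes, so
 * page_roundup(3000) returns 4096.  Sizes above 1GB fall through to the error
 * path.
 */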

/*
 * HWRM Functions (sent to HWRM)
 * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
 * fails (e.g. a timeout), or a positive, non-zero HWRM error code if the
 * command is rejected by the ChiMP.
 */

static int bnxt_hwrm_send_message_locked(struct bnxt *bp, void *msg,
                                         uint32_t msg_len)
{
        unsigned int i;
        struct input *req = msg;
        struct output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t *data = msg;
        uint8_t *bar;
        uint8_t *valid;

        /* Write request msg to hwrm channel */
        for (i = 0; i < msg_len; i += 4) {
                bar = (uint8_t *)bp->bar0 + i;
                rte_write32(*data, bar);
                data++;
        }

        /* Zero the rest of the request space */
        for (; i < bp->max_req_len; i += 4) {
                bar = (uint8_t *)bp->bar0 + i;
                rte_write32(0, bar);
        }

        /* Ring channel doorbell */
        bar = (uint8_t *)bp->bar0 + 0x100;
        rte_write32(1, bar);

        /* Poll for the valid bit */
        for (i = 0; i < HWRM_CMD_TIMEOUT; i++) {
                /* Sanity check on the resp->resp_len */
                rte_rmb();
                if (resp->resp_len && resp->resp_len <=
                                bp->max_resp_len) {
                        /* Last byte of resp contains the valid key */
                        valid = (uint8_t *)resp + resp->resp_len - 1;
                        if (*valid == HWRM_RESP_VALID_KEY)
                                break;
                }
                rte_delay_us(600);
        }

        if (i >= HWRM_CMD_TIMEOUT) {
                RTE_LOG(ERR, PMD, "Error sending msg %x\n",
                        req->req_type);
                goto err_ret;
        }
        return 0;

err_ret:
        return -1;
}

static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg, uint32_t msg_len)
{
        int rc;

        rte_spinlock_lock(&bp->hwrm_lock);
        rc = bnxt_hwrm_send_message_locked(bp, msg, msg_len);
        rte_spinlock_unlock(&bp->hwrm_lock);
        return rc;
}

#define HWRM_PREP(req, type, cr, resp) \
        memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
        req.req_type = rte_cpu_to_le_16(HWRM_##type); \
        req.cmpl_ring = rte_cpu_to_le_16(cr); \
        req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
        req.target_id = rte_cpu_to_le_16(0xffff); \
        req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr)

#define HWRM_CHECK_RESULT \
        { \
                if (rc) { \
                        RTE_LOG(ERR, PMD, "%s failed rc:%d\n", \
                                __func__, rc); \
                        return rc; \
                } \
                if (resp->error_code) { \
                        rc = rte_le_to_cpu_16(resp->error_code); \
                        RTE_LOG(ERR, PMD, "%s error %d\n", __func__, rc); \
                        return rc; \
                } \
        }
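
/*
 * Nearly every command in this file follows the same pattern; for example
 * (mirroring bnxt_hwrm_func_reset() below):
 *
 *     struct hwrm_func_reset_input req = {.req_type = 0 };
 *     struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;
 *
 *     HWRM_PREP(req, FUNC_RESET, -1, resp);
 *     rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 *     HWRM_CHECK_RESULT;
 */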

int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, CFA_L2_SET_RX_MASK, -1, resp);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
        req.mask = 0;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t mask = 0;

        HWRM_PREP(req, CFA_L2_SET_RX_MASK, -1, resp);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

        /* FIXME: add a multicast flag once multicast add/remove options are
         * supported by ethtool.
         */
        if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
        if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
        req.mask = rte_cpu_to_le_32(HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST |
                                    mask);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

int bnxt_hwrm_clear_filter(struct bnxt *bp,
                           struct bnxt_filter_info *filter)
{
        int rc = 0;
        struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, CFA_L2_FILTER_FREE, -1, resp);

        req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        filter->fw_l2_filter_id = -1;

        return 0;
}

int bnxt_hwrm_set_filter(struct bnxt *bp,
                         struct bnxt_vnic_info *vnic,
                         struct bnxt_filter_info *filter)
{
        int rc = 0;
        struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t enables = 0;

        HWRM_PREP(req, CFA_L2_FILTER_ALLOC, -1, resp);

        req.flags = rte_cpu_to_le_32(filter->flags);

        enables = filter->enables |
              HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
        req.dst_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
                memcpy(req.l2_addr, filter->l2_addr,
                       ETHER_ADDR_LEN);
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
                memcpy(req.l2_addr_mask, filter->l2_addr_mask,
                       ETHER_ADDR_LEN);
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
                req.l2_ovlan = filter->l2_ovlan;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
                req.l2_ovlan_mask = filter->l2_ovlan_mask;

        req.enables = rte_cpu_to_le_32(enables);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);

        return rc;
}

int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, void *fwd_cmd)
{
        int rc;
        struct hwrm_exec_fwd_resp_input req = {.req_type = 0 };
        struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, EXEC_FWD_RESP, -1, resp);

        memcpy(req.encap_request, fwd_cmd,
               sizeof(req.encap_request));

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_func_qcaps_input req = {.req_type = 0 };
        struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
        uint16_t new_max_vfs;
        int i;

        HWRM_PREP(req, FUNC_QCAPS, -1, resp);

        req.fid = rte_cpu_to_le_16(0xffff);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
        if (BNXT_PF(bp)) {
                bp->pf.port_id = resp->port_id;
                bp->pf.first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
                new_max_vfs = bp->pdev->max_vfs;
                if (new_max_vfs != bp->pf.max_vfs) {
                        if (bp->pf.vf_info)
                                rte_free(bp->pf.vf_info);
                        bp->pf.vf_info = rte_malloc("bnxt_vf_info",
                            sizeof(bp->pf.vf_info[0]) * new_max_vfs, 0);
                        if (bp->pf.vf_info == NULL) {
                                RTE_LOG(ERR, PMD,
                                        "Failed to alloc memory for VF info\n");
                                return -ENOMEM;
                        }
                        bp->pf.max_vfs = new_max_vfs;
                        for (i = 0; i < new_max_vfs; i++) {
                                bp->pf.vf_info[i].fid = bp->pf.first_vf_id + i;
                                bp->pf.vf_info[i].vlan_table =
                                        rte_zmalloc("VF VLAN table",
                                                    getpagesize(),
                                                    getpagesize());
                                if (bp->pf.vf_info[i].vlan_table == NULL)
                                        RTE_LOG(ERR, PMD,
                                        "Fail to alloc VLAN table for VF %d\n",
                                        i);
                                else
                                        rte_mem_lock_page(
                                                bp->pf.vf_info[i].vlan_table);
                                STAILQ_INIT(&bp->pf.vf_info[i].filter);
                        }
                }
        }

        bp->fw_fid = rte_le_to_cpu_32(resp->fid);
        memcpy(bp->dflt_mac_addr, &resp->mac_address, ETHER_ADDR_LEN);
        bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
        bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
        bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
        bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
        bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
        /* TODO: For now, do not support VMDq/RFS on VFs. */
        if (BNXT_PF(bp)) {
                if (bp->pf.max_vfs)
                        bp->max_vnics = 1;
                else
                        bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
        } else {
                bp->max_vnics = 1;
        }
        bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
        if (BNXT_PF(bp))
                bp->pf.total_vnics = rte_le_to_cpu_16(resp->max_vnics);

        return rc;
}

int bnxt_hwrm_func_reset(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_func_reset_input req = {.req_type = 0 };
        struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, FUNC_RESET, -1, resp);

        req.enables = rte_cpu_to_le_32(0);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

int bnxt_hwrm_func_driver_register(struct bnxt *bp)
{
        int rc;
        struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
        struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;

        if (bp->flags & BNXT_FLAG_REGISTERED)
                return 0;

        HWRM_PREP(req, FUNC_DRV_RGTR, -1, resp);
        req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
                        HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
        req.ver_maj = RTE_VER_YEAR;
        req.ver_min = RTE_VER_MONTH;
        req.ver_upd = RTE_VER_MINOR;

        if (BNXT_PF(bp)) {
                req.enables |= rte_cpu_to_le_32(
                        HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_INPUT_FWD);
                memcpy(req.vf_req_fwd, bp->pf.vf_req_fwd,
                       RTE_MIN(sizeof(req.vf_req_fwd),
                               sizeof(bp->pf.vf_req_fwd)));
        }

        /* Request forwarding of all async events; the memset must come first
         * so it does not clobber the explicitly enabled bit below.
         */
        memset(req.async_event_fwd, 0xff, sizeof(req.async_event_fwd));
        req.async_event_fwd[0] |= rte_cpu_to_le_32(0x1);   /* TODO: Use MACRO */

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        bp->flags |= BNXT_FLAG_REGISTERED;

        return rc;
}

int bnxt_hwrm_ver_get(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_ver_get_input req = {.req_type = 0 };
        struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t my_version;
        uint32_t fw_version;
        uint16_t max_resp_len;
        char type[RTE_MEMZONE_NAMESIZE];

        HWRM_PREP(req, VER_GET, -1, resp);

        req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
        req.hwrm_intf_min = HWRM_VERSION_MINOR;
        req.hwrm_intf_upd = HWRM_VERSION_UPDATE;

        /*
         * Hold the lock since we may be adjusting the response pointers.
         */
        rte_spinlock_lock(&bp->hwrm_lock);
        rc = bnxt_hwrm_send_message_locked(bp, &req, sizeof(req));

        /*
         * Cannot use HWRM_CHECK_RESULT here: returning directly would leave
         * bp->hwrm_lock held, so bail out through the error label instead.
         */
        if (rc) {
                RTE_LOG(ERR, PMD, "%s failed rc:%d\n", __func__, rc);
                goto error;
        }
        if (resp->error_code) {
                rc = rte_le_to_cpu_16(resp->error_code);
                RTE_LOG(ERR, PMD, "%s error %d\n", __func__, rc);
                goto error;
        }

        RTE_LOG(INFO, PMD, "%d.%d.%d:%d.%d.%d\n",
                resp->hwrm_intf_maj, resp->hwrm_intf_min,
                resp->hwrm_intf_upd,
                resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld);
        bp->fw_ver = (resp->hwrm_fw_maj << 24) | (resp->hwrm_fw_min << 16) |
                        (resp->hwrm_fw_bld << 8) | resp->hwrm_fw_rsvd;
        RTE_LOG(INFO, PMD, "Driver HWRM version: %d.%d.%d\n",
                HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);

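        /*
         * Pack each version as (maj << 16) | (min << 8) | upd so the two can
         * be compared numerically; e.g. HWRM 1.5.1 packs to 0x010501.
         */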
        my_version = HWRM_VERSION_MAJOR << 16;
        my_version |= HWRM_VERSION_MINOR << 8;
        my_version |= HWRM_VERSION_UPDATE;

        fw_version = resp->hwrm_intf_maj << 16;
        fw_version |= resp->hwrm_intf_min << 8;
        fw_version |= resp->hwrm_intf_upd;

        if (resp->hwrm_intf_maj != HWRM_VERSION_MAJOR) {
                RTE_LOG(ERR, PMD, "Unsupported firmware API version\n");
                rc = -EINVAL;
                goto error;
        }

        if (my_version != fw_version) {
                RTE_LOG(INFO, PMD, "BNXT Driver/HWRM API mismatch.\n");
                if (my_version < fw_version) {
                        RTE_LOG(INFO, PMD,
                                "Firmware API version is newer than driver.\n");
                        RTE_LOG(INFO, PMD,
                                "The driver may be missing features.\n");
                } else {
                        RTE_LOG(INFO, PMD,
                                "Firmware API version is older than driver.\n");
                        RTE_LOG(INFO, PMD,
                                "Not all driver features may be functional.\n");
                }
        }

        if (bp->max_req_len > resp->max_req_win_len) {
                RTE_LOG(ERR, PMD, "Unsupported request length\n");
                rc = -EINVAL;
        }
        bp->max_req_len = resp->max_req_win_len;
        max_resp_len = resp->max_resp_len;
        if (bp->max_resp_len != max_resp_len) {
                sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x",
                        bp->pdev->addr.domain, bp->pdev->addr.bus,
                        bp->pdev->addr.devid, bp->pdev->addr.function);

                rte_free(bp->hwrm_cmd_resp_addr);

                bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
                if (bp->hwrm_cmd_resp_addr == NULL) {
                        rc = -ENOMEM;
                        goto error;
                }
                bp->hwrm_cmd_resp_dma_addr =
                        rte_malloc_virt2phy(bp->hwrm_cmd_resp_addr);
                bp->max_resp_len = max_resp_len;
        }

error:
        rte_spinlock_unlock(&bp->hwrm_lock);
        return rc;
}

int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
{
        int rc;
        struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
        struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;

        if (!(bp->flags & BNXT_FLAG_REGISTERED))
                return 0;

        HWRM_PREP(req, FUNC_DRV_UNRGTR, -1, resp);
        req.flags = flags;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        bp->flags &= ~BNXT_FLAG_REGISTERED;

        return rc;
}

static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
{
        int rc = 0;
        struct hwrm_port_phy_cfg_input req = {0};
        struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t enables = 0;

        HWRM_PREP(req, PORT_PHY_CFG, -1, resp);

        if (conf->link_up) {
                req.flags = rte_cpu_to_le_32(conf->phy_flags);
                req.force_link_speed = rte_cpu_to_le_16(conf->link_speed);
                /*
                 * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
                 * any auto mode, even "none".
                 */
                if (!conf->link_speed) {
                        req.auto_mode |= conf->auto_mode;
                        enables = HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
                        req.auto_link_speed_mask = conf->auto_link_speed_mask;
                        enables |=
                           HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
                        req.auto_link_speed = bp->link_info.auto_link_speed;
                        enables |=
                                HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED;
                }
                req.auto_duplex = conf->duplex;
                enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
                req.auto_pause = conf->auto_pause;
                req.force_pause = conf->force_pause;
                /* Set force_pause if there is no auto or if there is a force */
                if (req.auto_pause && !req.force_pause)
                        enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
                else
                        enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;

                req.enables = rte_cpu_to_le_32(enables);
        } else {
                req.flags =
                rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN);
                RTE_LOG(INFO, PMD, "Force Link Down\n");
        }

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
                                   struct bnxt_link_info *link_info)
{
        int rc = 0;
        struct hwrm_port_phy_qcfg_input req = {0};
        struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, PORT_PHY_QCFG, -1, resp);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        link_info->phy_link_status = resp->link;
        if (link_info->phy_link_status !=
            HWRM_PORT_PHY_QCFG_OUTPUT_LINK_NO_LINK) {
                link_info->link_up = 1;
                link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
        } else {
                link_info->link_up = 0;
                link_info->link_speed = 0;
        }
        link_info->duplex = resp->duplex;
        link_info->pause = resp->pause;
        link_info->auto_pause = resp->auto_pause;
        link_info->force_pause = resp->force_pause;
        link_info->auto_mode = resp->auto_mode;

        link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
        link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
        link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
        link_info->phy_ver[0] = resp->phy_maj;
        link_info->phy_ver[1] = resp->phy_min;
        link_info->phy_ver[2] = resp->phy_bld;

        return rc;
}

int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
        struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, QUEUE_QPORTCFG, -1, resp);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

#define GET_QUEUE_INFO(x) \
        bp->cos_queue[x].id = resp->queue_id##x; \
        bp->cos_queue[x].profile = resp->queue_id##x##_service_profile
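
/*
 * For example, GET_QUEUE_INFO(0) token-pastes into:
 *     bp->cos_queue[0].id = resp->queue_id0;
 *     bp->cos_queue[0].profile = resp->queue_id0_service_profile;
 */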

        GET_QUEUE_INFO(0);
        GET_QUEUE_INFO(1);
        GET_QUEUE_INFO(2);
        GET_QUEUE_INFO(3);
        GET_QUEUE_INFO(4);
        GET_QUEUE_INFO(5);
        GET_QUEUE_INFO(6);
        GET_QUEUE_INFO(7);

        return rc;
}

int bnxt_hwrm_ring_alloc(struct bnxt *bp,
                         struct bnxt_ring *ring,
                         uint32_t ring_type, uint32_t map_index,
                         uint32_t stats_ctx_id)
{
        int rc = 0;
        struct hwrm_ring_alloc_input req = {.req_type = 0 };
        struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_ALLOC, -1, resp);

        req.enables = rte_cpu_to_le_32(0);

        req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
        req.fbo = rte_cpu_to_le_32(0);
        /* Association of ring index with doorbell index */
        req.logical_id = rte_cpu_to_le_16(map_index);

        switch (ring_type) {
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
                req.queue_id = bp->cos_queue[0].id;
                /* FALLTHROUGH */
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
                req.ring_type = ring_type;
                req.cmpl_ring_id =
                    rte_cpu_to_le_16(bp->grp_info[map_index].cp_fw_ring_id);
                req.length = rte_cpu_to_le_32(ring->ring_size);
                req.stat_ctx_id = rte_cpu_to_le_16(stats_ctx_id);
                req.enables = rte_cpu_to_le_32(rte_le_to_cpu_32(req.enables) |
                        HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID);
                break;
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
                req.ring_type = ring_type;
                /*
                 * TODO: Some HWRM versions crash with
                 * HWRM_RING_ALLOC_INPUT_INT_MODE_POLL
                 */
                req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
                req.length = rte_cpu_to_le_32(ring->ring_size);
                break;
        default:
                RTE_LOG(ERR, PMD, "hwrm alloc invalid ring type %d\n",
                        ring_type);
                return -1;
        }

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        if (rc || resp->error_code) {
                if (rc == 0 && resp->error_code)
                        rc = rte_le_to_cpu_16(resp->error_code);
                switch (ring_type) {
                case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
                        RTE_LOG(ERR, PMD,
                                "hwrm_ring_alloc cp failed. rc:%d\n", rc);
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
                        RTE_LOG(ERR, PMD,
                                "hwrm_ring_alloc rx failed. rc:%d\n", rc);
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
                        RTE_LOG(ERR, PMD,
                                "hwrm_ring_alloc tx failed. rc:%d\n", rc);
                        return rc;
                default:
                        RTE_LOG(ERR, PMD, "Invalid ring. rc:%d\n", rc);
                        return rc;
                }
        }

        ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
        return rc;
}

int bnxt_hwrm_ring_free(struct bnxt *bp,
                        struct bnxt_ring *ring, uint32_t ring_type)
{
        int rc;
        struct hwrm_ring_free_input req = {.req_type = 0 };
        struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_FREE, -1, resp);

        req.ring_type = ring_type;
        req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        if (rc || resp->error_code) {
                if (rc == 0 && resp->error_code)
                        rc = rte_le_to_cpu_16(resp->error_code);

                switch (ring_type) {
                case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
                        RTE_LOG(ERR, PMD, "hwrm_ring_free cp failed. rc:%d\n",
                                rc);
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
                        RTE_LOG(ERR, PMD, "hwrm_ring_free rx failed. rc:%d\n",
                                rc);
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
                        RTE_LOG(ERR, PMD, "hwrm_ring_free tx failed. rc:%d\n",
                                rc);
                        return rc;
                default:
                        RTE_LOG(ERR, PMD, "Invalid ring, rc:%d\n", rc);
                        return rc;
                }
        }
        return 0;
}

int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
{
        int rc = 0;
        struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
        struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_GRP_ALLOC, -1, resp);

        req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
        req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
        req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
        req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        bp->grp_info[idx].fw_grp_id =
            rte_le_to_cpu_16(resp->ring_group_id);

        return rc;
}

int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
{
        int rc;
        struct hwrm_ring_grp_free_input req = {.req_type = 0 };
        struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_GRP_FREE, -1, resp);

        req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
        return rc;
}

int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
{
        int rc = 0;
        struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
        struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;

        /* Nothing to do for an unallocated stats context; check before
         * HWRM_PREP so a no-op call does not consume a sequence id.
         */
        if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
                return rc;

        HWRM_PREP(req, STAT_CTX_CLR_STATS, -1, resp);

        req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp,
                             struct bnxt_cp_ring_info *cpr, unsigned int idx)
{
        int rc;
        struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
        struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, STAT_CTX_ALLOC, -1, resp);

        req.update_period_ms = rte_cpu_to_le_32(1000);

        /* HWRM_PREP already assigned req.seq_id; do not assign it again. */
        req.stats_dma_addr =
            rte_cpu_to_le_64(cpr->hw_stats_map);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);
        bp->grp_info[idx].fw_stats_ctx = cpr->hw_stats_ctx_id;

        return rc;
}

int bnxt_hwrm_stat_ctx_free(struct bnxt *bp,
                            struct bnxt_cp_ring_info *cpr, unsigned int idx)
{
        int rc;
        struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
        struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, STAT_CTX_FREE, -1, resp);

        /* HWRM_PREP already assigned req.seq_id; do not assign it again. */
        req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
        bp->grp_info[idx].fw_stats_ctx = cpr->hw_stats_ctx_id;

        return rc;
}

int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0, i, j;
        struct hwrm_vnic_alloc_input req = { 0 };
        struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        /* map ring groups to this vnic */
        for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++) {
                if (bp->grp_info[i].fw_grp_id == (uint16_t)HWRM_NA_SIGNATURE) {
                        RTE_LOG(ERR, PMD,
                                "Not enough ring groups avail:%x req:%x\n", j,
                                (vnic->end_grp_id - vnic->start_grp_id) + 1);
                        break;
                }
                vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
        }
        vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
        vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
        vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
        vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
        vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +
                                ETHER_CRC_LEN + VLAN_TAG_SIZE;
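        /*
         * e.g. with the default 1500 byte MTU the MRU works out to 1522:
         * 1500 + 14 (Ethernet header) + 4 (CRC) + 4 (single VLAN tag).
         */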
        HWRM_PREP(req, VNIC_ALLOC, -1, resp);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
        return rc;
}

int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_cfg_input req = {.req_type = 0 };
        struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t ctx_enable_flag = 0;

        HWRM_PREP(req, VNIC_CFG, -1, resp);

        /* Only RSS support for now TBD: COS & LB */
        req.enables =
            rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP |
                             HWRM_VNIC_CFG_INPUT_ENABLES_MRU);
        /* Enable every context rule that is actually valid, not just the
         * last one found (plain assignment would discard earlier matches).
         */
        if (vnic->lb_rule != 0xffff)
                ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE;
        if (vnic->cos_rule != 0xffff)
                ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE;
        if (vnic->rss_rule != 0xffff)
                ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
        req.enables |= rte_cpu_to_le_32(ctx_enable_flag);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
        req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);
        req.rss_rule = rte_cpu_to_le_16(vnic->rss_rule);
        req.cos_rule = rte_cpu_to_le_16(vnic->cos_rule);
        req.lb_rule = rte_cpu_to_le_16(vnic->lb_rule);
        req.mru = rte_cpu_to_le_16(vnic->mru);
        if (vnic->func_default)
                req.flags |=
                    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
        if (vnic->vlan_strip)
                req.flags |=
                    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
        if (vnic->bd_stall)
                req.flags |=
                    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
        if (vnic->roce_dual)
                req.flags |= rte_cpu_to_le_32(
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE);
        if (vnic->roce_only)
                req.flags |= rte_cpu_to_le_32(
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE);
        if (vnic->rss_dflt_cr)
                req.flags |= rte_cpu_to_le_32(
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
                int16_t fw_vf_id)
{
        int rc = 0;
        struct hwrm_vnic_qcfg_input req = {.req_type = 0 };
        struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_QCFG, -1, resp);

        req.enables =
                rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
        req.vf_id = rte_cpu_to_le_16(fw_vf_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        vnic->dflt_ring_grp = rte_le_to_cpu_16(resp->dflt_ring_grp);
        vnic->rss_rule = rte_le_to_cpu_16(resp->rss_rule);
        vnic->cos_rule = rte_le_to_cpu_16(resp->cos_rule);
        vnic->lb_rule = rte_le_to_cpu_16(resp->lb_rule);
        vnic->mru = rte_le_to_cpu_16(resp->mru);
        vnic->func_default = rte_le_to_cpu_32(
                        resp->flags) & HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT;
        vnic->vlan_strip = rte_le_to_cpu_32(resp->flags) &
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE;
        vnic->bd_stall = rte_le_to_cpu_32(resp->flags) &
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE;
        vnic->roce_dual = rte_le_to_cpu_32(resp->flags) &
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE;
        vnic->roce_only = rte_le_to_cpu_32(resp->flags) &
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE;
        vnic->rss_dflt_cr = rte_le_to_cpu_32(resp->flags) &
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE;

        return rc;
}

int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
        struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
                                                bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC, -1, resp);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        vnic->rss_rule = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);

        return rc;
}

int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
        struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
                                                bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE, -1, resp);

        req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->rss_rule);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        vnic->rss_rule = INVALID_HW_RING_ID;

        return rc;
}

int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_free_input req = {.req_type = 0 };
        struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;

        if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
                return rc;

        HWRM_PREP(req, VNIC_FREE, -1, resp);

        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        vnic->fw_vnic_id = INVALID_HW_RING_ID;
        return rc;
}

int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
                           struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
        struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_RSS_CFG, -1, resp);

        req.hash_type = rte_cpu_to_le_32(vnic->hash_type);

        req.ring_grp_tbl_addr =
            rte_cpu_to_le_64(vnic->rss_table_dma_addr);
        req.hash_key_tbl_addr =
            rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
        req.rss_ctx_idx = rte_cpu_to_le_16(vnic->rss_rule);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
{
        struct hwrm_func_cfg_input req = {0};
        struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        int rc;

        req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
        req.enables = rte_cpu_to_le_32(
                        HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
        memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
        req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);

        HWRM_PREP(req, FUNC_CFG, -1, resp);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
        HWRM_CHECK_RESULT;

        bp->pf.vf_info[vf].random_mac = false;

        return rc;
}

/*
 * HWRM utility functions
 */
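
/*
 * Completion ring/queue indexing convention used by the helpers below:
 * entries 0..rx_cp_nr_rings-1 are the RX queues, followed by the TX queues,
 * and bp->grp_info[] is offset by one (idx = i + 1) because index 0 is
 * reserved for the default completion ring.
 */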

int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
{
        unsigned int i;
        int rc = 0;

        for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
                struct bnxt_tx_queue *txq;
                struct bnxt_rx_queue *rxq;
                struct bnxt_cp_ring_info *cpr;

                if (i >= bp->rx_cp_nr_rings) {
                        txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
                        cpr = txq->cp_ring;
                } else {
                        rxq = bp->rx_queues[i];
                        cpr = rxq->cp_ring;
                }

                rc = bnxt_hwrm_stat_clear(bp, cpr);
                if (rc)
                        return rc;
        }
        return 0;
}

int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
{
        int rc;
        unsigned int i;
        struct bnxt_cp_ring_info *cpr;

        for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
                unsigned int idx = i + 1;

                if (i >= bp->rx_cp_nr_rings)
                        cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
                else
                        cpr = bp->rx_queues[i]->cp_ring;
                if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
                        rc = bnxt_hwrm_stat_ctx_free(bp, cpr, idx);
                        if (rc)
                                return rc;
                }
        }
        return 0;
}

int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
{
        unsigned int i;
        int rc = 0;

        for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
                struct bnxt_tx_queue *txq;
                struct bnxt_rx_queue *rxq;
                struct bnxt_cp_ring_info *cpr;
                unsigned int idx = i + 1;

                if (i >= bp->rx_cp_nr_rings) {
                        txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
                        cpr = txq->cp_ring;
                } else {
                        rxq = bp->rx_queues[i];
                        cpr = rxq->cp_ring;
                }

                rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, idx);

                if (rc)
                        return rc;
        }
        return rc;
}

int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
{
        uint16_t i;
        int rc = 0;

        for (i = 0; i < bp->rx_cp_nr_rings; i++) {
                unsigned int idx = i + 1;

                if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID) {
                        RTE_LOG(ERR, PMD,
                                "Attempt to free invalid ring group %d\n",
                                idx);
                        continue;
                }

                rc = bnxt_hwrm_ring_grp_free(bp, idx);

                if (rc)
                        return rc;
        }
        return rc;
}

static void bnxt_free_cp_ring(struct bnxt *bp,
                              struct bnxt_cp_ring_info *cpr, unsigned int idx)
{
        struct bnxt_ring *cp_ring = cpr->cp_ring_struct;

        bnxt_hwrm_ring_free(bp, cp_ring,
                        HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL);
        cp_ring->fw_ring_id = INVALID_HW_RING_ID;
        bp->grp_info[idx].cp_fw_ring_id = INVALID_HW_RING_ID;
        memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
                        sizeof(*cpr->cp_desc_ring));
        cpr->cp_raw_cons = 0;
}

int bnxt_free_all_hwrm_rings(struct bnxt *bp)
{
        unsigned int i;
        int rc = 0;

        for (i = 0; i < bp->tx_cp_nr_rings; i++) {
                struct bnxt_tx_queue *txq = bp->tx_queues[i];
                struct bnxt_tx_ring_info *txr = txq->tx_ring;
                struct bnxt_ring *ring = txr->tx_ring_struct;
                struct bnxt_cp_ring_info *cpr = txq->cp_ring;
                unsigned int idx = bp->rx_cp_nr_rings + i + 1;

                if (ring->fw_ring_id != INVALID_HW_RING_ID) {
                        bnxt_hwrm_ring_free(bp, ring,
                                        HWRM_RING_FREE_INPUT_RING_TYPE_TX);
                        ring->fw_ring_id = INVALID_HW_RING_ID;
                        memset(txr->tx_desc_ring, 0,
                                        txr->tx_ring_struct->ring_size *
                                        sizeof(*txr->tx_desc_ring));
                        memset(txr->tx_buf_ring, 0,
                                        txr->tx_ring_struct->ring_size *
                                        sizeof(*txr->tx_buf_ring));
                        txr->tx_prod = 0;
                        txr->tx_cons = 0;
                }
                if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
                        bnxt_free_cp_ring(bp, cpr, idx);
        }

        for (i = 0; i < bp->rx_cp_nr_rings; i++) {
                struct bnxt_rx_queue *rxq = bp->rx_queues[i];
                struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
                struct bnxt_ring *ring = rxr->rx_ring_struct;
                struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
                unsigned int idx = i + 1;

                if (ring->fw_ring_id != INVALID_HW_RING_ID) {
                        bnxt_hwrm_ring_free(bp, ring,
                                        HWRM_RING_FREE_INPUT_RING_TYPE_RX);
                        ring->fw_ring_id = INVALID_HW_RING_ID;
                        bp->grp_info[idx].rx_fw_ring_id = INVALID_HW_RING_ID;
                        memset(rxr->rx_desc_ring, 0,
                                        rxr->rx_ring_struct->ring_size *
                                        sizeof(*rxr->rx_desc_ring));
                        memset(rxr->rx_buf_ring, 0,
                                        rxr->rx_ring_struct->ring_size *
                                        sizeof(*rxr->rx_buf_ring));
                        rxr->rx_prod = 0;
                }
                if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
                        bnxt_free_cp_ring(bp, cpr, idx);
        }

        /* Default completion ring */
        {
                struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;

                if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
                        bnxt_free_cp_ring(bp, cpr, 0);
        }

        return rc;
}

int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
{
        uint16_t i;
        int rc = 0;

        for (i = 0; i < bp->rx_cp_nr_rings; i++) {
                unsigned int idx = i + 1;

                if (bp->grp_info[idx].cp_fw_ring_id == INVALID_HW_RING_ID ||
                    bp->grp_info[idx].rx_fw_ring_id == INVALID_HW_RING_ID)
                        continue;

                rc = bnxt_hwrm_ring_grp_alloc(bp, idx);

                if (rc)
                        return rc;
        }
        return rc;
}

void bnxt_free_hwrm_resources(struct bnxt *bp)
{
        /* Release memzone */
        rte_free(bp->hwrm_cmd_resp_addr);
        bp->hwrm_cmd_resp_addr = NULL;
        bp->hwrm_cmd_resp_dma_addr = 0;
}

int bnxt_alloc_hwrm_resources(struct bnxt *bp)
{
        struct rte_pci_device *pdev = bp->pdev;
        char type[RTE_MEMZONE_NAMESIZE];

        sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x", pdev->addr.domain,
                pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
        bp->max_req_len = HWRM_MAX_REQ_LEN;
        bp->max_resp_len = HWRM_MAX_RESP_LEN;
        bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
        if (bp->hwrm_cmd_resp_addr == NULL)
                return -ENOMEM;
        bp->hwrm_cmd_resp_dma_addr =
                rte_malloc_virt2phy(bp->hwrm_cmd_resp_addr);
        rte_spinlock_init(&bp->hwrm_lock);

        return 0;
}

int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        struct bnxt_filter_info *filter;
        int rc = 0;

        STAILQ_FOREACH(filter, &vnic->filter, next) {
                rc = bnxt_hwrm_clear_filter(bp, filter);
                if (rc)
                        break;
        }
        return rc;
}

int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        struct bnxt_filter_info *filter;
        int rc = 0;

        STAILQ_FOREACH(filter, &vnic->filter, next) {
                rc = bnxt_hwrm_set_filter(bp, vnic, filter);
                if (rc)
                        break;
        }
        return rc;
}

void bnxt_free_all_hwrm_resources(struct bnxt *bp)
{
        struct bnxt_vnic_info *vnic;
        unsigned int i;

        if (bp->vnic_info == NULL)
                return;

        vnic = &bp->vnic_info[0];
        if (BNXT_PF(bp))
                bnxt_hwrm_cfa_l2_clear_rx_mask(bp, vnic);

        /* VNIC resources */
        for (i = 0; i < bp->nr_vnics; i++) {
                struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

                bnxt_clear_hwrm_vnic_filters(bp, vnic);

                bnxt_hwrm_vnic_ctx_free(bp, vnic);
                bnxt_hwrm_vnic_free(bp, vnic);
        }
        /* Ring resources */
        bnxt_free_all_hwrm_rings(bp);
        bnxt_free_all_hwrm_ring_grps(bp);
        bnxt_free_all_hwrm_stat_ctxs(bp);
}

static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
{
        uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;

        if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
                return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;

        switch (conf_link_speed) {
        case ETH_LINK_SPEED_10M_HD:
        case ETH_LINK_SPEED_100M_HD:
                return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
        }
        return hw_link_duplex;
}

static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
{
        uint16_t eth_link_speed = 0;

        if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
                return ETH_LINK_SPEED_AUTONEG;

        switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
        case ETH_LINK_SPEED_100M:
        case ETH_LINK_SPEED_100M_HD:
                eth_link_speed =
                        HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
                break;
        case ETH_LINK_SPEED_1G:
                eth_link_speed =
                        HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
                break;
        case ETH_LINK_SPEED_2_5G:
                eth_link_speed =
                        HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
                break;
        case ETH_LINK_SPEED_10G:
                eth_link_speed =
                        HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
                break;
        case ETH_LINK_SPEED_20G:
                eth_link_speed =
                        HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
                break;
        case ETH_LINK_SPEED_25G:
                eth_link_speed =
                        HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
                break;
        case ETH_LINK_SPEED_40G:
                eth_link_speed =
                        HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
                break;
        case ETH_LINK_SPEED_50G:
                eth_link_speed =
                        HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
                break;
        default:
                RTE_LOG(ERR, PMD,
                        "Unsupported link speed %d; default to AUTO\n",
                        conf_link_speed);
                break;
        }
        return eth_link_speed;
}

#define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
                ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
                ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
                ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G)

static int bnxt_valid_link_speed(uint32_t link_speed, uint8_t port_id)
{
        uint32_t one_speed;

        if (link_speed == ETH_LINK_SPEED_AUTONEG)
                return 0;

        if (link_speed & ETH_LINK_SPEED_FIXED) {
                one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
1463                 if (one_speed & (one_speed - 1)) {
1464                         RTE_LOG(ERR, PMD,
1465                                 "Invalid advertised speeds (%u) for port %u\n",
1466                                 link_speed, port_id);
1467                         return -EINVAL;
1468                 }
1469                 if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
1470                         RTE_LOG(ERR, PMD,
1471                                 "Unsupported advertised speed (%u) for port %u\n",
1472                                 link_speed, port_id);
1473                         return -EINVAL;
1474                 }
1475         } else {
1476                 if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
1477                         RTE_LOG(ERR, PMD,
1478                                 "Unsupported advertised speeds (%u) for port %u\n",
1479                                 link_speed, port_id);
1480                         return -EINVAL;
1481                 }
1482         }
1483         return 0;
1484 }
1485
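     /*
      * Build the HWRM auto_link_speed_mask from an ETH_LINK_SPEED_* bitmap.
      * ETH_LINK_SPEED_AUTONEG expands to every speed this PMD supports.
      * Note that 100M half duplex shares the 100MB mask bit with full
      * duplex.
      */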
1486 static uint16_t bnxt_parse_eth_link_speed_mask(uint32_t link_speed)
1487 {
1488         uint16_t ret = 0;
1489
1490         if (link_speed == ETH_LINK_SPEED_AUTONEG)
1491                 link_speed = BNXT_SUPPORTED_SPEEDS;
1492
1493         if (link_speed & ETH_LINK_SPEED_100M)
1494                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
1495         if (link_speed & ETH_LINK_SPEED_100M_HD)
1496                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
1497         if (link_speed & ETH_LINK_SPEED_1G)
1498                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
1499         if (link_speed & ETH_LINK_SPEED_2_5G)
1500                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
1501         if (link_speed & ETH_LINK_SPEED_10G)
1502                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
1503         if (link_speed & ETH_LINK_SPEED_20G)
1504                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
1505         if (link_speed & ETH_LINK_SPEED_25G)
1506                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
1507         if (link_speed & ETH_LINK_SPEED_40G)
1508                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
1509         if (link_speed & ETH_LINK_SPEED_50G)
1510                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
1511         return ret;
1512 }
1513
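     /*
      * Convert the link speed reported by PORT_PHY_QCFG into an
      * ETH_SPEED_NUM_* value.  Speeds ethdev cannot represent (e.g. 2Gb)
      * map to ETH_SPEED_NUM_NONE.
      */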
1514 static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
1515 {
1516         uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;
1517
1518         switch (hw_link_speed) {
1519         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
1520                 eth_link_speed = ETH_SPEED_NUM_100M;
1521                 break;
1522         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
1523                 eth_link_speed = ETH_SPEED_NUM_1G;
1524                 break;
1525         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
1526                 eth_link_speed = ETH_SPEED_NUM_2_5G;
1527                 break;
1528         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
1529                 eth_link_speed = ETH_SPEED_NUM_10G;
1530                 break;
1531         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
1532                 eth_link_speed = ETH_SPEED_NUM_20G;
1533                 break;
1534         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
1535                 eth_link_speed = ETH_SPEED_NUM_25G;
1536                 break;
1537         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
1538                 eth_link_speed = ETH_SPEED_NUM_40G;
1539                 break;
1540         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
1541                 eth_link_speed = ETH_SPEED_NUM_50G;
1542                 break;
1543         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
1544         default:
1545                 RTE_LOG(ERR, PMD, "HWRM link speed %d not defined\n",
1546                         hw_link_speed);
1547                 break;
1548         }
1549         return eth_link_speed;
1550 }
1551
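     /* Convert the HWRM duplex setting into ETH_LINK_{FULL,HALF}_DUPLEX. */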
1552 static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
1553 {
1554         uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;
1555
1556         switch (hw_link_duplex) {
1557         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
1558         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
1559                 eth_link_duplex = ETH_LINK_FULL_DUPLEX;
1560                 break;
1561         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
1562                 eth_link_duplex = ETH_LINK_HALF_DUPLEX;
1563                 break;
1564         default:
1565                 RTE_LOG(ERR, PMD, "HWRM link duplex %d not defined\n",
1566                         hw_link_duplex);
1567                 break;
1568         }
1569         return eth_link_duplex;
1570 }
1571
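     /*
      * Query the PHY via PORT_PHY_QCFG and translate the result into the
      * rte_eth_link fields (speed, duplex, status and autoneg).
      */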
1572 int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
1573 {
1574         int rc = 0;
1575         struct bnxt_link_info *link_info = &bp->link_info;
1576
1577         rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
1578         if (rc) {
1579                 RTE_LOG(ERR, PMD,
1580                         "Get link config failed with rc %d\n", rc);
1581                 goto exit;
1582         }
1583         if (link_info->link_up)
1584                 link->link_speed =
1585                         bnxt_parse_hw_link_speed(link_info->link_speed);
1586         else
1587                 link->link_speed = ETH_SPEED_NUM_NONE;
1588         link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
1589         link->link_status = link_info->link_up;
1590         link->link_autoneg = link_info->auto_mode ==
1591                 HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
1592                 ETH_LINK_FIXED : ETH_LINK_AUTONEG;
1593 exit:
1594         return rc;
1595 }
1596
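     /*
      * Program the PHY through PORT_PHY_CFG.  NPAR PFs and VFs do not own
      * the port, so the request is skipped for them.  A parsed speed of 0
      * requests autonegotiation over the advertised speed mask; any other
      * value forces that single speed.
      */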
1597 int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
1598 {
1599         int rc = 0;
1600         struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
1601         struct bnxt_link_info link_req;
1602         uint16_t speed;
1603
1604         if (BNXT_NPAR_PF(bp) || BNXT_VF(bp))
1605                 return 0;
1606
1607         rc = bnxt_valid_link_speed(dev_conf->link_speeds,
1608                         bp->eth_dev->data->port_id);
1609         if (rc)
1610                 goto error;
1611
1612         memset(&link_req, 0, sizeof(link_req));
1613         link_req.link_up = link_up;
1614         if (!link_up)
1615                 goto port_phy_cfg;
1616
1617         speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
1618         link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
1619         if (speed == 0) {
1620                 link_req.phy_flags |=
1621                                 HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
1622                 link_req.auto_mode =
1623                                 HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
1624                 link_req.auto_link_speed_mask =
1625                         bnxt_parse_eth_link_speed_mask(dev_conf->link_speeds);
1626         } else {
1627                 link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
1628                 link_req.link_speed = speed;
1629                 RTE_LOG(INFO, PMD, "Forcing link speed 0x%x\n", speed);
1630         }
1631         link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
1632         link_req.auto_pause = bp->link_info.auto_pause;
1633         link_req.force_pause = bp->link_info.force_pause;
1634
1635 port_phy_cfg:
1636         rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
1637         if (rc) {
1638                 RTE_LOG(ERR, PMD,
1639                         "Set link config failed with rc %d\n", rc);
1640         }
1641
1642         rte_delay_ms(BNXT_LINK_WAIT_INTERVAL);
1643 error:
1644         return rc;
1645 }
1646
1647 /* JIRA 22088 */
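     /*
      * Query this function's own configuration (fid 0xffff): cache the
      * default VLAN and the NPAR port partition type.
      */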
1648 int bnxt_hwrm_func_qcfg(struct bnxt *bp)
1649 {
1650         struct hwrm_func_qcfg_input req = {0};
1651         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1652         int rc = 0;
1653
1654         HWRM_PREP(req, FUNC_QCFG, -1, resp);
1655         req.fid = rte_cpu_to_le_16(0xffff);
1656
1657         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1658
1659         HWRM_CHECK_RESULT;
1660
1661         /* Hard-coded 0xfff (12-bit) VLAN ID mask */
1662         bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
1663
1664         switch (resp->port_partition_type) {
1665         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
1666         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
1667         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
1668                 bp->port_partition_type = resp->port_partition_type;
1669                 break;
1670         default:
1671                 bp->port_partition_type = 0;
1672                 break;
1673         }
1674
1675         return rc;
1676 }
1677
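     /*
      * Synthesize a FUNC_QCAPS response from the FUNC_CFG request we sent,
      * used as a fallback when querying a VF's actual allocation fails.
      */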
1678 static void copy_func_cfg_to_qcaps(struct hwrm_func_cfg_input *fcfg,
1679                                    struct hwrm_func_qcaps_output *qcaps)
1680 {
1681         qcaps->max_rsscos_ctx = fcfg->num_rsscos_ctxs;
1682         memcpy(qcaps->mac_address, fcfg->dflt_mac_addr,
1683                sizeof(qcaps->mac_address));
1684         qcaps->max_l2_ctxs = fcfg->num_l2_ctxs;
1685         qcaps->max_rx_rings = fcfg->num_rx_rings;
1686         qcaps->max_tx_rings = fcfg->num_tx_rings;
1687         qcaps->max_cmpl_rings = fcfg->num_cmpl_rings;
1688         qcaps->max_stat_ctx = fcfg->num_stat_ctxs;
1689         qcaps->max_vfs = 0;
1690         qcaps->first_vf_id = 0;
1691         qcaps->max_vnics = fcfg->num_vnics;
1692         qcaps->max_decap_records = 0;
1693         qcaps->max_encap_records = 0;
1694         qcaps->max_tx_wm_flows = 0;
1695         qcaps->max_tx_em_flows = 0;
1696         qcaps->max_rx_wm_flows = 0;
1697         qcaps->max_rx_em_flows = 0;
1698         qcaps->max_flow_id = 0;
1699         qcaps->max_mcast_filters = fcfg->num_mcast_filters;
1700         qcaps->max_sp_tx_rings = 0;
1701         qcaps->max_hw_ring_grps = fcfg->num_hw_ring_grps;
1702 }
1703
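     /*
      * Issue FUNC_CFG for the PF itself (fid 0xffff), claiming all of the
      * remaining contexts, rings, VNICs and ring groups but only the
      * requested number of TX rings.
      */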
1704 static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
1705 {
1706         struct hwrm_func_cfg_input req = {0};
1707         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1708         int rc;
1709
1710         req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
1711                         HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
1712                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
1713                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
1714                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
1715                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
1716                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
1717                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
1718                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
1719                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
1720         req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
1721         req.mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
1722                                    ETHER_CRC_LEN + VLAN_TAG_SIZE);
1723         req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
1724                                    ETHER_CRC_LEN + VLAN_TAG_SIZE);
1725         req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
1726         req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx);
1727         req.num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings);
1728         req.num_tx_rings = rte_cpu_to_le_16(tx_rings);
1729         req.num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings);
1730         req.num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx);
1731         req.num_vnics = rte_cpu_to_le_16(bp->max_vnics);
1732         req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps);
1733         req.fid = rte_cpu_to_le_16(0xffff);
1734
1735         HWRM_PREP(req, FUNC_CFG, -1, resp);
1736
1737         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1738         HWRM_CHECK_RESULT;
1739
1740         return rc;
1741 }
1742
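     /*
      * Fill a FUNC_CFG request that splits the PF's resources into
      * num_vfs + 1 equal shares (one per VF plus the PF).  Each VF gets a
      * single VNIC while VMDq/RFS is unsupported on VFs.
      */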
1743 static void populate_vf_func_cfg_req(struct bnxt *bp,
1744                                      struct hwrm_func_cfg_input *req,
1745                                      int num_vfs)
1746 {
1747         req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
1748                         HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
1749                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
1750                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
1751                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
1752                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
1753                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
1754                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
1755                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
1756                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
1757
1758         req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
1759                                     ETHER_CRC_LEN + VLAN_TAG_SIZE);
1760         req->mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
1761                                     ETHER_CRC_LEN + VLAN_TAG_SIZE);
1762         req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx /
1763                                                 (num_vfs + 1));
1764         req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
1765         req->num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings /
1766                                                (num_vfs + 1));
1767         req->num_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
1768         req->num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
1769         req->num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
1770         /* TODO: For now, do not support VMDq/RFS on VFs. */
1771         req->num_vnics = rte_cpu_to_le_16(1);
1772         req->num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
1773                                                  (num_vfs + 1));
1774 }
1775
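     /*
      * Subtract the resources actually granted to a VF (per FUNC_QCAPS)
      * from the PF's running totals.  If the query fails, fall back to the
      * values we asked for in the FUNC_CFG request.
      */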
1776 static void reserve_resources_from_vf(struct bnxt *bp,
1777                                       struct hwrm_func_cfg_input *cfg_req,
1778                                       int vf)
1779 {
1780         struct hwrm_func_qcaps_input req = {0};
1781         struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
1782         int rc;
1783
1784         /* Get the actual allocated values now */
1785         HWRM_PREP(req, FUNC_QCAPS, -1, resp);
1786         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
1787         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1788
1789         if (rc) {
1790                 RTE_LOG(ERR, PMD, "hwrm_func_qcaps failed rc:%d\n", rc);
1791                 copy_func_cfg_to_qcaps(cfg_req, resp);
1792         } else if (resp->error_code) {
1793                 rc = rte_le_to_cpu_16(resp->error_code);
1794                 RTE_LOG(ERR, PMD, "hwrm_func_qcaps error %d\n", rc);
1795                 copy_func_cfg_to_qcaps(cfg_req, resp);
1796         }
1797
1798         bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->max_rsscos_ctx);
1799         bp->max_stat_ctx -= rte_le_to_cpu_16(resp->max_stat_ctx);
1800         bp->max_cp_rings -= rte_le_to_cpu_16(resp->max_cmpl_rings);
1801         bp->max_tx_rings -= rte_le_to_cpu_16(resp->max_tx_rings);
1802         bp->max_rx_rings -= rte_le_to_cpu_16(resp->max_rx_rings);
1803         bp->max_l2_ctx -= rte_le_to_cpu_16(resp->max_l2_ctxs);
1804         /*
1805          * TODO: While not supporting VMDq with VFs, max_vnics is always
1806          * forced to 1 in this case
1807          */
1808         /* bp->max_vnics -= rte_le_to_cpu_16(resp->max_vnics); */
1809         bp->max_ring_grps -= rte_le_to_cpu_16(resp->max_hw_ring_grps);
1810 }
1811
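     /*
      * Refresh the PF's resource limits from FUNC_QCFG after VF
      * provisioning.
      */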
1812 static int update_pf_resource_max(struct bnxt *bp)
1813 {
1814         struct hwrm_func_qcfg_input req = {0};
1815         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1816         int rc;
1817
1818         /* Copy the actual allocated values into the PF struct */
1819         HWRM_PREP(req, FUNC_QCFG, -1, resp);
1820         req.fid = rte_cpu_to_le_16(0xffff);
1821         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1822         HWRM_CHECK_RESULT;
1823
1824         /* TODO: Only the TX ring value appears to reflect the actual allocation */
1825         bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
1826         bp->pf.evb_mode = resp->evb_mode;
1827
1828         return rc;
1829 }
1830
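     /*
      * Configure the PF to own all available resources when SR-IOV is not
      * in use.  STD_TX_RING_MODE is disabled so the PF keeps every TX ring.
      */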
1831 int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
1832 {
1833         int rc;
1834
1835         if (!BNXT_PF(bp)) {
1836                 RTE_LOG(ERR, PMD, "Attempt to allocate PF resources on a VF!\n");
1837                 return -1;
1838         }
1839
1840         rc = bnxt_hwrm_func_qcaps(bp);
1841         if (rc)
1842                 return rc;
1843
1844         bp->pf.func_cfg_flags &=
1845                 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
1846                   HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
1847         bp->pf.func_cfg_flags |=
1848                 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
1849         rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
1850         return rc;
1851 }
1852
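     /*
      * Provision num_vfs virtual functions: shrink the PF to a single TX
      * ring, register a buffer for forwarded VF requests, FUNC_CFG each VF
      * with an even share of the resources, then give the PF whatever is
      * left over.
      *
      * Expected usage at init time (sketch; the exact caller may differ):
      *
      *   if (pci_dev->max_vfs)
      *           rc = bnxt_hwrm_allocate_vfs(bp, pci_dev->max_vfs);
      *   else
      *           rc = bnxt_hwrm_allocate_pf_only(bp);
      */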
1853 int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
1854 {
1855         struct hwrm_func_cfg_input req = {0};
1856         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1857         int i;
1858         size_t sz;
1859         int rc = 0;
1860         size_t req_buf_sz;
1861
1862         if (!BNXT_PF(bp)) {
1863                 RTE_LOG(ERR, PMD, "Attempt to allocate VFs on a VF!\n");
1864                 return -1;
1865         }
1866
1867         rc = bnxt_hwrm_func_qcaps(bp);
1868
1869         if (rc)
1870                 return rc;
1871
1872         bp->pf.active_vfs = num_vfs;
1873
1874         /*
1875          * First, configure the PF to only use one TX ring.  This ensures that
1876          * there are enough rings for all VFs.
1877          *
1878          * If we don't do this, when we call func_alloc() later, we will lock
1879          * extra rings to the PF that won't be available during func_cfg() of
1880          * the VFs.
1881          *
1882          * This has been fixed with firmware versions above 20.6.54.
1883          */
1884         bp->pf.func_cfg_flags &=
1885                 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
1886                   HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
1887         bp->pf.func_cfg_flags |=
1888                 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
1889         rc = bnxt_hwrm_pf_func_cfg(bp, 1);
1890         if (rc)
1891                 return rc;
1892
1893         /*
1894          * Now, create and register a buffer to hold forwarded VF requests
1895          */
1896         req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;
1897         bp->pf.vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
1898                 page_roundup(req_buf_sz));
1899         if (bp->pf.vf_req_buf == NULL) {
1900                 rc = -ENOMEM;
1901                 goto error_free;
1902         }
1903         for (sz = 0; sz < req_buf_sz; sz += getpagesize())
1904                 rte_mem_lock_page(((char *)bp->pf.vf_req_buf) + sz);
1905         for (i = 0; i < num_vfs; i++)
1906                 bp->pf.vf_info[i].req_buf = ((char *)bp->pf.vf_req_buf) +
1907                                         (i * HWRM_MAX_REQ_LEN);
1908
1909         rc = bnxt_hwrm_func_buf_rgtr(bp);
1910         if (rc)
1911                 goto error_free;
1912
1913         populate_vf_func_cfg_req(bp, &req, num_vfs);
1914
1915         bp->pf.active_vfs = 0;
1916         for (i = 0; i < num_vfs; i++) {
1917                 HWRM_PREP(req, FUNC_CFG, -1, resp);
1918                 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[i].func_cfg_flags);
1919                 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[i].fid);
1920                 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1921
1922                 /* Clear enable flag for next pass */
1923                 req.enables &= ~rte_cpu_to_le_32(
1924                                 HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
1925
1926                 if (rc || resp->error_code) {
1927                         RTE_LOG(ERR, PMD,
1928                                 "Failed to initialize VF %d\n", i);
1929                         RTE_LOG(ERR, PMD,
1930                                 "Not all requested VFs available (rc = %d, fw error = %d)\n",
1931                                 rc, resp->error_code);
1932                         break;
1933                 }
1934
1935                 reserve_resources_from_vf(bp, &req, i);
1936                 bp->pf.active_vfs++;
1937         }
1938
1939         /*
1940          * Now configure the PF to use "the rest" of the resources.
1941          * STD_TX_RING_MODE is left enabled here, which limits the number of
1942          * TX rings but allows QoS to function properly.  Without it, the PF
1943          * rings would break the bandwidth settings.
1944          */
1945         rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
1946         if (rc)
1947                 goto error_free;
1948
1949         rc = update_pf_resource_max(bp);
1950         if (rc)
1951                 goto error_free;
1952
1953         return rc;
1954
1955 error_free:
1956         bnxt_hwrm_func_buf_unrgtr(bp);
1957         return rc;
1958 }
1959
1960
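     /*
      * Register the VF request-forwarding buffer with the firmware: one
      * physically contiguous region sized to hold a HWRM_MAX_REQ_LEN
      * request per active VF.
      */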
1961 int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
1962 {
1963         int rc = 0;
1964         struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
1965         struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
1966
1967         HWRM_PREP(req, FUNC_BUF_RGTR, -1, resp);
1968
1969         req.req_buf_num_pages = rte_cpu_to_le_16(1);
1970         req.req_buf_page_size = rte_cpu_to_le_16(
1971                          page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));
1972         req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
1973         req.req_buf_page_addr[0] =
1974                 rte_cpu_to_le_64(rte_mem_virt2phy(bp->pf.vf_req_buf));
1975         if (req.req_buf_page_addr[0] == 0) {
1976                 RTE_LOG(ERR, PMD,
1977                         "unable to map buffer address to physical memory\n");
1978                 return -ENOMEM;
1979         }
1980
1981         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1982
1983         HWRM_CHECK_RESULT;
1984
1985         return rc;
1986 }
1987
1988 int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp)
1989 {
1990         int rc = 0;
1991         struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 };
1992         struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
1993
1994         HWRM_PREP(req, FUNC_BUF_UNRGTR, -1, resp);
1995
1996         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1997
1998         HWRM_CHECK_RESULT;
1999
2000         return rc;
2001 }
2002
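     /*
      * Point the firmware's asynchronous event notifications at the
      * default completion ring.
      */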
2003 int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
2004 {
2005         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2006         struct hwrm_func_cfg_input req = {0};
2007         int rc;
2008
2009         HWRM_PREP(req, FUNC_CFG, -1, resp);
2010         req.fid = rte_cpu_to_le_16(0xffff);
2011         req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
2012         req.enables = rte_cpu_to_le_32(
2013                         HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
2014         req.async_event_cr = rte_cpu_to_le_16(
2015                         bp->def_cp_ring->cp_ring_struct->fw_ring_id);
2016         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2017         HWRM_CHECK_RESULT;
2018
2019         return rc;
2020 }
2021
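     /*
      * Reject a forwarded request: the encapsulated command is handed back
      * to the firmware, which should complete it toward the requesting
      * function with an error.
      */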
2022 int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
2023                               void *encaped, size_t ec_size)
2024 {
2025         int rc = 0;
2026         struct hwrm_reject_fwd_resp_input req = {.req_type = 0};
2027         struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
2028
2029         if (ec_size > sizeof(req.encap_request))
2030                 return -1;
2031
2032         HWRM_PREP(req, REJECT_FWD_RESP, -1, resp);
2033
2034         req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
2035         memcpy(req.encap_request, encaped, ec_size);
2036
2037         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2038
2039         HWRM_CHECK_RESULT;
2040
2041         return rc;
2042 }