/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Broadcom
 * All rights reserved.
 */

#include <unistd.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_version.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_ring.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"

#include <rte_io.h>

#define HWRM_CMD_TIMEOUT                6000000
#define HWRM_SHORT_CMD_TIMEOUT          50000
#define HWRM_SPEC_CODE_1_8_3            0x10803
#define HWRM_VERSION_1_9_1              0x10901
#define HWRM_VERSION_1_9_2              0x10903

struct bnxt_plcmodes_cfg {
        uint32_t        flags;
        uint16_t        jumbo_thresh;
        uint16_t        hds_offset;
        uint16_t        hds_threshold;
};

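/*
 * Map a size to the log2 of the smallest HWRM-supported page size that can
 * hold it; page_roundup() then yields that page size itself, e.g.
 * page_roundup(3000) == 1 << 12 == 4096.
 */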
static int page_getenum(size_t size)
{
        if (size <= 1 << 4)
                return 4;
        if (size <= 1 << 12)
                return 12;
        if (size <= 1 << 13)
                return 13;
        if (size <= 1 << 16)
                return 16;
        if (size <= 1 << 21)
                return 21;
        if (size <= 1 << 22)
                return 22;
        if (size <= 1 << 30)
                return 30;
        PMD_DRV_LOG(ERR, "Page size %zu out of range\n", size);
        return sizeof(void *) * 8 - 1;
}

static int page_roundup(size_t size)
{
        return 1 << page_getenum(size);
}

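/*
 * Fill the HWRM page attribute/base fields for ring-backing memory: a ring
 * spanning multiple pages is described through an indirect page table,
 * while a single-page ring points directly at that page's DMA address.
 */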
static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem,
                                  uint8_t *pg_attr,
                                  uint64_t *pg_dir)
{
        if (rmem->nr_pages > 1) {
                *pg_attr = 1;
                *pg_dir = rte_cpu_to_le_64(rmem->pg_tbl_map);
        } else {
                *pg_dir = rte_cpu_to_le_64(rmem->dma_arr[0]);
        }
}

/*
 * HWRM Functions (sent to HWRM)
 * These are named bnxt_hwrm_*() and return -ETIMEDOUT if
 * bnxt_hwrm_send_message() times out, or a negative errno (translated from
 * the HWRM error code by HWRM_CHECK_RESULT()) if the command was rejected
 * by the ChiMP.
 */

static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
                                  uint32_t msg_len, bool use_kong_mb)
{
        unsigned int i;
        struct input *req = msg;
        struct output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t *data = msg;
        uint8_t *bar;
        uint8_t *valid;
        uint16_t max_req_len = bp->max_req_len;
        struct hwrm_short_input short_input = { 0 };
        uint16_t bar_offset = use_kong_mb ?
                GRCPF_REG_KONG_CHANNEL_OFFSET : GRCPF_REG_CHIMP_CHANNEL_OFFSET;
        uint16_t mb_trigger_offset = use_kong_mb ?
                GRCPF_REG_KONG_COMM_TRIGGER : GRCPF_REG_CHIMP_COMM_TRIGGER;
        uint32_t timeout;

        /* Do not send HWRM commands to firmware in error state */
        if (bp->flags & BNXT_FLAG_FATAL_ERROR)
                return 0;

        /* For VER_GET command, set timeout as 50ms */
        if (rte_cpu_to_le_16(req->req_type) == HWRM_VER_GET)
                timeout = HWRM_SHORT_CMD_TIMEOUT;
        else
                timeout = HWRM_CMD_TIMEOUT;

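        /*
         * Short command mode: when the firmware mandates it, or when the
         * request exceeds the mailbox size, the full request is staged in a
         * host DMA buffer and only a small hwrm_short_input descriptor
         * referencing that buffer is written through the BAR.
         */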
        if (bp->flags & BNXT_FLAG_SHORT_CMD ||
            msg_len > bp->max_req_len) {
                void *short_cmd_req = bp->hwrm_short_cmd_req_addr;

                memset(short_cmd_req, 0, bp->hwrm_max_ext_req_len);
                memcpy(short_cmd_req, req, msg_len);

                short_input.req_type = rte_cpu_to_le_16(req->req_type);
                short_input.signature = rte_cpu_to_le_16(
                                        HWRM_SHORT_INPUT_SIGNATURE_SHORT_CMD);
                short_input.size = rte_cpu_to_le_16(msg_len);
                short_input.req_addr =
                        rte_cpu_to_le_64(bp->hwrm_short_cmd_req_dma_addr);

                data = (uint32_t *)&short_input;
                msg_len = sizeof(short_input);

                max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
        }

        /* Write request msg to hwrm channel */
        for (i = 0; i < msg_len; i += 4) {
                bar = (uint8_t *)bp->bar0 + bar_offset + i;
                rte_write32(*data, bar);
                data++;
        }

        /* Zero the rest of the request space */
        for (; i < max_req_len; i += 4) {
                bar = (uint8_t *)bp->bar0 + bar_offset + i;
                rte_write32(0, bar);
        }

        /* Ring channel doorbell */
        bar = (uint8_t *)bp->bar0 + mb_trigger_offset;
        rte_write32(1, bar);
        /*
         * Make sure the channel doorbell write completes before reading
         * the response, to avoid getting stale or invalid responses.
         */
        rte_io_mb();

        /* Poll for the valid bit */
        for (i = 0; i < timeout; i++) {
                /* Sanity check on the resp->resp_len */
                rte_cio_rmb();
                if (resp->resp_len && resp->resp_len <= bp->max_resp_len) {
                        /* Last byte of resp contains the valid key */
                        valid = (uint8_t *)resp + resp->resp_len - 1;
                        if (*valid == HWRM_RESP_VALID_KEY)
                                break;
                }
                rte_delay_us(1);
        }

        if (i >= timeout) {
                /* Suppress VER_GET timeout messages during reset recovery */
                if (bp->flags & BNXT_FLAG_FW_RESET &&
                    rte_cpu_to_le_16(req->req_type) == HWRM_VER_GET)
                        return -ETIMEDOUT;

                PMD_DRV_LOG(ERR, "Error(timeout) sending msg 0x%04x\n",
                            req->req_type);
                return -ETIMEDOUT;
        }
        return 0;
}

/*
 * HWRM_PREP() should be used to prepare *ALL* HWRM commands. It grabs the
 * spinlock and does the initial processing.
 *
 * HWRM_CHECK_RESULT() checks for send and HWRM errors; on failure it
 * releases the spinlock and returns from the *calling* function with a
 * negative errno. Functions that do not follow the regular int return-code
 * convention must not use HWRM_CHECK_RESULT() directly; copy and adapt it
 * instead.
 *
 * HWRM_UNLOCK() must be called after all response processing is completed.
 */
#define HWRM_PREP(req, type, kong) do { \
        rte_spinlock_lock(&bp->hwrm_lock); \
        memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
        req.req_type = rte_cpu_to_le_16(HWRM_##type); \
        req.cmpl_ring = rte_cpu_to_le_16(-1); \
        req.seq_id = kong ? rte_cpu_to_le_16(bp->kong_cmd_seq++) :\
                rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
        req.target_id = rte_cpu_to_le_16(0xffff); \
        req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr); \
} while (0)

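/*
 * Same checks as HWRM_CHECK_RESULT() below, but without logging and without
 * translating the HWRM error code to an errno value; used where a failure
 * is an expected outcome (resource test-reservations, VER_GET during FW
 * reset).
 */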
#define HWRM_CHECK_RESULT_SILENT() do {\
        if (rc) { \
                rte_spinlock_unlock(&bp->hwrm_lock); \
                return rc; \
        } \
        if (resp->error_code) { \
                rc = rte_le_to_cpu_16(resp->error_code); \
                rte_spinlock_unlock(&bp->hwrm_lock); \
                return rc; \
        } \
} while (0)

#define HWRM_CHECK_RESULT() do {\
        if (rc) { \
                PMD_DRV_LOG(ERR, "failed rc:%d\n", rc); \
                rte_spinlock_unlock(&bp->hwrm_lock); \
                if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
                        rc = -EACCES; \
                else if (rc == HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR) \
                        rc = -ENOSPC; \
                else if (rc == HWRM_ERR_CODE_INVALID_PARAMS) \
                        rc = -EINVAL; \
                else if (rc == HWRM_ERR_CODE_CMD_NOT_SUPPORTED) \
                        rc = -ENOTSUP; \
                else if (rc > 0) \
                        rc = -EIO; \
                return rc; \
        } \
        if (resp->error_code) { \
                rc = rte_le_to_cpu_16(resp->error_code); \
                if (resp->resp_len >= 16) { \
                        struct hwrm_err_output *tmp_hwrm_err_op = \
                                                (void *)resp; \
                        PMD_DRV_LOG(ERR, \
                                "error %d:%d:%08x:%04x\n", \
                                rc, tmp_hwrm_err_op->cmd_err, \
                                rte_le_to_cpu_32(\
                                        tmp_hwrm_err_op->opaque_0), \
                                rte_le_to_cpu_16(\
                                        tmp_hwrm_err_op->opaque_1)); \
                } else { \
                        PMD_DRV_LOG(ERR, "error %d\n", rc); \
                } \
                rte_spinlock_unlock(&bp->hwrm_lock); \
                if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
                        rc = -EACCES; \
                else if (rc == HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR) \
                        rc = -ENOSPC; \
                else if (rc == HWRM_ERR_CODE_INVALID_PARAMS) \
                        rc = -EINVAL; \
                else if (rc == HWRM_ERR_CODE_CMD_NOT_SUPPORTED) \
                        rc = -ENOTSUP; \
                else if (rc > 0) \
                        rc = -EIO; \
                return rc; \
        } \
} while (0)

#define HWRM_UNLOCK()           rte_spinlock_unlock(&bp->hwrm_lock)

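/*
 * Canonical usage of the helpers above; every bnxt_hwrm_*() function below
 * follows this pattern (XXX stands for a concrete HWRM command):
 *
 *      struct hwrm_xxx_input req = {.req_type = 0 };
 *      struct hwrm_xxx_output *resp = bp->hwrm_cmd_resp_addr;
 *      int rc = 0;
 *
 *      HWRM_PREP(req, XXX, BNXT_USE_CHIMP_MB);
 *      req.some_field = rte_cpu_to_le_16(...);  (request fields are LE)
 *      rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 *      HWRM_CHECK_RESULT();  (may return; drops the lock if it does)
 *      ... read resp fields while the lock is still held ...
 *      HWRM_UNLOCK();
 *      return rc;
 */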
int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, CFA_L2_SET_RX_MASK, BNXT_USE_CHIMP_MB);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
        req.mask = 0;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
                                 struct bnxt_vnic_info *vnic,
                                 uint16_t vlan_count,
                                 struct bnxt_vlan_table_entry *vlan_table)
{
        int rc = 0;
        struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t mask = 0;

        if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
                return rc;

        HWRM_PREP(req, CFA_L2_SET_RX_MASK, BNXT_USE_CHIMP_MB);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

        /* FIXME: add the multicast flag once multicast add options are
         * supported by ethtool.
         */
        if (vnic->flags & BNXT_VNIC_INFO_BCAST)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST;
        if (vnic->flags & BNXT_VNIC_INFO_UNTAGGED)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN;
        if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
        if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
        if (vnic->flags & BNXT_VNIC_INFO_MCAST)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
        if (vnic->mc_addr_cnt) {
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
                req.num_mc_entries = rte_cpu_to_le_32(vnic->mc_addr_cnt);
                req.mc_tbl_addr = rte_cpu_to_le_64(vnic->mc_list_dma_addr);
        }
        if (vlan_table) {
                if (!(mask & HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN))
                        mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
                req.vlan_tag_tbl_addr = rte_cpu_to_le_64(
                         rte_mem_virt2iova(vlan_table));
                req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count);
        }
        req.mask = rte_cpu_to_le_32(mask);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
                        uint16_t vlan_count,
                        struct bnxt_vlan_antispoof_table_entry *vlan_table)
{
        int rc = 0;
        struct hwrm_cfa_vlan_antispoof_cfg_input req = {.req_type = 0 };
        struct hwrm_cfa_vlan_antispoof_cfg_output *resp =
                                                bp->hwrm_cmd_resp_addr;
        /*
         * Older HWRM versions did not support this command; the set_rx_mask
         * list was used for anti-spoof instead. In 1.8.0 the TX-path
         * configuration was removed from the set_rx_mask call and this
         * command was added.
         *
         * Besides 1.8.0 and newer, the command is also present in 1.7.8.0
         * and in 1.7.8.11 and higher. bp->fw_ver encodes the firmware
         * version as (maj << 24) | (min << 16) | (bld << 8) | rsvd.
         */
        if (bp->fw_ver < ((1 << 24) | (8 << 16))) {
                if (bp->fw_ver != ((1 << 24) | (7 << 16) | (8 << 8))) {
                        if (bp->fw_ver < ((1 << 24) | (7 << 16) | (8 << 8) |
                                        (11)))
                                return 0;
                }
        }
        HWRM_PREP(req, CFA_VLAN_ANTISPOOF_CFG, BNXT_USE_CHIMP_MB);
        req.fid = rte_cpu_to_le_16(fid);

        req.vlan_tag_mask_tbl_addr =
                rte_cpu_to_le_64(rte_mem_virt2iova(vlan_table));
        req.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_clear_l2_filter(struct bnxt *bp,
                           struct bnxt_filter_info *filter)
{
        int rc = 0;
        struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;

        if (filter->fw_l2_filter_id == UINT64_MAX)
                return 0;

        HWRM_PREP(req, CFA_L2_FILTER_FREE, BNXT_USE_CHIMP_MB);

        req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        filter->fw_l2_filter_id = UINT64_MAX;

        return 0;
}

int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
                         uint16_t dst_id,
                         struct bnxt_filter_info *filter)
{
        int rc = 0;
        struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
        struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
        const struct rte_eth_vmdq_rx_conf *conf =
                    &dev_conf->rx_adv_conf.vmdq_rx_conf;
        uint32_t enables = 0;
        uint16_t j = dst_id - 1;

        /* TODO: Is there a better way to add VLANs to each VNIC in case of
         * VMDQ?
         */
        if ((dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) &&
            conf->pool_map[j].pools & (1UL << j)) {
                PMD_DRV_LOG(DEBUG,
                        "Add vlan %u to vmdq pool %u\n",
                        conf->pool_map[j].vlan_id, j);

                filter->l2_ivlan = conf->pool_map[j].vlan_id;
                filter->enables |=
                        HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN |
                        HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK;
        }

        if (filter->fw_l2_filter_id != UINT64_MAX)
                bnxt_hwrm_clear_l2_filter(bp, filter);

        HWRM_PREP(req, CFA_L2_FILTER_ALLOC, BNXT_USE_CHIMP_MB);

        req.flags = rte_cpu_to_le_32(filter->flags);
        req.flags |=
        rte_cpu_to_le_32(HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST);

        enables = filter->enables |
              HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
        req.dst_id = rte_cpu_to_le_16(dst_id);

        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
                memcpy(req.l2_addr, filter->l2_addr,
                       RTE_ETHER_ADDR_LEN);
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
                memcpy(req.l2_addr_mask, filter->l2_addr_mask,
                       RTE_ETHER_ADDR_LEN);
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
                req.l2_ovlan = filter->l2_ovlan;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN)
                req.l2_ivlan = filter->l2_ivlan;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
                req.l2_ovlan_mask = filter->l2_ovlan_mask;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK)
                req.l2_ivlan_mask = filter->l2_ivlan_mask;
        if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID)
                req.src_id = rte_cpu_to_le_32(filter->src_id);
        if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE)
                req.src_type = filter->src_type;

        req.enables = rte_cpu_to_le_32(enables);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

        filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
{
        struct hwrm_port_mac_cfg_input req = {.req_type = 0};
        struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
        uint32_t flags = 0;
        int rc;

        if (!ptp)
                return 0;

        HWRM_PREP(req, PORT_MAC_CFG, BNXT_USE_CHIMP_MB);

        if (ptp->rx_filter)
                flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_ENABLE;
        else
                flags |=
                        HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_DISABLE;
        if (ptp->tx_tstamp_en)
                flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_ENABLE;
        else
                flags |=
                        HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_DISABLE;
        req.flags = rte_cpu_to_le_32(flags);
        req.enables = rte_cpu_to_le_32
                (HWRM_PORT_MAC_CFG_INPUT_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE);
        req.rx_ts_capture_ptp_msg_type = rte_cpu_to_le_16(ptp->rxctl);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
        HWRM_UNLOCK();

        return rc;
}

static int bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_port_mac_ptp_qcfg_input req = {.req_type = 0};
        struct hwrm_port_mac_ptp_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
        struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;

/*      if (bp->hwrm_spec_code < 0x10801 || ptp)  TBD  */
        if (ptp)
                return 0;

        HWRM_PREP(req, PORT_MAC_PTP_QCFG, BNXT_USE_CHIMP_MB);

        req.port_id = rte_cpu_to_le_16(bp->pf.port_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

        /* HWRM_PREP() took the lock; release it on every exit path. */
        if (!BNXT_CHIP_THOR(bp) &&
            !(resp->flags &
              HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_DIRECT_ACCESS)) {
                HWRM_UNLOCK();
                return 0;
        }

        if (resp->flags & HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_ONE_STEP_TX_TS)
                bp->flags |= BNXT_FLAG_FW_CAP_ONE_STEP_TX_TS;

        ptp = rte_zmalloc("ptp_cfg", sizeof(*ptp), 0);
        if (!ptp) {
                HWRM_UNLOCK();
                return -ENOMEM;
        }

        if (!BNXT_CHIP_THOR(bp)) {
                ptp->rx_regs[BNXT_PTP_RX_TS_L] =
                        rte_le_to_cpu_32(resp->rx_ts_reg_off_lower);
                ptp->rx_regs[BNXT_PTP_RX_TS_H] =
                        rte_le_to_cpu_32(resp->rx_ts_reg_off_upper);
                ptp->rx_regs[BNXT_PTP_RX_SEQ] =
                        rte_le_to_cpu_32(resp->rx_ts_reg_off_seq_id);
                ptp->rx_regs[BNXT_PTP_RX_FIFO] =
                        rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo);
                ptp->rx_regs[BNXT_PTP_RX_FIFO_ADV] =
                        rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo_adv);
                ptp->tx_regs[BNXT_PTP_TX_TS_L] =
                        rte_le_to_cpu_32(resp->tx_ts_reg_off_lower);
                ptp->tx_regs[BNXT_PTP_TX_TS_H] =
                        rte_le_to_cpu_32(resp->tx_ts_reg_off_upper);
                ptp->tx_regs[BNXT_PTP_TX_SEQ] =
                        rte_le_to_cpu_32(resp->tx_ts_reg_off_seq_id);
                ptp->tx_regs[BNXT_PTP_TX_FIFO] =
                        rte_le_to_cpu_32(resp->tx_ts_reg_off_fifo);
        }

        HWRM_UNLOCK();

        ptp->bp = bp;
        bp->ptp_cfg = ptp;

        return 0;
}

static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_func_qcaps_input req = {.req_type = 0 };
        struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
        uint16_t new_max_vfs;
        uint32_t flags;
        int i;

        HWRM_PREP(req, FUNC_QCAPS, BNXT_USE_CHIMP_MB);

        req.fid = rte_cpu_to_le_16(0xffff);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

        bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
        flags = rte_le_to_cpu_32(resp->flags);
        if (BNXT_PF(bp)) {
                bp->pf.port_id = resp->port_id;
                bp->pf.first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
                bp->pf.total_vfs = rte_le_to_cpu_16(resp->max_vfs);
                new_max_vfs = bp->pdev->max_vfs;
                if (new_max_vfs != bp->pf.max_vfs) {
                        if (bp->pf.vf_info)
                                rte_free(bp->pf.vf_info);
                        bp->pf.vf_info = rte_malloc("bnxt_vf_info",
                            sizeof(bp->pf.vf_info[0]) * new_max_vfs, 0);
                        if (bp->pf.vf_info == NULL) {
                                PMD_DRV_LOG(ERR,
                                            "Failed to alloc vf info\n");
                                HWRM_UNLOCK();
                                return -ENOMEM;
                        }
                        bp->pf.max_vfs = new_max_vfs;
                        for (i = 0; i < new_max_vfs; i++) {
                                bp->pf.vf_info[i].fid = bp->pf.first_vf_id + i;
                                bp->pf.vf_info[i].vlan_table =
                                        rte_zmalloc("VF VLAN table",
                                                    getpagesize(),
                                                    getpagesize());
                                if (bp->pf.vf_info[i].vlan_table == NULL)
                                        PMD_DRV_LOG(ERR,
                                        "Failed to alloc VLAN table for VF %d\n",
                                        i);
                                else
                                        rte_mem_lock_page(
                                                bp->pf.vf_info[i].vlan_table);
                                bp->pf.vf_info[i].vlan_as_table =
                                        rte_zmalloc("VF VLAN AS table",
                                                    getpagesize(),
                                                    getpagesize());
                                if (bp->pf.vf_info[i].vlan_as_table == NULL)
                                        PMD_DRV_LOG(ERR,
                                        "Failed to alloc VLAN AS table for VF %d\n",
                                        i);
                                else
                                        rte_mem_lock_page(
                                               bp->pf.vf_info[i].vlan_as_table);
                                STAILQ_INIT(&bp->pf.vf_info[i].filter);
                        }
                }
        }

        bp->fw_fid = rte_le_to_cpu_32(resp->fid);
        memcpy(bp->dflt_mac_addr, &resp->mac_address, RTE_ETHER_ADDR_LEN);
        bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
        bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
        bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
        bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
        bp->first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
        bp->max_rx_em_flows = rte_le_to_cpu_16(resp->max_rx_em_flows);
        bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
        if (!BNXT_CHIP_THOR(bp))
                bp->max_l2_ctx += bp->max_rx_em_flows;
        /* TODO: For now, do not support VMDq/RFS on VFs. */
        if (BNXT_PF(bp)) {
                if (bp->pf.max_vfs)
                        bp->max_vnics = 1;
                else
                        bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
        } else {
                bp->max_vnics = 1;
        }
        bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
        if (BNXT_PF(bp)) {
                bp->pf.total_vnics = rte_le_to_cpu_16(resp->max_vnics);
                if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED) {
                        bp->flags |= BNXT_FLAG_PTP_SUPPORTED;
                        PMD_DRV_LOG(DEBUG, "PTP SUPPORTED\n");
                }
        }

        if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT_STATS_SUPPORTED)
                bp->flags |= BNXT_FLAG_EXT_STATS_SUPPORTED;

        if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ERROR_RECOVERY_CAPABLE) {
                bp->flags |= BNXT_FLAG_FW_CAP_ERROR_RECOVERY;
                PMD_DRV_LOG(DEBUG, "Adapter Error recovery SUPPORTED\n");
        } else {
                bp->flags &= ~BNXT_FLAG_FW_CAP_ERROR_RECOVERY;
        }

        if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ERR_RECOVER_RELOAD)
                bp->flags |= BNXT_FLAG_FW_CAP_ERR_RECOVER_RELOAD;
        else
                bp->flags &= ~BNXT_FLAG_FW_CAP_ERR_RECOVER_RELOAD;

        HWRM_UNLOCK();

        /*
         * Issue PORT_MAC_PTP_QCFG only after the FUNC_QCAPS lock has been
         * dropped; bnxt_hwrm_ptp_qcfg() sends its own HWRM command and
         * takes the lock itself.
         */
        if (bp->flags & BNXT_FLAG_PTP_SUPPORTED)
                bnxt_hwrm_ptp_qcfg(bp);

        return rc;
}

int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
        int rc;

        rc = __bnxt_hwrm_func_qcaps(bp);
        if (!rc && bp->hwrm_spec_code >= HWRM_SPEC_CODE_1_8_3) {
                rc = bnxt_alloc_ctx_mem(bp);
                if (rc)
                        return rc;

                rc = bnxt_hwrm_func_resc_qcaps(bp);
                if (!rc)
                        bp->flags |= BNXT_FLAG_NEW_RM;
        }

        return rc;
}

int bnxt_hwrm_func_reset(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_func_reset_input req = {.req_type = 0 };
        struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, FUNC_RESET, BNXT_USE_CHIMP_MB);

        req.enables = rte_cpu_to_le_32(0);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_func_driver_register(struct bnxt *bp)
{
        int rc;
        uint32_t flags = 0;
        struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
        struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;

        if (bp->flags & BNXT_FLAG_REGISTERED)
                return 0;

        flags = HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_HOT_RESET_SUPPORT;
        if (bp->flags & BNXT_FLAG_FW_CAP_ERROR_RECOVERY)
                flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_ERROR_RECOVERY_SUPPORT;

        /* PFs and trusted VFs should indicate support for the master
         * capability on non-Stingray platforms.
         */
        if ((BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) && !BNXT_STINGRAY(bp))
                flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_MASTER_SUPPORT;

        HWRM_PREP(req, FUNC_DRV_RGTR, BNXT_USE_CHIMP_MB);
        req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
                        HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
        req.ver_maj = RTE_VER_YEAR;
        req.ver_min = RTE_VER_MONTH;
        req.ver_upd = RTE_VER_MINOR;

        if (BNXT_PF(bp)) {
                req.enables |= rte_cpu_to_le_32(
                        HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_REQ_FWD);
                memcpy(req.vf_req_fwd, bp->pf.vf_req_fwd,
                       RTE_MIN(sizeof(req.vf_req_fwd),
                               sizeof(bp->pf.vf_req_fwd)));

                /*
                 * PF can sniff HWRM API issued by VF. This can be set up by
                 * linux driver and inherited by the DPDK PF driver. Clear
                 * this HWRM sniffer list in FW because DPDK PF driver does
                 * not support this.
                 */
                flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FWD_NONE_MODE;
        }

        req.flags = rte_cpu_to_le_32(flags);

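        /* Ask the FW to forward only the async events this driver handles */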
        req.async_event_fwd[0] |=
                rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_LINK_STATUS_CHANGE |
                                 ASYNC_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED |
                                 ASYNC_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE |
                                 ASYNC_CMPL_EVENT_ID_LINK_SPEED_CHANGE |
                                 ASYNC_CMPL_EVENT_ID_RESET_NOTIFY);
        if (bp->flags & BNXT_FLAG_FW_CAP_ERROR_RECOVERY)
                req.async_event_fwd[0] |=
                        rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_ERROR_RECOVERY);
        req.async_event_fwd[1] |=
                rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_PF_DRVR_UNLOAD |
                                 ASYNC_CMPL_EVENT_ID_VF_CFG_CHANGE);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

        flags = rte_le_to_cpu_32(resp->flags);
        if (flags & HWRM_FUNC_DRV_RGTR_OUTPUT_FLAGS_IF_CHANGE_SUPPORTED)
                bp->flags |= BNXT_FLAG_FW_CAP_IF_CHANGE;

        HWRM_UNLOCK();

        bp->flags |= BNXT_FLAG_REGISTERED;

        return rc;
}

int bnxt_hwrm_check_vf_rings(struct bnxt *bp)
{
        if (!(BNXT_VF(bp) && (bp->flags & BNXT_FLAG_NEW_RM)))
                return 0;

        return bnxt_hwrm_func_reserve_vf_resc(bp, true);
}

int bnxt_hwrm_func_reserve_vf_resc(struct bnxt *bp, bool test)
{
        int rc;
        uint32_t flags = 0;
        uint32_t enables;
        struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        struct hwrm_func_vf_cfg_input req = {0};

        HWRM_PREP(req, FUNC_VF_CFG, BNXT_USE_CHIMP_MB);

        enables = HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RX_RINGS  |
                  HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_TX_RINGS   |
                  HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_STAT_CTXS  |
                  HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
                  HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS;

        if (BNXT_HAS_RING_GRPS(bp)) {
                enables |= HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS;
                req.num_hw_ring_grps = rte_cpu_to_le_16(bp->rx_nr_rings);
        }

        req.num_tx_rings = rte_cpu_to_le_16(bp->tx_nr_rings);
        req.num_rx_rings = rte_cpu_to_le_16(bp->rx_nr_rings *
                                            AGG_RING_MULTIPLIER);
        req.num_stat_ctxs = rte_cpu_to_le_16(bp->rx_nr_rings +
                                             bp->tx_nr_rings +
                                             BNXT_NUM_ASYNC_CPR(bp));
        req.num_cmpl_rings = rte_cpu_to_le_16(bp->rx_nr_rings +
                                              bp->tx_nr_rings +
                                              BNXT_NUM_ASYNC_CPR(bp));
        req.num_vnics = rte_cpu_to_le_16(bp->rx_nr_rings);
        if (bp->vf_resv_strategy ==
            HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
                enables |= HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS |
                           HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_L2_CTXS |
                           HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS;
                req.num_rsscos_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_RSS_CTX);
                req.num_l2_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_L2_CTX);
                req.num_vnics = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_VNIC);
        }

        if (test)
                flags = HWRM_FUNC_VF_CFG_INPUT_FLAGS_TX_ASSETS_TEST |
                        HWRM_FUNC_VF_CFG_INPUT_FLAGS_RX_ASSETS_TEST |
                        HWRM_FUNC_VF_CFG_INPUT_FLAGS_CMPL_ASSETS_TEST |
                        HWRM_FUNC_VF_CFG_INPUT_FLAGS_STAT_CTX_ASSETS_TEST |
                        HWRM_FUNC_VF_CFG_INPUT_FLAGS_VNIC_ASSETS_TEST;

        /* Test ring-group assets only on devices that have ring groups */
        if (test && BNXT_HAS_RING_GRPS(bp))
                flags |= HWRM_FUNC_VF_CFG_INPUT_FLAGS_RING_GRP_ASSETS_TEST;

        req.flags = rte_cpu_to_le_32(flags);
        req.enables |= rte_cpu_to_le_32(enables);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        if (test)
                HWRM_CHECK_RESULT_SILENT();
        else
                HWRM_CHECK_RESULT();

        HWRM_UNLOCK();
        return rc;
}

int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp)
{
        int rc;
        struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
        struct hwrm_func_resource_qcaps_input req = {0};

        HWRM_PREP(req, FUNC_RESOURCE_QCAPS, BNXT_USE_CHIMP_MB);
        req.fid = rte_cpu_to_le_16(0xffff);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

        if (BNXT_VF(bp)) {
                bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
                bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
                bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
                bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
                bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
                /* func_resource_qcaps does not return max_rx_em_flows.
                 * So use the value provided by func_qcaps.
                 */
                bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
                if (!BNXT_CHIP_THOR(bp))
                        bp->max_l2_ctx += bp->max_rx_em_flows;
                bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
                bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
        }
        bp->max_nq_rings = rte_le_to_cpu_16(resp->max_msix);
        bp->vf_resv_strategy = rte_le_to_cpu_16(resp->vf_reservation_strategy);
        if (bp->vf_resv_strategy >
            HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC)
                bp->vf_resv_strategy =
                HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_MAXIMAL;

        HWRM_UNLOCK();
        return rc;
}

int bnxt_hwrm_ver_get(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_ver_get_input req = {.req_type = 0 };
        struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t fw_version;
        uint16_t max_resp_len;
        char type[RTE_MEMZONE_NAMESIZE];
        uint32_t dev_caps_cfg;

        bp->max_req_len = HWRM_MAX_REQ_LEN;
        HWRM_PREP(req, VER_GET, BNXT_USE_CHIMP_MB);

        req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
        req.hwrm_intf_min = HWRM_VERSION_MINOR;
        req.hwrm_intf_upd = HWRM_VERSION_UPDATE;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        if (bp->flags & BNXT_FLAG_FW_RESET)
                HWRM_CHECK_RESULT_SILENT();
        else
                HWRM_CHECK_RESULT();

        PMD_DRV_LOG(INFO, "%d.%d.%d:%d.%d.%d\n",
                resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
                resp->hwrm_intf_upd_8b, resp->hwrm_fw_maj_8b,
                resp->hwrm_fw_min_8b, resp->hwrm_fw_bld_8b);
        bp->fw_ver = (resp->hwrm_fw_maj_8b << 24) |
                     (resp->hwrm_fw_min_8b << 16) |
                     (resp->hwrm_fw_bld_8b << 8) |
                     resp->hwrm_fw_rsvd_8b;
        PMD_DRV_LOG(INFO, "Driver HWRM version: %d.%d.%d\n",
                HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);

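        /*
         * bp->hwrm_spec_code holds the HWRM interface spec version as
         * (maj << 16) | (min << 8) | upd, matching the HWRM_VERSION_* and
         * HWRM_SPEC_CODE_* constants at the top of this file.
         */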
        fw_version = resp->hwrm_intf_maj_8b << 16;
        fw_version |= resp->hwrm_intf_min_8b << 8;
        fw_version |= resp->hwrm_intf_upd_8b;
        bp->hwrm_spec_code = fw_version;

        if (resp->hwrm_intf_maj_8b != HWRM_VERSION_MAJOR) {
                PMD_DRV_LOG(ERR, "Unsupported firmware API version\n");
                rc = -EINVAL;
                goto error;
        }

        if (bp->max_req_len > rte_le_to_cpu_16(resp->max_req_win_len)) {
                PMD_DRV_LOG(ERR, "Unsupported request length\n");
                rc = -EINVAL;
        }
        bp->max_req_len = rte_le_to_cpu_16(resp->max_req_win_len);
        bp->hwrm_max_ext_req_len = rte_le_to_cpu_16(resp->max_ext_req_len);
        if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
                bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;

        max_resp_len = rte_le_to_cpu_16(resp->max_resp_len);
        dev_caps_cfg = rte_le_to_cpu_32(resp->dev_caps_cfg);

        if (bp->max_resp_len != max_resp_len) {
                sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x",
                        bp->pdev->addr.domain, bp->pdev->addr.bus,
                        bp->pdev->addr.devid, bp->pdev->addr.function);

                rte_free(bp->hwrm_cmd_resp_addr);

                bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
                if (bp->hwrm_cmd_resp_addr == NULL) {
                        rc = -ENOMEM;
                        goto error;
                }
                rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
                bp->hwrm_cmd_resp_dma_addr =
                        rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
                if (bp->hwrm_cmd_resp_dma_addr == RTE_BAD_IOVA) {
                        PMD_DRV_LOG(ERR,
                        "Unable to map response buffer to physical memory.\n");
                        rc = -ENOMEM;
                        goto error;
                }
                bp->max_resp_len = max_resp_len;
        }

        if ((dev_caps_cfg &
                HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
            (dev_caps_cfg &
             HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) {
                PMD_DRV_LOG(DEBUG, "Short command supported\n");
                bp->flags |= BNXT_FLAG_SHORT_CMD;
        }

        if (((dev_caps_cfg &
              HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
             (dev_caps_cfg &
              HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) ||
            bp->hwrm_max_ext_req_len > HWRM_MAX_REQ_LEN) {
                sprintf(type, "bnxt_hwrm_short_%04x:%02x:%02x:%02x",
                        bp->pdev->addr.domain, bp->pdev->addr.bus,
                        bp->pdev->addr.devid, bp->pdev->addr.function);

                rte_free(bp->hwrm_short_cmd_req_addr);

                bp->hwrm_short_cmd_req_addr =
                                rte_malloc(type, bp->hwrm_max_ext_req_len, 0);
                if (bp->hwrm_short_cmd_req_addr == NULL) {
                        rc = -ENOMEM;
                        goto error;
                }
                rte_mem_lock_page(bp->hwrm_short_cmd_req_addr);
                bp->hwrm_short_cmd_req_dma_addr =
                        rte_mem_virt2iova(bp->hwrm_short_cmd_req_addr);
                if (bp->hwrm_short_cmd_req_dma_addr == RTE_BAD_IOVA) {
                        rte_free(bp->hwrm_short_cmd_req_addr);
                        PMD_DRV_LOG(ERR,
                                "Unable to map buffer to physical memory.\n");
                        rc = -ENOMEM;
                        goto error;
                }
        }
        if (dev_caps_cfg &
            HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED) {
                bp->flags |= BNXT_FLAG_KONG_MB_EN;
                PMD_DRV_LOG(DEBUG, "Kong mailbox channel enabled\n");
        }
        if (dev_caps_cfg &
            HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
                PMD_DRV_LOG(DEBUG, "FW supports Trusted VFs\n");

error:
        HWRM_UNLOCK();
        return rc;
}

int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
{
        int rc;
        struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
        struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;

        if (!(bp->flags & BNXT_FLAG_REGISTERED))
                return 0;

        HWRM_PREP(req, FUNC_DRV_UNRGTR, BNXT_USE_CHIMP_MB);
        req.flags = flags;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
{
        int rc = 0;
        struct hwrm_port_phy_cfg_input req = {0};
        struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t enables = 0;

        HWRM_PREP(req, PORT_PHY_CFG, BNXT_USE_CHIMP_MB);

        if (conf->link_up) {
                /* Setting a fixed speed while autoneg is on; disable autoneg */
                if (bp->link_info.auto_mode && conf->link_speed) {
                        req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE;
                        PMD_DRV_LOG(DEBUG, "Disabling AutoNeg\n");
                }

                req.flags = rte_cpu_to_le_32(conf->phy_flags);
                req.force_link_speed = rte_cpu_to_le_16(conf->link_speed);
                enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
                /*
                 * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
                 * any auto mode, even "none".
                 */
                if (!conf->link_speed) {
                        /* No speeds specified. Enable AutoNeg - all speeds */
                        req.auto_mode =
                                HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS;
                }
                /* AutoNeg - Advertise speeds specified. */
                if (conf->auto_link_speed_mask &&
                    !(conf->phy_flags & HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE)) {
                        req.auto_mode =
                                HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
                        req.auto_link_speed_mask =
                                conf->auto_link_speed_mask;
                        enables |=
                        HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
                }

                req.auto_duplex = conf->duplex;
                enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
                req.auto_pause = conf->auto_pause;
                req.force_pause = conf->force_pause;
                /* Advertise AUTO_PAUSE only when auto_pause is set and
                 * force_pause is not; otherwise apply FORCE_PAUSE.
                 */
                if (req.auto_pause && !req.force_pause)
                        enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
                else
                        enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;

                req.enables = rte_cpu_to_le_32(enables);
        } else {
                req.flags =
                rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN);
                PMD_DRV_LOG(INFO, "Force Link Down\n");
        }

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
                                   struct bnxt_link_info *link_info)
{
        int rc = 0;
        struct hwrm_port_phy_qcfg_input req = {0};
        struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, PORT_PHY_QCFG, BNXT_USE_CHIMP_MB);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

        link_info->phy_link_status = resp->link;
        link_info->link_up =
                (link_info->phy_link_status ==
                 HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) ? 1 : 0;
        link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
        link_info->duplex = resp->duplex_cfg;
        link_info->pause = resp->pause;
        link_info->auto_pause = resp->auto_pause;
        link_info->force_pause = resp->force_pause;
        link_info->auto_mode = resp->auto_mode;
        link_info->phy_type = resp->phy_type;
        link_info->media_type = resp->media_type;

        link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
        link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
        link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
        link_info->force_link_speed = rte_le_to_cpu_16(resp->force_link_speed);
        link_info->phy_ver[0] = resp->phy_maj;
        link_info->phy_ver[1] = resp->phy_min;
        link_info->phy_ver[2] = resp->phy_bld;

        HWRM_UNLOCK();

        PMD_DRV_LOG(DEBUG, "Link Speed %d\n", link_info->link_speed);
        PMD_DRV_LOG(DEBUG, "Auto Mode %d\n", link_info->auto_mode);
        PMD_DRV_LOG(DEBUG, "Support Speeds %x\n", link_info->support_speeds);
        PMD_DRV_LOG(DEBUG, "Auto Link Speed %x\n", link_info->auto_link_speed);
        PMD_DRV_LOG(DEBUG, "Auto Link Speed Mask %x\n",
                    link_info->auto_link_speed_mask);
        PMD_DRV_LOG(DEBUG, "Forced Link Speed %x\n",
                    link_info->force_link_speed);

        return rc;
}

int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
        struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
        int i;

        HWRM_PREP(req, QUEUE_QPORTCFG, BNXT_USE_CHIMP_MB);

        req.flags = HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX;
        /* HWRM Version >= 1.9.1 */
        if (bp->hwrm_spec_code >= HWRM_VERSION_1_9_1)
                req.drv_qmap_cap =
                        HWRM_QUEUE_QPORTCFG_INPUT_DRV_QMAP_CAP_ENABLED;
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

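/*
 * Token-pasting helper: GET_QUEUE_INFO(n) expands to reads of
 * resp->queue_id<n> and resp->queue_id<n>_service_profile for n = 0..7.
 */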
#define GET_QUEUE_INFO(x) \
        bp->cos_queue[x].id = resp->queue_id##x; \
        bp->cos_queue[x].profile = resp->queue_id##x##_service_profile

        GET_QUEUE_INFO(0);
        GET_QUEUE_INFO(1);
        GET_QUEUE_INFO(2);
        GET_QUEUE_INFO(3);
        GET_QUEUE_INFO(4);
        GET_QUEUE_INFO(5);
        GET_QUEUE_INFO(6);
        GET_QUEUE_INFO(7);

        /* Read the remaining resp fields before dropping the lock */
        bp->max_tc = resp->max_configurable_queues;
        bp->max_lltc = resp->max_configurable_lossless_queues;

        HWRM_UNLOCK();

        if (bp->hwrm_spec_code < HWRM_VERSION_1_9_1) {
                bp->tx_cosq_id = bp->cos_queue[0].id;
        } else {
                /* iterate and find the COSq profile to use for Tx */
                for (i = 0; i < BNXT_COS_QUEUE_COUNT; i++) {
                        if (bp->cos_queue[i].profile ==
                                HWRM_QUEUE_SERVICE_PROFILE_LOSSY) {
                                bp->tx_cosq_id = bp->cos_queue[i].id;
                                break;
                        }
                }
        }

        if (bp->max_tc > BNXT_MAX_QUEUE)
                bp->max_tc = BNXT_MAX_QUEUE;
        bp->max_q = bp->max_tc;

        PMD_DRV_LOG(DEBUG, "Tx Cos Queue to use: %d\n", bp->tx_cosq_id);

        return rc;
}

int bnxt_hwrm_ring_alloc(struct bnxt *bp,
                         struct bnxt_ring *ring,
                         uint32_t ring_type, uint32_t map_index,
                         uint32_t stats_ctx_id, uint32_t cmpl_ring_id)
{
        int rc = 0;
        uint32_t enables = 0;
        struct hwrm_ring_alloc_input req = {.req_type = 0 };
        struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
        struct rte_mempool *mb_pool;
        uint16_t rx_buf_size;

        HWRM_PREP(req, RING_ALLOC, BNXT_USE_CHIMP_MB);

        req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
        req.fbo = rte_cpu_to_le_32(0);
        /* Association of ring index with doorbell index */
        req.logical_id = rte_cpu_to_le_16(map_index);
        req.length = rte_cpu_to_le_32(ring->ring_size);

        switch (ring_type) {
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
                req.ring_type = ring_type;
                req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
                req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id);
                req.queue_id = rte_cpu_to_le_16(bp->tx_cosq_id);
                if (stats_ctx_id != INVALID_STATS_CTX_ID)
                        enables |=
                        HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
                break;
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
                req.ring_type = ring_type;
                req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
                req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id);
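                /*
                 * Thor requires the usable Rx buffer size: the mbuf data
                 * room minus headroom, capped at BNXT_MAX_PKT_LEN.
                 */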
1249                 if (BNXT_CHIP_THOR(bp)) {
1250                         mb_pool = bp->rx_queues[0]->mb_pool;
1251                         rx_buf_size = rte_pktmbuf_data_room_size(mb_pool) -
1252                                       RTE_PKTMBUF_HEADROOM;
1253                         rx_buf_size = RTE_MIN(BNXT_MAX_PKT_LEN, rx_buf_size);
1254                         req.rx_buf_size = rte_cpu_to_le_16(rx_buf_size);
1255                         enables |=
1256                                 HWRM_RING_ALLOC_INPUT_ENABLES_RX_BUF_SIZE_VALID;
1257                 }
1258                 if (stats_ctx_id != INVALID_STATS_CTX_ID)
1259                         enables |=
1260                                 HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
1261                 break;
1262         case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
1263                 req.ring_type = ring_type;
1264                 if (BNXT_HAS_NQ(bp)) {
1265                         /* Association of cp ring with nq */
1266                         req.nq_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
1267                         enables |=
1268                                 HWRM_RING_ALLOC_INPUT_ENABLES_NQ_RING_ID_VALID;
1269                 }
1270                 req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
1271                 break;
1272         case HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ:
1273                 req.ring_type = ring_type;
1274                 req.page_size = BNXT_PAGE_SHFT;
1275                 req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
1276                 break;
1277         case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG:
1278                 req.ring_type = ring_type;
1279                 req.rx_ring_id = rte_cpu_to_le_16(ring->fw_rx_ring_id);
1280
1281                 mb_pool = bp->rx_queues[0]->mb_pool;
1282                 rx_buf_size = rte_pktmbuf_data_room_size(mb_pool) -
1283                               RTE_PKTMBUF_HEADROOM;
1284                 rx_buf_size = RTE_MIN(BNXT_MAX_PKT_LEN, rx_buf_size);
1285                 req.rx_buf_size = rte_cpu_to_le_16(rx_buf_size);
1286
1287                 req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id);
1288                 enables |= HWRM_RING_ALLOC_INPUT_ENABLES_RX_RING_ID_VALID |
1289                            HWRM_RING_ALLOC_INPUT_ENABLES_RX_BUF_SIZE_VALID |
1290                            HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
1291                 break;
1292         default:
1293                 PMD_DRV_LOG(ERR, "hwrm alloc invalid ring type %d\n",
1294                         ring_type);
1295                 HWRM_UNLOCK();
1296                 return -EINVAL;
1297         }
1298         req.enables = rte_cpu_to_le_32(enables);
1299
1300         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1301
1302         if (rc || resp->error_code) {
1303                 if (rc == 0 && resp->error_code)
1304                         rc = rte_le_to_cpu_16(resp->error_code);
1305                 switch (ring_type) {
1306                 case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
1307                         PMD_DRV_LOG(ERR,
1308                                 "hwrm_ring_alloc cp failed. rc:%d\n", rc);
1309                         HWRM_UNLOCK();
1310                         return rc;
1311                 case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
1312                         PMD_DRV_LOG(ERR,
1313                                     "hwrm_ring_alloc rx failed. rc:%d\n", rc);
1314                         HWRM_UNLOCK();
1315                         return rc;
1316                 case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG:
1317                         PMD_DRV_LOG(ERR,
1318                                     "hwrm_ring_alloc rx agg failed. rc:%d\n",
1319                                     rc);
1320                         HWRM_UNLOCK();
1321                         return rc;
1322                 case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
1323                         PMD_DRV_LOG(ERR,
1324                                     "hwrm_ring_alloc tx failed. rc:%d\n", rc);
1325                         HWRM_UNLOCK();
1326                         return rc;
1327                 case HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ:
1328                         PMD_DRV_LOG(ERR,
1329                                     "hwrm_ring_alloc nq failed. rc:%d\n", rc);
1330                         HWRM_UNLOCK();
1331                         return rc;
1332                 default:
1333                         PMD_DRV_LOG(ERR, "Invalid ring. rc:%d\n", rc);
1334                         HWRM_UNLOCK();
1335                         return rc;
1336                 }
1337         }
1338
1339         ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
1340         HWRM_UNLOCK();
1341         return rc;
1342 }
1343
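/*
 * Free a HW ring previously allocated via HWRM_RING_ALLOC. Only the
 * HWRM_RING_FREE command is issued here; callers are expected to
 * invalidate ring->fw_ring_id themselves afterwards.
 */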
int bnxt_hwrm_ring_free(struct bnxt *bp,
			struct bnxt_ring *ring, uint32_t ring_type)
{
	int rc;
	struct hwrm_ring_free_input req = {.req_type = 0 };
	struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_FREE, BNXT_USE_CHIMP_MB);

	req.ring_type = ring_type;
	req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	if (rc || resp->error_code) {
		if (rc == 0 && resp->error_code)
			rc = rte_le_to_cpu_16(resp->error_code);
		HWRM_UNLOCK();

		switch (ring_type) {
		case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
			PMD_DRV_LOG(ERR, "hwrm_ring_free cp failed. rc:%d\n",
				rc);
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
			PMD_DRV_LOG(ERR, "hwrm_ring_free rx failed. rc:%d\n",
				rc);
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
			PMD_DRV_LOG(ERR, "hwrm_ring_free tx failed. rc:%d\n",
				rc);
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_NQ:
			PMD_DRV_LOG(ERR,
				    "hwrm_ring_free nq failed. rc:%d\n", rc);
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_RX_AGG:
			PMD_DRV_LOG(ERR,
				    "hwrm_ring_free agg failed. rc:%d\n", rc);
			return rc;
		default:
			PMD_DRV_LOG(ERR, "Invalid ring, rc:%d\n", rc);
			return rc;
		}
	}
	HWRM_UNLOCK();
	return 0;
}

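/*
 * A ring group ties together the completion ring (cr), Rx ring (rr),
 * aggregation ring (ar), and statistics context (sc) for one Rx queue;
 * firmware returns a group ID that the VNIC configuration later
 * references via vnic->dflt_ring_grp.
 */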
int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
{
	int rc = 0;
	struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
	struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_GRP_ALLOC, BNXT_USE_CHIMP_MB);

	req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
	req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
	req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
	req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

	bp->grp_info[idx].fw_grp_id =
	    rte_le_to_cpu_16(resp->ring_group_id);

	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
{
	int rc;
	struct hwrm_ring_grp_free_input req = {.req_type = 0 };
	struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_GRP_FREE, BNXT_USE_CHIMP_MB);

	req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
	return rc;
}

int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
{
	int rc = 0;
	struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
	struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;

	if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
		return rc;

	HWRM_PREP(req, STAT_CTX_CLR_STATS, BNXT_USE_CHIMP_MB);

	req.stat_ctx_id = rte_cpu_to_le_32(cpr->hw_stats_ctx_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

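/*
 * Allocate a firmware statistics context backed by the DMA region at
 * cpr->hw_stats_map. update_period_ms is left at zero, which requests
 * no periodic firmware DMA of the counters; the host reads them on
 * demand instead.
 */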
int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
				unsigned int idx __rte_unused)
{
	int rc;
	struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
	struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, STAT_CTX_ALLOC, BNXT_USE_CHIMP_MB);

	req.update_period_ms = rte_cpu_to_le_32(0);

	req.stats_dma_addr =
	    rte_cpu_to_le_64(cpr->hw_stats_map);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

	cpr->hw_stats_ctx_id = rte_le_to_cpu_32(resp->stat_ctx_id);

	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
				unsigned int idx __rte_unused)
{
	int rc;
	struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
	struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, STAT_CTX_FREE, BNXT_USE_CHIMP_MB);

	req.stat_ctx_id = rte_cpu_to_le_32(cpr->hw_stats_ctx_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

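/*
 * Allocate a VNIC and map ring groups [start_grp_id, end_grp_id) to it.
 * The MRU is derived from the current MTU; e.g. with a 1500-byte MTU:
 * 1500 + 14 (Ethernet header) + 4 (CRC) + 4 (VLAN tag) = 1522.
 */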
int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0, i, j;
	struct hwrm_vnic_alloc_input req = { 0 };
	struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	if (!BNXT_HAS_RING_GRPS(bp))
		goto skip_ring_grps;

	/* map ring groups to this vnic */
	PMD_DRV_LOG(DEBUG, "Alloc VNIC. Start %x, End %x\n",
		vnic->start_grp_id, vnic->end_grp_id);
	for (i = vnic->start_grp_id, j = 0; i < vnic->end_grp_id; i++, j++)
		vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;

	vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
	vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
	vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
	vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;

skip_ring_grps:
	vnic->mru = bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
				RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE;
	HWRM_PREP(req, VNIC_ALLOC, BNXT_USE_CHIMP_MB);

	if (vnic->func_default)
		req.flags =
			rte_cpu_to_le_32(HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

	vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
	HWRM_UNLOCK();
	PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
	return rc;
}

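/*
 * The placement-mode query/configure pair below is used by
 * bnxt_hwrm_vnic_cfg() to snapshot the VNIC placement settings before
 * issuing VNIC_CFG and to reapply them afterwards, presumably because
 * VNIC_CFG can implicitly reset them.
 */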
static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
					struct bnxt_vnic_info *vnic,
					struct bnxt_plcmodes_cfg *pmode)
{
	int rc = 0;
	struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 };
	struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_PLCMODES_QCFG, BNXT_USE_CHIMP_MB);

	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

	pmode->flags = rte_le_to_cpu_32(resp->flags);
	/* dflt_vnic bit doesn't exist in the _cfg command */
	pmode->flags &= ~(HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC);
	pmode->jumbo_thresh = rte_le_to_cpu_16(resp->jumbo_thresh);
	pmode->hds_offset = rte_le_to_cpu_16(resp->hds_offset);
	pmode->hds_threshold = rte_le_to_cpu_16(resp->hds_threshold);

	HWRM_UNLOCK();

	return rc;
}

static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
				       struct bnxt_vnic_info *vnic,
				       struct bnxt_plcmodes_cfg *pmode)
{
	int rc = 0;
	struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;

	if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
		PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
		return rc;
	}

	HWRM_PREP(req, VNIC_PLCMODES_CFG, BNXT_USE_CHIMP_MB);

	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
	req.flags = rte_cpu_to_le_32(pmode->flags);
	req.jumbo_thresh = rte_cpu_to_le_16(pmode->jumbo_thresh);
	req.hds_offset = rte_cpu_to_le_16(pmode->hds_offset);
	req.hds_threshold = rte_cpu_to_le_16(pmode->hds_threshold);
	req.enables = rte_cpu_to_le_32(
	    HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID |
	    HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID |
	    HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID
	);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

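/*
 * Configure a VNIC. On Thor (P5) chips the default Rx and completion
 * ring IDs are programmed directly and ring groups are skipped; on
 * earlier chips the default ring group plus the RSS/COS/LB context
 * rules are used instead.
 */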
int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct bnxt_plcmodes_cfg pmodes = { 0 };
	uint32_t ctx_enable_flag = 0;
	uint32_t enables = 0;

	if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
		PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
		return rc;
	}

	rc = bnxt_hwrm_vnic_plcmodes_qcfg(bp, vnic, &pmodes);
	if (rc)
		return rc;

	HWRM_PREP(req, VNIC_CFG, BNXT_USE_CHIMP_MB);

	if (BNXT_CHIP_THOR(bp)) {
		struct bnxt_rx_queue *rxq = bp->eth_dev->data->rx_queues[0];
		struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
		struct bnxt_cp_ring_info *cpr = rxq->cp_ring;

		req.default_rx_ring_id =
			rte_cpu_to_le_16(rxr->rx_ring_struct->fw_ring_id);
		req.default_cmpl_ring_id =
			rte_cpu_to_le_16(cpr->cp_ring_struct->fw_ring_id);
		enables = HWRM_VNIC_CFG_INPUT_ENABLES_DEFAULT_RX_RING_ID |
			  HWRM_VNIC_CFG_INPUT_ENABLES_DEFAULT_CMPL_RING_ID;
		goto config_mru;
	}

	/* Only RSS support for now TBD: COS & LB */
	enables = HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP;
	if (vnic->lb_rule != 0xffff)
		ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE;
	if (vnic->cos_rule != 0xffff)
		ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE;
	if (vnic->rss_rule != (uint16_t)HWRM_NA_SIGNATURE) {
		ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_MRU;
		ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
	}
	enables |= ctx_enable_flag;
	req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);
	req.rss_rule = rte_cpu_to_le_16(vnic->rss_rule);
	req.cos_rule = rte_cpu_to_le_16(vnic->cos_rule);
	req.lb_rule = rte_cpu_to_le_16(vnic->lb_rule);

config_mru:
	req.enables = rte_cpu_to_le_32(enables);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
	req.mru = rte_cpu_to_le_16(vnic->mru);
	/* Configure default VNIC only once. */
	if (vnic->func_default && !(bp->flags & BNXT_FLAG_DFLT_VNIC_SET)) {
		req.flags |=
		    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
		bp->flags |= BNXT_FLAG_DFLT_VNIC_SET;
	}
	if (vnic->vlan_strip)
		req.flags |=
		    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
	if (vnic->bd_stall)
		req.flags |=
		    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
	if (vnic->roce_dual)
		req.flags |= rte_cpu_to_le_32(
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE);
	if (vnic->roce_only)
		req.flags |= rte_cpu_to_le_32(
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE);
	if (vnic->rss_dflt_cr)
		req.flags |= rte_cpu_to_le_32(
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	rc = bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes);

	return rc;
}

int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
		int16_t fw_vf_id)
{
	int rc = 0;
	struct hwrm_vnic_qcfg_input req = {.req_type = 0 };
	struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

	if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
		PMD_DRV_LOG(DEBUG, "VNIC QCFG ID %d\n", vnic->fw_vnic_id);
		return rc;
	}
	HWRM_PREP(req, VNIC_QCFG, BNXT_USE_CHIMP_MB);

	req.enables =
		rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
	req.vf_id = rte_cpu_to_le_16(fw_vf_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

	vnic->dflt_ring_grp = rte_le_to_cpu_16(resp->dflt_ring_grp);
	vnic->rss_rule = rte_le_to_cpu_16(resp->rss_rule);
	vnic->cos_rule = rte_le_to_cpu_16(resp->cos_rule);
	vnic->lb_rule = rte_le_to_cpu_16(resp->lb_rule);
	vnic->mru = rte_le_to_cpu_16(resp->mru);
	vnic->func_default = rte_le_to_cpu_32(
			resp->flags) & HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT;
	vnic->vlan_strip = rte_le_to_cpu_32(resp->flags) &
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE;
	vnic->bd_stall = rte_le_to_cpu_32(resp->flags) &
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE;
	vnic->roce_dual = rte_le_to_cpu_32(resp->flags) &
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE;
	vnic->roce_only = rte_le_to_cpu_32(resp->flags) &
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE;
	vnic->rss_dflt_cr = rte_le_to_cpu_32(resp->flags) &
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE;

	HWRM_UNLOCK();

	return rc;
}

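/*
 * Allocate an RSS/COS/LB context. On chips without ring groups (Thor)
 * one context is allocated per ring-table pair and stored in
 * vnic->fw_grp_ids[]; otherwise only ctx_idx 0 is kept, backing
 * vnic->rss_rule.
 */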
int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp,
			     struct bnxt_vnic_info *vnic, uint16_t ctx_idx)
{
	int rc = 0;
	uint16_t ctx_id;
	struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
	struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
						bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC, BNXT_USE_CHIMP_MB);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
	HWRM_CHECK_RESULT();

	ctx_id = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
	if (!BNXT_HAS_RING_GRPS(bp))
		vnic->fw_grp_ids[ctx_idx] = ctx_id;
	else if (ctx_idx == 0)
		vnic->rss_rule = ctx_id;

	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp,
			    struct bnxt_vnic_info *vnic, uint16_t ctx_idx)
{
	int rc = 0;
	struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
	struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
						bp->hwrm_cmd_resp_addr;

	if (ctx_idx == (uint16_t)HWRM_NA_SIGNATURE) {
		PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
		return rc;
	}
	HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE, BNXT_USE_CHIMP_MB);

	req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(ctx_idx);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_free_input req = {.req_type = 0 };
	struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;

	if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
		PMD_DRV_LOG(DEBUG, "VNIC FREE ID %x\n", vnic->fw_vnic_id);
		return rc;
	}

	HWRM_PREP(req, VNIC_FREE, BNXT_USE_CHIMP_MB);

	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	vnic->fw_vnic_id = INVALID_HW_RING_ID;
	/* Configure default VNIC again if necessary. */
	if (vnic->func_default && (bp->flags & BNXT_FLAG_DFLT_VNIC_SET))
		bp->flags &= ~BNXT_FLAG_DFLT_VNIC_SET;

	return rc;
}

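/*
 * Thor programs RSS per ring-table pair: each of the num_lb_ctxts
 * contexts is given its own slice of the shared DMA ring group table,
 * offset by i * HW_HASH_INDEX_SIZE entries.
 */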
static int
bnxt_hwrm_vnic_rss_cfg_thor(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int i;
	int rc = 0;
	int nr_ctxs = vnic->num_lb_ctxts;
	struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;

	for (i = 0; i < nr_ctxs; i++) {
		HWRM_PREP(req, VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);

		req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
		req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
		req.hash_mode_flags = vnic->hash_mode;

		req.hash_key_tbl_addr =
			rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);

		req.ring_grp_tbl_addr =
			rte_cpu_to_le_64(vnic->rss_table_dma_addr +
					 i * HW_HASH_INDEX_SIZE);
		req.ring_table_pair_index = i;
		req.rss_ctx_idx = rte_cpu_to_le_16(vnic->fw_grp_ids[i]);

		rc = bnxt_hwrm_send_message(bp, &req, sizeof(req),
					    BNXT_USE_CHIMP_MB);

		HWRM_CHECK_RESULT();
		HWRM_UNLOCK();
	}

	return rc;
}

int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
			   struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;

	if (!vnic->rss_table)
		return 0;

	if (BNXT_CHIP_THOR(bp))
		return bnxt_hwrm_vnic_rss_cfg_thor(bp, vnic);

	HWRM_PREP(req, VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);

	req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
	req.hash_mode_flags = vnic->hash_mode;

	req.ring_grp_tbl_addr =
	    rte_cpu_to_le_64(vnic->rss_table_dma_addr);
	req.hash_key_tbl_addr =
	    rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
	req.rss_ctx_idx = rte_cpu_to_le_16(vnic->rss_rule);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
			struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	uint16_t size;

	if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
		PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
		return rc;
	}

	HWRM_PREP(req, VNIC_PLCMODES_CFG, BNXT_USE_CHIMP_MB);

	req.flags = rte_cpu_to_le_32(
			HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);

	req.enables = rte_cpu_to_le_32(
		HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID);

	size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
	size -= RTE_PKTMBUF_HEADROOM;
	size = RTE_MIN(BNXT_MAX_PKT_LEN, size);

	req.jumbo_thresh = rte_cpu_to_le_16(size);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

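/*
 * Enable or disable TPA (hardware LRO) on a VNIC. When enabling, the
 * aggregation limits (max_agg_segs, max_aggs, min_agg_len) are set to
 * fixed defaults; when disabling, flags and enables stay zero, which
 * tells firmware to turn the feature off.
 */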
int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
			struct bnxt_vnic_info *vnic, bool enable)
{
	int rc = 0;
	struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;

	if (BNXT_CHIP_THOR(bp))
		return 0;

	HWRM_PREP(req, VNIC_TPA_CFG, BNXT_USE_CHIMP_MB);

	if (enable) {
		req.enables = rte_cpu_to_le_32(
				HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
				HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS |
				HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN);
		req.flags = rte_cpu_to_le_32(
				HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
				HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
				HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE |
				HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO |
				HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
			HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ);
		req.max_agg_segs = rte_cpu_to_le_16(5);
		req.max_aggs =
			rte_cpu_to_le_16(HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX);
		req.min_agg_len = rte_cpu_to_le_32(512);
	}
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
{
	struct hwrm_func_cfg_input req = {0};
	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
	req.enables = rte_cpu_to_le_32(
			HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
	memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);

	HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	bp->pf.vf_info[vf].random_mac = false;

	return rc;
}

int bnxt_hwrm_func_qstats_tx_drop(struct bnxt *bp, uint16_t fid,
				  uint64_t *dropped)
{
	int rc = 0;
	struct hwrm_func_qstats_input req = {.req_type = 0};
	struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_QSTATS, BNXT_USE_CHIMP_MB);

	req.fid = rte_cpu_to_le_16(fid);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

	if (dropped)
		*dropped = rte_le_to_cpu_64(resp->tx_drop_pkts);

	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
			  struct rte_eth_stats *stats)
{
	int rc = 0;
	struct hwrm_func_qstats_input req = {.req_type = 0};
	struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_QSTATS, BNXT_USE_CHIMP_MB);

	req.fid = rte_cpu_to_le_16(fid);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

	stats->ipackets = rte_le_to_cpu_64(resp->rx_ucast_pkts);
	stats->ipackets += rte_le_to_cpu_64(resp->rx_mcast_pkts);
	stats->ipackets += rte_le_to_cpu_64(resp->rx_bcast_pkts);
	stats->ibytes = rte_le_to_cpu_64(resp->rx_ucast_bytes);
	stats->ibytes += rte_le_to_cpu_64(resp->rx_mcast_bytes);
	stats->ibytes += rte_le_to_cpu_64(resp->rx_bcast_bytes);

	stats->opackets = rte_le_to_cpu_64(resp->tx_ucast_pkts);
	stats->opackets += rte_le_to_cpu_64(resp->tx_mcast_pkts);
	stats->opackets += rte_le_to_cpu_64(resp->tx_bcast_pkts);
	stats->obytes = rte_le_to_cpu_64(resp->tx_ucast_bytes);
	stats->obytes += rte_le_to_cpu_64(resp->tx_mcast_bytes);
	stats->obytes += rte_le_to_cpu_64(resp->tx_bcast_bytes);

	stats->imissed = rte_le_to_cpu_64(resp->rx_discard_pkts);
	stats->ierrors = rte_le_to_cpu_64(resp->rx_drop_pkts);
	stats->oerrors = rte_le_to_cpu_64(resp->tx_discard_pkts);

	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid)
{
	int rc = 0;
	struct hwrm_func_clr_stats_input req = {.req_type = 0};
	struct hwrm_func_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_CLR_STATS, BNXT_USE_CHIMP_MB);

	req.fid = rte_cpu_to_le_16(fid);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

/*
 * HWRM utility functions
 */

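/*
 * The loops below walk the Rx completion rings first and then the Tx
 * completion rings: index i in [0, rx_cp_nr_rings) selects an Rx
 * queue, and i - rx_cp_nr_rings selects a Tx queue otherwise.
 */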
int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
{
	unsigned int i;
	int rc = 0;

	for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
		struct bnxt_tx_queue *txq;
		struct bnxt_rx_queue *rxq;
		struct bnxt_cp_ring_info *cpr;

		if (i >= bp->rx_cp_nr_rings) {
			txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
			cpr = txq->cp_ring;
		} else {
			rxq = bp->rx_queues[i];
			cpr = rxq->cp_ring;
		}

		rc = bnxt_hwrm_stat_clear(bp, cpr);
		if (rc)
			return rc;
	}
	return 0;
}

int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
{
	int rc;
	unsigned int i;
	struct bnxt_cp_ring_info *cpr;

	for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {

		if (i >= bp->rx_cp_nr_rings) {
			cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
		} else {
			cpr = bp->rx_queues[i]->cp_ring;
			if (BNXT_HAS_RING_GRPS(bp))
				bp->grp_info[i].fw_stats_ctx = -1;
		}
		if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
			rc = bnxt_hwrm_stat_ctx_free(bp, cpr, i);
			cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
			if (rc)
				return rc;
		}
	}
	return 0;
}

int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
{
	unsigned int i;
	int rc = 0;

	for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
		struct bnxt_tx_queue *txq;
		struct bnxt_rx_queue *rxq;
		struct bnxt_cp_ring_info *cpr;

		if (i >= bp->rx_cp_nr_rings) {
			txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
			cpr = txq->cp_ring;
		} else {
			rxq = bp->rx_queues[i];
			cpr = rxq->cp_ring;
		}

		rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, i);

		if (rc)
			return rc;
	}
	return rc;
}

int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
{
	uint16_t idx;
	int rc = 0;

	if (!BNXT_HAS_RING_GRPS(bp))
		return 0;

	for (idx = 0; idx < bp->rx_cp_nr_rings; idx++) {

		if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID)
			continue;

		rc = bnxt_hwrm_ring_grp_free(bp, idx);

		if (rc)
			return rc;
	}
	return rc;
}

void bnxt_free_nq_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
{
	struct bnxt_ring *cp_ring = cpr->cp_ring_struct;

	bnxt_hwrm_ring_free(bp, cp_ring,
			    HWRM_RING_FREE_INPUT_RING_TYPE_NQ);
	cp_ring->fw_ring_id = INVALID_HW_RING_ID;
	memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
				     sizeof(*cpr->cp_desc_ring));
	cpr->cp_raw_cons = 0;
	cpr->valid = 0;
}

void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
{
	struct bnxt_ring *cp_ring = cpr->cp_ring_struct;

	bnxt_hwrm_ring_free(bp, cp_ring,
			HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL);
	cp_ring->fw_ring_id = INVALID_HW_RING_ID;
	memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
			sizeof(*cpr->cp_desc_ring));
	cpr->cp_raw_cons = 0;
	cpr->valid = 0;
}

void bnxt_free_hwrm_rx_ring(struct bnxt *bp, int queue_index)
{
	struct bnxt_rx_queue *rxq = bp->rx_queues[queue_index];
	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
	struct bnxt_ring *ring = rxr->rx_ring_struct;
	struct bnxt_cp_ring_info *cpr = rxq->cp_ring;

	if (ring->fw_ring_id != INVALID_HW_RING_ID) {
		bnxt_hwrm_ring_free(bp, ring,
				    HWRM_RING_FREE_INPUT_RING_TYPE_RX);
		ring->fw_ring_id = INVALID_HW_RING_ID;
		if (BNXT_HAS_RING_GRPS(bp))
			bp->grp_info[queue_index].rx_fw_ring_id =
							INVALID_HW_RING_ID;
		memset(rxr->rx_desc_ring, 0,
		       rxr->rx_ring_struct->ring_size *
		       sizeof(*rxr->rx_desc_ring));
		memset(rxr->rx_buf_ring, 0,
		       rxr->rx_ring_struct->ring_size *
		       sizeof(*rxr->rx_buf_ring));
		rxr->rx_prod = 0;
	}
	ring = rxr->ag_ring_struct;
	if (ring->fw_ring_id != INVALID_HW_RING_ID) {
		bnxt_hwrm_ring_free(bp, ring,
				    BNXT_CHIP_THOR(bp) ?
				    HWRM_RING_FREE_INPUT_RING_TYPE_RX_AGG :
				    HWRM_RING_FREE_INPUT_RING_TYPE_RX);
		ring->fw_ring_id = INVALID_HW_RING_ID;
		memset(rxr->ag_buf_ring, 0,
		       rxr->ag_ring_struct->ring_size *
		       sizeof(*rxr->ag_buf_ring));
		rxr->ag_prod = 0;
		if (BNXT_HAS_RING_GRPS(bp))
			bp->grp_info[queue_index].ag_fw_ring_id =
							INVALID_HW_RING_ID;
	}
	if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
		bnxt_free_cp_ring(bp, cpr);
		if (rxq->nq_ring)
			bnxt_free_nq_ring(bp, rxq->nq_ring);
	}

	if (BNXT_HAS_RING_GRPS(bp))
		bp->grp_info[queue_index].cp_fw_ring_id = INVALID_HW_RING_ID;
}

int bnxt_free_all_hwrm_rings(struct bnxt *bp)
{
	unsigned int i;

	for (i = 0; i < bp->tx_cp_nr_rings; i++) {
		struct bnxt_tx_queue *txq = bp->tx_queues[i];
		struct bnxt_tx_ring_info *txr = txq->tx_ring;
		struct bnxt_ring *ring = txr->tx_ring_struct;
		struct bnxt_cp_ring_info *cpr = txq->cp_ring;

		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			bnxt_hwrm_ring_free(bp, ring,
					HWRM_RING_FREE_INPUT_RING_TYPE_TX);
			ring->fw_ring_id = INVALID_HW_RING_ID;
			memset(txr->tx_desc_ring, 0,
					txr->tx_ring_struct->ring_size *
					sizeof(*txr->tx_desc_ring));
			memset(txr->tx_buf_ring, 0,
					txr->tx_ring_struct->ring_size *
					sizeof(*txr->tx_buf_ring));
			txr->tx_prod = 0;
			txr->tx_cons = 0;
		}
		if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
			bnxt_free_cp_ring(bp, cpr);
			cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
			if (txq->nq_ring)
				bnxt_free_nq_ring(bp, txq->nq_ring);
		}
	}

	for (i = 0; i < bp->rx_cp_nr_rings; i++)
		bnxt_free_hwrm_rx_ring(bp, i);

	return 0;
}

int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
{
	uint16_t i;
	int rc = 0;

	if (!BNXT_HAS_RING_GRPS(bp))
		return 0;

	for (i = 0; i < bp->rx_cp_nr_rings; i++) {
		rc = bnxt_hwrm_ring_grp_alloc(bp, i);
		if (rc)
			return rc;
	}
	return rc;
}

void bnxt_free_hwrm_resources(struct bnxt *bp)
{
	/* Free the rte_malloc'd HWRM response and short command buffers */
	rte_free(bp->hwrm_cmd_resp_addr);
	rte_free(bp->hwrm_short_cmd_req_addr);
	bp->hwrm_cmd_resp_addr = NULL;
	bp->hwrm_short_cmd_req_addr = NULL;
	bp->hwrm_cmd_resp_dma_addr = 0;
	bp->hwrm_short_cmd_req_dma_addr = 0;
}

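/*
 * Allocate the DMA-able buffer used for HWRM responses and record its
 * IOVA; the page is locked so the mapping stays valid for device
 * access. The buffer name encodes the PCI address to keep it unique
 * per port.
 */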
int bnxt_alloc_hwrm_resources(struct bnxt *bp)
{
	struct rte_pci_device *pdev = bp->pdev;
	char type[RTE_MEMZONE_NAMESIZE];

	snprintf(type, sizeof(type), "bnxt_hwrm_%04x:%02x:%02x:%02x",
		 pdev->addr.domain, pdev->addr.bus, pdev->addr.devid,
		 pdev->addr.function);
	bp->max_resp_len = HWRM_MAX_RESP_LEN;
	bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
	if (bp->hwrm_cmd_resp_addr == NULL)
		return -ENOMEM;
	rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
	bp->hwrm_cmd_resp_dma_addr =
		rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
	if (bp->hwrm_cmd_resp_dma_addr == RTE_BAD_IOVA) {
		PMD_DRV_LOG(ERR,
			"unable to map response address to physical memory\n");
		return -ENOMEM;
	}
	rte_spinlock_init(&bp->hwrm_lock);

	return 0;
}

int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	struct bnxt_filter_info *filter;
	int rc = 0;

	STAILQ_FOREACH(filter, &vnic->filter, next) {
		if (filter->filter_type == HWRM_CFA_EM_FILTER)
			rc = bnxt_hwrm_clear_em_filter(bp, filter);
		else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
			rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
		else
			rc = bnxt_hwrm_clear_l2_filter(bp, filter);
		STAILQ_REMOVE(&vnic->filter, filter, bnxt_filter_info, next);
		bnxt_free_filter(bp, filter);
		/* Keep clearing the remaining filters even on failure;
		 * the last error code is returned to the caller.
		 */
	}
	return rc;
}

static int
bnxt_clear_hwrm_vnic_flows(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	struct bnxt_filter_info *filter;
	struct rte_flow *flow;
	int rc = 0;

	STAILQ_FOREACH(flow, &vnic->flow_list, next) {
		filter = flow->filter;
		PMD_DRV_LOG(DEBUG, "filter type %d\n", filter->filter_type);
		if (filter->filter_type == HWRM_CFA_EM_FILTER)
			rc = bnxt_hwrm_clear_em_filter(bp, filter);
		else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
			rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
		else
			rc = bnxt_hwrm_clear_l2_filter(bp, filter);

		STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
		rte_free(flow);
		/* Keep clearing the remaining flows even on failure;
		 * the last error code is returned to the caller.
		 */
	}
	return rc;
}

int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	struct bnxt_filter_info *filter;
	int rc = 0;

	STAILQ_FOREACH(filter, &vnic->filter, next) {
		if (filter->filter_type == HWRM_CFA_EM_FILTER)
			rc = bnxt_hwrm_set_em_filter(bp, filter->dst_id,
						     filter);
		else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
			rc = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id,
							 filter);
		else
			rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
						     filter);
		if (rc)
			break;
	}
	return rc;
}

void bnxt_free_tunnel_ports(struct bnxt *bp)
{
	if (bp->vxlan_port_cnt)
		bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id,
			HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN);
	bp->vxlan_port = 0;
	if (bp->geneve_port_cnt)
		bnxt_hwrm_tunnel_dst_port_free(bp, bp->geneve_fw_dst_port_id,
			HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE);
	bp->geneve_port = 0;
}

void bnxt_free_all_hwrm_resources(struct bnxt *bp)
{
	int i, j;

	if (bp->vnic_info == NULL)
		return;

	/*
	 * Cleanup VNICs in reverse order, to make sure the L2 filter
	 * from vnic0 is last to be cleaned up.
	 */
	for (i = bp->nr_vnics - 1; i >= 0; i--) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

		if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
			PMD_DRV_LOG(DEBUG, "Invalid vNIC ID\n");
			return;
		}

		bnxt_clear_hwrm_vnic_flows(bp, vnic);

		bnxt_clear_hwrm_vnic_filters(bp, vnic);

		if (BNXT_CHIP_THOR(bp)) {
			for (j = 0; j < vnic->num_lb_ctxts; j++) {
				bnxt_hwrm_vnic_ctx_free(bp, vnic,
							vnic->fw_grp_ids[j]);
				vnic->fw_grp_ids[j] = INVALID_HW_RING_ID;
			}
			vnic->num_lb_ctxts = 0;
		} else {
			bnxt_hwrm_vnic_ctx_free(bp, vnic, vnic->rss_rule);
			vnic->rss_rule = INVALID_HW_RING_ID;
		}

		bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);

		bnxt_hwrm_vnic_free(bp, vnic);

		rte_free(vnic->fw_grp_ids);
	}
	/* Ring resources */
	bnxt_free_all_hwrm_rings(bp);
	bnxt_free_all_hwrm_ring_grps(bp);
	bnxt_free_all_hwrm_stat_ctxs(bp);
	bnxt_free_tunnel_ports(bp);
}

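/*
 * Helpers translating between the rte_ethdev ETH_LINK_SPEED_* /
 * ETH_SPEED_NUM_* encodings and the HWRM PORT_PHY speed and duplex
 * encodings. Note that ETH_LINK_SPEED_AUTONEG is 0, so a cleared
 * ETH_LINK_SPEED_FIXED bit means autonegotiation is requested.
 */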
static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
{
	uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;

	if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
		return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;

	switch (conf_link_speed) {
	case ETH_LINK_SPEED_10M_HD:
	case ETH_LINK_SPEED_100M_HD:
		/* FALLTHROUGH */
		return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
	}
	return hw_link_duplex;
}

static uint16_t bnxt_check_eth_link_autoneg(uint32_t conf_link)
{
	return (conf_link & ETH_LINK_SPEED_FIXED) ? 0 : 1;
}

static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
{
	uint16_t eth_link_speed = 0;

	if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
		return ETH_LINK_SPEED_AUTONEG;

	switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
	case ETH_LINK_SPEED_100M:
	case ETH_LINK_SPEED_100M_HD:
		/* FALLTHROUGH */
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
		break;
	case ETH_LINK_SPEED_1G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
		break;
	case ETH_LINK_SPEED_2_5G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
		break;
	case ETH_LINK_SPEED_10G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
		break;
	case ETH_LINK_SPEED_20G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
		break;
	case ETH_LINK_SPEED_25G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
		break;
	case ETH_LINK_SPEED_40G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
		break;
	case ETH_LINK_SPEED_50G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
		break;
	case ETH_LINK_SPEED_100G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB;
		break;
	default:
		PMD_DRV_LOG(ERR,
			"Unsupported link speed %u; default to AUTO\n",
			conf_link_speed);
		break;
	}
	return eth_link_speed;
}

#define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
		ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
		ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
		ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G)

static int bnxt_valid_link_speed(uint32_t link_speed, uint16_t port_id)
{
	uint32_t one_speed;

	if (link_speed == ETH_LINK_SPEED_AUTONEG)
		return 0;

	if (link_speed & ETH_LINK_SPEED_FIXED) {
		one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;

		if (one_speed & (one_speed - 1)) {
			PMD_DRV_LOG(ERR,
				"Invalid advertised speeds (%u) for port %u\n",
				link_speed, port_id);
			return -EINVAL;
		}
		if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
			PMD_DRV_LOG(ERR,
				"Unsupported advertised speed (%u) for port %u\n",
				link_speed, port_id);
			return -EINVAL;
		}
	} else {
		if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
			PMD_DRV_LOG(ERR,
				"Unsupported advertised speeds (%u) for port %u\n",
				link_speed, port_id);
			return -EINVAL;
		}
	}
	return 0;
}

static uint16_t
bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed)
{
	uint16_t ret = 0;

	if (link_speed == ETH_LINK_SPEED_AUTONEG) {
		if (bp->link_info.support_speeds)
			return bp->link_info.support_speeds;
		link_speed = BNXT_SUPPORTED_SPEEDS;
	}

	if (link_speed & ETH_LINK_SPEED_100M)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
	if (link_speed & ETH_LINK_SPEED_100M_HD)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
	if (link_speed & ETH_LINK_SPEED_1G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
	if (link_speed & ETH_LINK_SPEED_2_5G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
	if (link_speed & ETH_LINK_SPEED_10G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
	if (link_speed & ETH_LINK_SPEED_20G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
	if (link_speed & ETH_LINK_SPEED_25G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
	if (link_speed & ETH_LINK_SPEED_40G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
	if (link_speed & ETH_LINK_SPEED_50G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
	if (link_speed & ETH_LINK_SPEED_100G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100GB;
	return ret;
}

static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
{
	uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;

	switch (hw_link_speed) {
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
		eth_link_speed = ETH_SPEED_NUM_100M;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
		eth_link_speed = ETH_SPEED_NUM_1G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
		eth_link_speed = ETH_SPEED_NUM_2_5G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
		eth_link_speed = ETH_SPEED_NUM_10G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
		eth_link_speed = ETH_SPEED_NUM_20G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
		eth_link_speed = ETH_SPEED_NUM_25G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
		eth_link_speed = ETH_SPEED_NUM_40G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
		eth_link_speed = ETH_SPEED_NUM_50G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB:
		eth_link_speed = ETH_SPEED_NUM_100G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
	default:
		PMD_DRV_LOG(ERR, "HWRM link speed %d not defined\n",
			hw_link_speed);
		break;
	}
	return eth_link_speed;
}

static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
{
	uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;

	switch (hw_link_duplex) {
	case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
	case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
		/* FALLTHROUGH */
		eth_link_duplex = ETH_LINK_FULL_DUPLEX;
		break;
	case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
		eth_link_duplex = ETH_LINK_HALF_DUPLEX;
		break;
	default:
		PMD_DRV_LOG(ERR, "HWRM link duplex %d not defined\n",
			hw_link_duplex);
		break;
	}
	return eth_link_duplex;
}

int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
{
	int rc = 0;
	struct bnxt_link_info *link_info = &bp->link_info;

	rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
	if (rc) {
		PMD_DRV_LOG(ERR,
			"Get link config failed with rc %d\n", rc);
		goto exit;
	}
	if (link_info->link_speed)
		link->link_speed =
			bnxt_parse_hw_link_speed(link_info->link_speed);
	else
		link->link_speed = ETH_SPEED_NUM_NONE;
	link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
	link->link_status = link_info->link_up;
	link->link_autoneg = link_info->auto_mode ==
		HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
		ETH_LINK_FIXED : ETH_LINK_AUTONEG;
exit:
	return rc;
}

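/*
 * Apply the link configuration requested in dev_conf->link_speeds.
 * Only a single-function PF may change PHY settings; VFs and
 * multi-function configurations return early without touching the
 * port.
 */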
int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
{
	int rc = 0;
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	struct bnxt_link_info link_req;
	uint16_t speed, autoneg;

	if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp))
		return 0;

	rc = bnxt_valid_link_speed(dev_conf->link_speeds,
			bp->eth_dev->data->port_id);
	if (rc)
		goto error;

	memset(&link_req, 0, sizeof(link_req));
	link_req.link_up = link_up;
	if (!link_up)
		goto port_phy_cfg;

	autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
	if (BNXT_CHIP_THOR(bp) &&
	    dev_conf->link_speeds == ETH_LINK_SPEED_40G) {
		/* 40G is not supported as part of media auto detect.
		 * The speed should be forced and autoneg disabled
		 * to configure 40G speed.
		 */
		PMD_DRV_LOG(INFO, "Disabling autoneg for 40G\n");
		autoneg = 0;
	}

	speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
	link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
	/* Autoneg can be done only when the FW allows.
	 * When user configures fixed speed of 40G and later changes to
	 * any other speed, auto_link_speed/force_link_speed is still set
	 * to 40G until link comes up at new speed.
	 */
	if (autoneg == 1 &&
	    !(!BNXT_CHIP_THOR(bp) &&
	      (bp->link_info.auto_link_speed ||
	       bp->link_info.force_link_speed))) {
		link_req.phy_flags |=
				HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
		link_req.auto_link_speed_mask =
			bnxt_parse_eth_link_speed_mask(bp,
						       dev_conf->link_speeds);
	} else {
		if (bp->link_info.phy_type ==
		    HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET ||
		    bp->link_info.phy_type ==
		    HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE ||
		    bp->link_info.media_type ==
		    HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP) {
			PMD_DRV_LOG(ERR, "10GBase-T devices must autoneg\n");
			return -EINVAL;
		}

		link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
		/* If user wants a particular speed try that first. */
		if (speed)
			link_req.link_speed = speed;
		else if (bp->link_info.force_link_speed)
			link_req.link_speed = bp->link_info.force_link_speed;
		else
			link_req.link_speed = bp->link_info.auto_link_speed;
	}
	link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
	link_req.auto_pause = bp->link_info.auto_pause;
	link_req.force_pause = bp->link_info.force_pause;

port_phy_cfg:
	rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
	if (rc) {
		PMD_DRV_LOG(ERR,
			"Set link config failed with rc %d\n", rc);
	}

error:
	return rc;
}

2760 /* JIRA 22088 */
2761 int bnxt_hwrm_func_qcfg(struct bnxt *bp, uint16_t *mtu)
2762 {
2763         struct hwrm_func_qcfg_input req = {0};
2764         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2765         uint16_t flags;
2766         int rc = 0;
2767
2768         HWRM_PREP(req, FUNC_QCFG, BNXT_USE_CHIMP_MB);
2769         req.fid = rte_cpu_to_le_16(0xffff);
2770
2771         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2772
2773         HWRM_CHECK_RESULT();
2774
        /* Hard-coded 12-bit VLAN ID mask (0xfff) */
2776         bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
2777         flags = rte_le_to_cpu_16(resp->flags);
2778         if (BNXT_PF(bp) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_HOST))
2779                 bp->flags |= BNXT_FLAG_MULTI_HOST;
2780
2781         if (BNXT_VF(bp) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_TRUSTED_VF)) {
2782                 bp->flags |= BNXT_FLAG_TRUSTED_VF_EN;
2783                 PMD_DRV_LOG(INFO, "Trusted VF cap enabled\n");
2784         } else if (BNXT_VF(bp) &&
2785                    !(flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_TRUSTED_VF)) {
2786                 bp->flags &= ~BNXT_FLAG_TRUSTED_VF_EN;
2787                 PMD_DRV_LOG(INFO, "Trusted VF cap disabled\n");
2788         }
2789
        if (mtu)
                *mtu = rte_le_to_cpu_16(resp->mtu);
2792
2793         switch (resp->port_partition_type) {
        case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
        case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
        case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
                bp->port_partition_type = resp->port_partition_type;
2798                 bp->port_partition_type = resp->port_partition_type;
2799                 break;
2800         default:
2801                 bp->port_partition_type = 0;
2802                 break;
2803         }
2804
2805         HWRM_UNLOCK();
2806
2807         return rc;
2808 }
2809
2810 static void copy_func_cfg_to_qcaps(struct hwrm_func_cfg_input *fcfg,
2811                                    struct hwrm_func_qcaps_output *qcaps)
2812 {
2813         qcaps->max_rsscos_ctx = fcfg->num_rsscos_ctxs;
2814         memcpy(qcaps->mac_address, fcfg->dflt_mac_addr,
2815                sizeof(qcaps->mac_address));
2816         qcaps->max_l2_ctxs = fcfg->num_l2_ctxs;
2817         qcaps->max_rx_rings = fcfg->num_rx_rings;
2818         qcaps->max_tx_rings = fcfg->num_tx_rings;
2819         qcaps->max_cmpl_rings = fcfg->num_cmpl_rings;
2820         qcaps->max_stat_ctx = fcfg->num_stat_ctxs;
2821         qcaps->max_vfs = 0;
2822         qcaps->first_vf_id = 0;
2823         qcaps->max_vnics = fcfg->num_vnics;
2824         qcaps->max_decap_records = 0;
2825         qcaps->max_encap_records = 0;
2826         qcaps->max_tx_wm_flows = 0;
2827         qcaps->max_tx_em_flows = 0;
2828         qcaps->max_rx_wm_flows = 0;
2829         qcaps->max_rx_em_flows = 0;
2830         qcaps->max_flow_id = 0;
2831         qcaps->max_mcast_filters = fcfg->num_mcast_filters;
2832         qcaps->max_sp_tx_rings = 0;
2833         qcaps->max_hw_ring_grps = fcfg->num_hw_ring_grps;
2834 }
2835
2836 static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
2837 {
2838         struct hwrm_func_cfg_input req = {0};
2839         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2840         uint32_t enables;
2841         int rc;
2842
2843         enables = HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
2844                   HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
2845                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
2846                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
2847                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
2848                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
2849                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
2850                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
2851                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS;
2852
2853         if (BNXT_HAS_RING_GRPS(bp)) {
2854                 enables |= HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS;
2855                 req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps);
2856         } else if (BNXT_HAS_NQ(bp)) {
2857                 enables |= HWRM_FUNC_CFG_INPUT_ENABLES_NUM_MSIX;
2858                 req.num_msix = rte_cpu_to_le_16(bp->max_nq_rings);
2859         }
2860
2861         req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
2862         req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);
2863         req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
2864                                    RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE *
2865                                    BNXT_NUM_VLANS);
2866         req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
2867         req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx);
2868         req.num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings);
2869         req.num_tx_rings = rte_cpu_to_le_16(tx_rings);
2870         req.num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings);
2871         req.num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx);
2872         req.num_vnics = rte_cpu_to_le_16(bp->max_vnics);
2873         req.fid = rte_cpu_to_le_16(0xffff);
2874         req.enables = rte_cpu_to_le_32(enables);
2875
2876         HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
2877
2878         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2879
2880         HWRM_CHECK_RESULT();
2881         HWRM_UNLOCK();
2882
2883         return rc;
2884 }
2885
2886 static void populate_vf_func_cfg_req(struct bnxt *bp,
2887                                      struct hwrm_func_cfg_input *req,
2888                                      int num_vfs)
2889 {
2890         req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
2891                         HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
2892                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
2893                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
2894                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
2895                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
2896                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
2897                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
2898                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
2899                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
2900
2901         req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
2902                                     RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE *
2903                                     BNXT_NUM_VLANS);
2904         req->mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
2905                                     RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE *
2906                                     BNXT_NUM_VLANS);
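        /*
         * Split the remaining resources evenly between all VFs, keeping
         * one extra share for the PF. Illustrative example (assumed
         * values): with num_vfs == 7 and bp->max_tx_rings == 128, each
         * function is offered 128 / (7 + 1) = 16 TX rings.
         */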
2907         req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx /
2908                                                 (num_vfs + 1));
2909         req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
2910         req->num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings /
2911                                                (num_vfs + 1));
2912         req->num_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
2913         req->num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
2914         req->num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
2915         /* TODO: For now, do not support VMDq/RFS on VFs. */
2916         req->num_vnics = rte_cpu_to_le_16(1);
2917         req->num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
2918                                                  (num_vfs + 1));
2919 }
2920
2921 static void add_random_mac_if_needed(struct bnxt *bp,
2922                                      struct hwrm_func_cfg_input *cfg_req,
2923                                      int vf)
2924 {
2925         struct rte_ether_addr mac;
2926
2927         if (bnxt_hwrm_func_qcfg_vf_default_mac(bp, vf, &mac))
2928                 return;
2929
        if (rte_is_zero_ether_addr(&mac)) {
2931                 cfg_req->enables |=
2932                 rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2933                 rte_eth_random_addr(cfg_req->dflt_mac_addr);
2934                 bp->pf.vf_info[vf].random_mac = true;
2935         } else {
2936                 memcpy(cfg_req->dflt_mac_addr, mac.addr_bytes,
2937                         RTE_ETHER_ADDR_LEN);
2938         }
2939 }
2940
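/*
 * Query the resources actually allocated to a VF via HWRM_FUNC_QCAPS and
 * subtract them from the PF's pool. If the query fails, fall back to the
 * values that were requested in cfg_req (via copy_func_cfg_to_qcaps).
 */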
2941 static void reserve_resources_from_vf(struct bnxt *bp,
2942                                       struct hwrm_func_cfg_input *cfg_req,
2943                                       int vf)
2944 {
2945         struct hwrm_func_qcaps_input req = {0};
2946         struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
2947         int rc;
2948
2949         /* Get the actual allocated values now */
2950         HWRM_PREP(req, FUNC_QCAPS, BNXT_USE_CHIMP_MB);
2951         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2952         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2953
2954         if (rc) {
2955                 PMD_DRV_LOG(ERR, "hwrm_func_qcaps failed rc:%d\n", rc);
2956                 copy_func_cfg_to_qcaps(cfg_req, resp);
2957         } else if (resp->error_code) {
2958                 rc = rte_le_to_cpu_16(resp->error_code);
2959                 PMD_DRV_LOG(ERR, "hwrm_func_qcaps error %d\n", rc);
2960                 copy_func_cfg_to_qcaps(cfg_req, resp);
2961         }
2962
2963         bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->max_rsscos_ctx);
2964         bp->max_stat_ctx -= rte_le_to_cpu_16(resp->max_stat_ctx);
2965         bp->max_cp_rings -= rte_le_to_cpu_16(resp->max_cmpl_rings);
2966         bp->max_tx_rings -= rte_le_to_cpu_16(resp->max_tx_rings);
2967         bp->max_rx_rings -= rte_le_to_cpu_16(resp->max_rx_rings);
2968         bp->max_l2_ctx -= rte_le_to_cpu_16(resp->max_l2_ctxs);
        /*
         * TODO: VMDq is not supported on VFs, so max_vnics is forced to 1
         * per VF and is intentionally not subtracted from the PF pool.
         */
2974         bp->max_ring_grps -= rte_le_to_cpu_16(resp->max_hw_ring_grps);
2975
2976         HWRM_UNLOCK();
2977 }
2978
2979 int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
2980 {
2981         struct hwrm_func_qcfg_input req = {0};
2982         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2983         int rc;
2984
2985         /* Check for zero MAC address */
2986         HWRM_PREP(req, FUNC_QCFG, BNXT_USE_CHIMP_MB);
2987         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2988         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2989         HWRM_CHECK_RESULT();
2990         rc = rte_le_to_cpu_16(resp->vlan);
2991
2992         HWRM_UNLOCK();
2993
2994         return rc;
2995 }
2996
2997 static int update_pf_resource_max(struct bnxt *bp)
2998 {
2999         struct hwrm_func_qcfg_input req = {0};
3000         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3001         int rc;
3002
3003         /* And copy the allocated numbers into the pf struct */
3004         HWRM_PREP(req, FUNC_QCFG, BNXT_USE_CHIMP_MB);
3005         req.fid = rte_cpu_to_le_16(0xffff);
3006         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3007         HWRM_CHECK_RESULT();
3008
        /* TODO: Only the TX ring value currently reflects the actual allocation. */
3010         bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
3011         bp->pf.evb_mode = resp->evb_mode;
3012
3013         HWRM_UNLOCK();
3014
3015         return rc;
3016 }
3017
3018 int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
3019 {
3020         int rc;
3021
3022         if (!BNXT_PF(bp)) {
                PMD_DRV_LOG(ERR, "Attempt to allocate VFs on a VF!\n");
3024                 return -EINVAL;
3025         }
3026
3027         rc = bnxt_hwrm_func_qcaps(bp);
3028         if (rc)
3029                 return rc;
3030
3031         bp->pf.func_cfg_flags &=
3032                 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
3033                   HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
3034         bp->pf.func_cfg_flags |=
3035                 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
        rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
        if (rc)
                return rc;

        rc = __bnxt_hwrm_func_qcaps(bp);
        return rc;
3039 }
3040
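/*
 * Allocate and configure num_vfs VFs: temporarily shrink the PF to a
 * single TX ring so enough rings remain for the VFs, register the VF
 * request-forwarding buffer, issue HWRM_FUNC_CFG for each VF while
 * reserving its resources out of the PF pool, and finally reconfigure
 * the PF with the leftover resources.
 */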
3041 int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
3042 {
3043         struct hwrm_func_cfg_input req = {0};
3044         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3045         int i;
3046         size_t sz;
3047         int rc = 0;
3048         size_t req_buf_sz;
3049
3050         if (!BNXT_PF(bp)) {
                PMD_DRV_LOG(ERR, "Attempt to allocate VFs on a VF!\n");
3052                 return -EINVAL;
3053         }
3054
3055         rc = bnxt_hwrm_func_qcaps(bp);
3056
3057         if (rc)
3058                 return rc;
3059
3060         bp->pf.active_vfs = num_vfs;
3061
3062         /*
3063          * First, configure the PF to only use one TX ring.  This ensures that
3064          * there are enough rings for all VFs.
3065          *
3066          * If we don't do this, when we call func_alloc() later, we will lock
3067          * extra rings to the PF that won't be available during func_cfg() of
3068          * the VFs.
3069          *
         * This has been fixed in firmware versions above 20.6.54.
3071          */
3072         bp->pf.func_cfg_flags &=
3073                 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
3074                   HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
3075         bp->pf.func_cfg_flags |=
3076                 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
3077         rc = bnxt_hwrm_pf_func_cfg(bp, 1);
3078         if (rc)
3079                 return rc;
3080
3081         /*
3082          * Now, create and register a buffer to hold forwarded VF requests
3083          */
        req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;
        bp->pf.vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
                                       page_roundup(req_buf_sz));
3087         if (bp->pf.vf_req_buf == NULL) {
3088                 rc = -ENOMEM;
3089                 goto error_free;
3090         }
3091         for (sz = 0; sz < req_buf_sz; sz += getpagesize())
3092                 rte_mem_lock_page(((char *)bp->pf.vf_req_buf) + sz);
3093         for (i = 0; i < num_vfs; i++)
3094                 bp->pf.vf_info[i].req_buf = ((char *)bp->pf.vf_req_buf) +
3095                                         (i * HWRM_MAX_REQ_LEN);
3096
3097         rc = bnxt_hwrm_func_buf_rgtr(bp);
3098         if (rc)
3099                 goto error_free;
3100
3101         populate_vf_func_cfg_req(bp, &req, num_vfs);
3102
3103         bp->pf.active_vfs = 0;
3104         for (i = 0; i < num_vfs; i++) {
3105                 add_random_mac_if_needed(bp, &req, i);
3106
3107                 HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
3108                 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[i].func_cfg_flags);
3109                 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[i].fid);
3110                 rc = bnxt_hwrm_send_message(bp,
3111                                             &req,
3112                                             sizeof(req),
3113                                             BNXT_USE_CHIMP_MB);
3114
3115                 /* Clear enable flag for next pass */
3116                 req.enables &= ~rte_cpu_to_le_32(
3117                                 HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
3118
3119                 if (rc || resp->error_code) {
3120                         PMD_DRV_LOG(ERR,
                                "Failed to initialize VF %d\n", i);
3122                         PMD_DRV_LOG(ERR,
3123                                 "Not all VFs available. (%d, %d)\n",
3124                                 rc, resp->error_code);
3125                         HWRM_UNLOCK();
3126                         break;
3127                 }
3128
3129                 HWRM_UNLOCK();
3130
3131                 reserve_resources_from_vf(bp, &req, i);
3132                 bp->pf.active_vfs++;
3133                 bnxt_hwrm_func_clr_stats(bp, bp->pf.vf_info[i].fid);
3134         }
3135
        /*
         * Now configure the PF to use "the rest" of the resources.
         * Note that STD_TX_RING_MODE is still set here, which limits the
         * number of TX rings but allows QoS to function properly. Without
         * it, the PF rings would break the bandwidth settings.
         */
3142         rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
3143         if (rc)
3144                 goto error_free;
3145
3146         rc = update_pf_resource_max(bp);
3147         if (rc)
3148                 goto error_free;
3149
3150         return rc;
3151
3152 error_free:
3153         bnxt_hwrm_func_buf_unrgtr(bp);
3154         return rc;
3155 }
3156
3157 int bnxt_hwrm_pf_evb_mode(struct bnxt *bp)
3158 {
3159         struct hwrm_func_cfg_input req = {0};
3160         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3161         int rc;
3162
3163         HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
3164
3165         req.fid = rte_cpu_to_le_16(0xffff);
3166         req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE);
3167         req.evb_mode = bp->pf.evb_mode;
3168
3169         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3170         HWRM_CHECK_RESULT();
3171         HWRM_UNLOCK();
3172
3173         return rc;
3174 }
3175
3176 int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
3177                                 uint8_t tunnel_type)
3178 {
3179         struct hwrm_tunnel_dst_port_alloc_input req = {0};
3180         struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3181         int rc = 0;
3182
3183         HWRM_PREP(req, TUNNEL_DST_PORT_ALLOC, BNXT_USE_CHIMP_MB);
3184         req.tunnel_type = tunnel_type;
3185         req.tunnel_dst_port_val = port;
3186         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3187         HWRM_CHECK_RESULT();
3188
3189         switch (tunnel_type) {
3190         case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN:
3191                 bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
3192                 bp->vxlan_port = port;
3193                 break;
3194         case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE:
3195                 bp->geneve_fw_dst_port_id = resp->tunnel_dst_port_id;
3196                 bp->geneve_port = port;
3197                 break;
3198         default:
3199                 break;
3200         }
3201
3202         HWRM_UNLOCK();
3203
3204         return rc;
3205 }
3206
3207 int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
3208                                 uint8_t tunnel_type)
3209 {
3210         struct hwrm_tunnel_dst_port_free_input req = {0};
3211         struct hwrm_tunnel_dst_port_free_output *resp = bp->hwrm_cmd_resp_addr;
3212         int rc = 0;
3213
3214         HWRM_PREP(req, TUNNEL_DST_PORT_FREE, BNXT_USE_CHIMP_MB);
3215
3216         req.tunnel_type = tunnel_type;
3217         req.tunnel_dst_port_id = rte_cpu_to_be_16(port);
3218         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3219
3220         HWRM_CHECK_RESULT();
3221         HWRM_UNLOCK();
3222
3223         return rc;
3224 }
3225
3226 int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf,
3227                                         uint32_t flags)
3228 {
3229         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3230         struct hwrm_func_cfg_input req = {0};
3231         int rc;
3232
3233         HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
3234
3235         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3236         req.flags = rte_cpu_to_le_32(flags);
3237         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3238
3239         HWRM_CHECK_RESULT();
3240         HWRM_UNLOCK();
3241
3242         return rc;
3243 }
3244
3245 void vf_vnic_set_rxmask_cb(struct bnxt_vnic_info *vnic, void *flagp)
3246 {
3247         uint32_t *flag = flagp;
3248
3249         vnic->flags = *flag;
3250 }
3251
3252 int bnxt_set_rx_mask_no_vlan(struct bnxt *bp, struct bnxt_vnic_info *vnic)
3253 {
3254         return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
3255 }
3256
3257 int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
3258 {
3259         int rc = 0;
3260         struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
3261         struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
3262
3263         HWRM_PREP(req, FUNC_BUF_RGTR, BNXT_USE_CHIMP_MB);
3264
3265         req.req_buf_num_pages = rte_cpu_to_le_16(1);
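        /*
         * req_buf_page_size carries the log2 of the request-buffer page
         * size. Illustrative example (assumed values): with 4 active VFs
         * and a 128-byte HWRM_MAX_REQ_LEN, the buffer is 512 bytes and
         * page_getenum() returns 12, i.e. a single 4KB page.
         */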
3266         req.req_buf_page_size = rte_cpu_to_le_16(
3267                          page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));
3268         req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
3269         req.req_buf_page_addr0 =
3270                 rte_cpu_to_le_64(rte_mem_virt2iova(bp->pf.vf_req_buf));
3271         if (req.req_buf_page_addr0 == RTE_BAD_IOVA) {
3272                 PMD_DRV_LOG(ERR,
3273                         "unable to map buffer address to physical memory\n");
3274                 return -ENOMEM;
3275         }
3276
3277         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3278
3279         HWRM_CHECK_RESULT();
3280         HWRM_UNLOCK();
3281
3282         return rc;
3283 }
3284
3285 int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp)
3286 {
3287         int rc = 0;
3288         struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 };
3289         struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
3290
3291         if (!(BNXT_PF(bp) && bp->pdev->max_vfs))
3292                 return 0;
3293
3294         HWRM_PREP(req, FUNC_BUF_UNRGTR, BNXT_USE_CHIMP_MB);
3295
3296         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3297
3298         HWRM_CHECK_RESULT();
3299         HWRM_UNLOCK();
3300
3301         return rc;
3302 }
3303
3304 int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
3305 {
3306         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3307         struct hwrm_func_cfg_input req = {0};
3308         int rc;
3309
3310         HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
3311
3312         req.fid = rte_cpu_to_le_16(0xffff);
3313         req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
3314         req.enables = rte_cpu_to_le_32(
3315                         HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
3316         req.async_event_cr = rte_cpu_to_le_16(
3317                         bp->async_cp_ring->cp_ring_struct->fw_ring_id);
3318         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3319
3320         HWRM_CHECK_RESULT();
3321         HWRM_UNLOCK();
3322
3323         return rc;
3324 }
3325
3326 int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
3327 {
3328         struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3329         struct hwrm_func_vf_cfg_input req = {0};
3330         int rc;
3331
3332         HWRM_PREP(req, FUNC_VF_CFG, BNXT_USE_CHIMP_MB);
3333
3334         req.enables = rte_cpu_to_le_32(
3335                         HWRM_FUNC_VF_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
3336         req.async_event_cr = rte_cpu_to_le_16(
3337                         bp->async_cp_ring->cp_ring_struct->fw_ring_id);
3338         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3339
3340         HWRM_CHECK_RESULT();
3341         HWRM_UNLOCK();
3342
3343         return rc;
3344 }
3345
3346 int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf)
3347 {
3348         struct hwrm_func_cfg_input req = {0};
3349         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3350         uint16_t dflt_vlan, fid;
3351         uint32_t func_cfg_flags;
3352         int rc = 0;
3353
3354         HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
3355
3356         if (is_vf) {
3357                 dflt_vlan = bp->pf.vf_info[vf].dflt_vlan;
3358                 fid = bp->pf.vf_info[vf].fid;
3359                 func_cfg_flags = bp->pf.vf_info[vf].func_cfg_flags;
3360         } else {
3361                 fid = rte_cpu_to_le_16(0xffff);
3362                 func_cfg_flags = bp->pf.func_cfg_flags;
3363                 dflt_vlan = bp->vlan;
3364         }
3365
3366         req.flags = rte_cpu_to_le_32(func_cfg_flags);
3367         req.fid = rte_cpu_to_le_16(fid);
3368         req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
3369         req.dflt_vlan = rte_cpu_to_le_16(dflt_vlan);
3370
3371         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3372
3373         HWRM_CHECK_RESULT();
3374         HWRM_UNLOCK();
3375
3376         return rc;
3377 }
3378
3379 int bnxt_hwrm_func_bw_cfg(struct bnxt *bp, uint16_t vf,
3380                         uint16_t max_bw, uint16_t enables)
3381 {
3382         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3383         struct hwrm_func_cfg_input req = {0};
3384         int rc;
3385
3386         HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
3387
3388         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3389         req.enables |= rte_cpu_to_le_32(enables);
3390         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
3391         req.max_bw = rte_cpu_to_le_32(max_bw);
3392         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3393
3394         HWRM_CHECK_RESULT();
3395         HWRM_UNLOCK();
3396
3397         return rc;
3398 }
3399
3400 int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf)
3401 {
3402         struct hwrm_func_cfg_input req = {0};
3403         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3404         int rc = 0;
3405
3406         HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
3407
3408         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
3409         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3410         req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
3411         req.dflt_vlan = rte_cpu_to_le_16(bp->pf.vf_info[vf].dflt_vlan);
3412
3413         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3414
3415         HWRM_CHECK_RESULT();
3416         HWRM_UNLOCK();
3417
3418         return rc;
3419 }
3420
3421 int bnxt_hwrm_set_async_event_cr(struct bnxt *bp)
3422 {
3423         int rc;
3424
3425         if (BNXT_PF(bp))
3426                 rc = bnxt_hwrm_func_cfg_def_cp(bp);
3427         else
3428                 rc = bnxt_hwrm_vf_func_cfg_def_cp(bp);
3429
3430         return rc;
3431 }
3432
3433 int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
3434                               void *encaped, size_t ec_size)
3435 {
3436         int rc = 0;
3437         struct hwrm_reject_fwd_resp_input req = {.req_type = 0};
3438         struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
3439
3440         if (ec_size > sizeof(req.encap_request))
3441                 return -1;
3442
3443         HWRM_PREP(req, REJECT_FWD_RESP, BNXT_USE_CHIMP_MB);
3444
3445         req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
3446         memcpy(req.encap_request, encaped, ec_size);
3447
3448         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3449
3450         HWRM_CHECK_RESULT();
3451         HWRM_UNLOCK();
3452
3453         return rc;
3454 }
3455
3456 int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
3457                                        struct rte_ether_addr *mac)
3458 {
3459         struct hwrm_func_qcfg_input req = {0};
3460         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3461         int rc;
3462
3463         HWRM_PREP(req, FUNC_QCFG, BNXT_USE_CHIMP_MB);
3464
3465         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3466         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3467
3468         HWRM_CHECK_RESULT();
3469
3470         memcpy(mac->addr_bytes, resp->mac_address, RTE_ETHER_ADDR_LEN);
3471
3472         HWRM_UNLOCK();
3473
3474         return rc;
3475 }
3476
3477 int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
3478                             void *encaped, size_t ec_size)
3479 {
3480         int rc = 0;
3481         struct hwrm_exec_fwd_resp_input req = {.req_type = 0};
3482         struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
3483
3484         if (ec_size > sizeof(req.encap_request))
3485                 return -1;
3486
3487         HWRM_PREP(req, EXEC_FWD_RESP, BNXT_USE_CHIMP_MB);
3488
3489         req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
3490         memcpy(req.encap_request, encaped, ec_size);
3491
3492         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3493
3494         HWRM_CHECK_RESULT();
3495         HWRM_UNLOCK();
3496
3497         return rc;
3498 }
3499
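/*
 * Query a statistics context and fold its unicast/multicast/broadcast
 * counters into the per-queue fields of rte_eth_stats; 'rx' selects
 * whether the context feeds the ingress or the egress counters.
 */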
3500 int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
3501                          struct rte_eth_stats *stats, uint8_t rx)
3502 {
3503         int rc = 0;
3504         struct hwrm_stat_ctx_query_input req = {.req_type = 0};
3505         struct hwrm_stat_ctx_query_output *resp = bp->hwrm_cmd_resp_addr;
3506
3507         HWRM_PREP(req, STAT_CTX_QUERY, BNXT_USE_CHIMP_MB);
3508
3509         req.stat_ctx_id = rte_cpu_to_le_32(cid);
3510
3511         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3512
3513         HWRM_CHECK_RESULT();
3514
3515         if (rx) {
3516                 stats->q_ipackets[idx] = rte_le_to_cpu_64(resp->rx_ucast_pkts);
3517                 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_mcast_pkts);
3518                 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_bcast_pkts);
3519                 stats->q_ibytes[idx] = rte_le_to_cpu_64(resp->rx_ucast_bytes);
3520                 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_mcast_bytes);
3521                 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_bcast_bytes);
3522                 stats->q_errors[idx] = rte_le_to_cpu_64(resp->rx_err_pkts);
3523                 stats->q_errors[idx] += rte_le_to_cpu_64(resp->rx_drop_pkts);
3524         } else {
3525                 stats->q_opackets[idx] = rte_le_to_cpu_64(resp->tx_ucast_pkts);
3526                 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_mcast_pkts);
3527                 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_bcast_pkts);
3528                 stats->q_obytes[idx] = rte_le_to_cpu_64(resp->tx_ucast_bytes);
3529                 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_mcast_bytes);
3530                 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_bcast_bytes);
3531         }
3534         HWRM_UNLOCK();
3535
3536         return rc;
3537 }
3538
3539 int bnxt_hwrm_port_qstats(struct bnxt *bp)
3540 {
3541         struct hwrm_port_qstats_input req = {0};
3542         struct hwrm_port_qstats_output *resp = bp->hwrm_cmd_resp_addr;
3543         struct bnxt_pf_info *pf = &bp->pf;
3544         int rc;
3545
3546         HWRM_PREP(req, PORT_QSTATS, BNXT_USE_CHIMP_MB);
3547
3548         req.port_id = rte_cpu_to_le_16(pf->port_id);
3549         req.tx_stat_host_addr = rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
3550         req.rx_stat_host_addr = rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
3551         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3552
3553         HWRM_CHECK_RESULT();
3554         HWRM_UNLOCK();
3555
3556         return rc;
3557 }
3558
3559 int bnxt_hwrm_port_clr_stats(struct bnxt *bp)
3560 {
3561         struct hwrm_port_clr_stats_input req = {0};
3562         struct hwrm_port_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
3563         struct bnxt_pf_info *pf = &bp->pf;
3564         int rc;
3565
3566         /* Not allowed on NS2 device, NPAR, MultiHost, VF */
3567         if (!(bp->flags & BNXT_FLAG_PORT_STATS) || BNXT_VF(bp) ||
3568             BNXT_NPAR(bp) || BNXT_MH(bp) || BNXT_TOTAL_VFS(bp))
3569                 return 0;
3570
3571         HWRM_PREP(req, PORT_CLR_STATS, BNXT_USE_CHIMP_MB);
3572
3573         req.port_id = rte_cpu_to_le_16(pf->port_id);
3574         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3575
3576         HWRM_CHECK_RESULT();
3577         HWRM_UNLOCK();
3578
3579         return rc;
3580 }
3581
3582 int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
3583 {
3584         struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
3585         struct hwrm_port_led_qcaps_input req = {0};
3586         int rc;
3587
3588         if (BNXT_VF(bp))
3589                 return 0;
3590
3591         HWRM_PREP(req, PORT_LED_QCAPS, BNXT_USE_CHIMP_MB);
        req.port_id = rte_cpu_to_le_16(bp->pf.port_id);
3593         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3594
3595         HWRM_CHECK_RESULT();
3596
3597         if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
3598                 unsigned int i;
3599
3600                 bp->num_leds = resp->num_leds;
3601                 memcpy(bp->leds, &resp->led0_id,
3602                         sizeof(bp->leds[0]) * bp->num_leds);
3603                 for (i = 0; i < bp->num_leds; i++) {
3604                         struct bnxt_led_info *led = &bp->leds[i];
3605
3606                         uint16_t caps = led->led_state_caps;
3607
3608                         if (!led->led_group_id ||
3609                                 !BNXT_LED_ALT_BLINK_CAP(caps)) {
3610                                 bp->num_leds = 0;
3611                                 break;
3612                         }
3613                 }
3614         }
3615
3616         HWRM_UNLOCK();
3617
3618         return rc;
3619 }
3620
3621 int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on)
3622 {
3623         struct hwrm_port_led_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3624         struct hwrm_port_led_cfg_input req = {0};
3625         struct bnxt_led_cfg *led_cfg;
3626         uint8_t led_state = HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_DEFAULT;
3627         uint16_t duration = 0;
3628         int rc, i;
3629
3630         if (!bp->num_leds || BNXT_VF(bp))
3631                 return -EOPNOTSUPP;
3632
3633         HWRM_PREP(req, PORT_LED_CFG, BNXT_USE_CHIMP_MB);
3634
3635         if (led_on) {
3636                 led_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT;
3637                 duration = rte_cpu_to_le_16(500);
3638         }
        req.port_id = rte_cpu_to_le_16(bp->pf.port_id);
3640         req.num_leds = bp->num_leds;
3641         led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
3642         for (i = 0; i < bp->num_leds; i++, led_cfg++) {
3643                 req.enables |= BNXT_LED_DFLT_ENABLES(i);
3644                 led_cfg->led_id = bp->leds[i].led_id;
3645                 led_cfg->led_state = led_state;
3646                 led_cfg->led_blink_on = duration;
3647                 led_cfg->led_blink_off = duration;
3648                 led_cfg->led_group_id = bp->leds[i].led_group_id;
3649         }
3650
3651         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3652
3653         HWRM_CHECK_RESULT();
3654         HWRM_UNLOCK();
3655
3656         return rc;
3657 }
3658
3659 int bnxt_hwrm_nvm_get_dir_info(struct bnxt *bp, uint32_t *entries,
3660                                uint32_t *length)
3661 {
3662         int rc;
3663         struct hwrm_nvm_get_dir_info_input req = {0};
3664         struct hwrm_nvm_get_dir_info_output *resp = bp->hwrm_cmd_resp_addr;
3665
3666         HWRM_PREP(req, NVM_GET_DIR_INFO, BNXT_USE_CHIMP_MB);
3667
3668         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3669
3670         HWRM_CHECK_RESULT();
3671
3672         *entries = rte_le_to_cpu_32(resp->entries);
3673         *length = rte_le_to_cpu_32(resp->entry_length);
3674
3675         HWRM_UNLOCK();
3676         return rc;
3677 }
3678
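/*
 * Fill 'data' with the NVRAM directory: the first two bytes hold the
 * entry count and entry length (note both are truncated to 8 bits),
 * followed by the directory entries themselves, padded with 0xff.
 */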
3679 int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data)
3680 {
3681         int rc;
3682         uint32_t dir_entries;
3683         uint32_t entry_length;
3684         uint8_t *buf;
3685         size_t buflen;
3686         rte_iova_t dma_handle;
3687         struct hwrm_nvm_get_dir_entries_input req = {0};
3688         struct hwrm_nvm_get_dir_entries_output *resp = bp->hwrm_cmd_resp_addr;
3689
3690         rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length);
3691         if (rc != 0)
3692                 return rc;
3693
3694         *data++ = dir_entries;
3695         *data++ = entry_length;
3696         len -= 2;
3697         memset(data, 0xff, len);
3698
3699         buflen = dir_entries * entry_length;
        buf = rte_malloc("nvm_dir", buflen, 0);
        if (buf == NULL)
                return -ENOMEM;
        rte_mem_lock_page(buf);

        dma_handle = rte_mem_virt2iova(buf);
        if (dma_handle == RTE_BAD_IOVA) {
                rte_free(buf);
                PMD_DRV_LOG(ERR,
                        "unable to map response address to physical memory\n");
                return -ENOMEM;
        }
3710         HWRM_PREP(req, NVM_GET_DIR_ENTRIES, BNXT_USE_CHIMP_MB);
3711         req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
3712         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3713
3714         if (rc == 0)
3715                 memcpy(data, buf, len > buflen ? buflen : len);
3716
3717         rte_free(buf);
3718         HWRM_CHECK_RESULT();
3719         HWRM_UNLOCK();
3720
3721         return rc;
3722 }
3723
3724 int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index,
3725                              uint32_t offset, uint32_t length,
3726                              uint8_t *data)
3727 {
3728         int rc;
3729         uint8_t *buf;
3730         rte_iova_t dma_handle;
3731         struct hwrm_nvm_read_input req = {0};
3732         struct hwrm_nvm_read_output *resp = bp->hwrm_cmd_resp_addr;
3733
        buf = rte_malloc("nvm_item", length, 0);
        if (!buf)
                return -ENOMEM;
        rte_mem_lock_page(buf);

        dma_handle = rte_mem_virt2iova(buf);
        if (dma_handle == RTE_BAD_IOVA) {
                rte_free(buf);
                PMD_DRV_LOG(ERR,
                        "unable to map response address to physical memory\n");
                return -ENOMEM;
        }
3745         HWRM_PREP(req, NVM_READ, BNXT_USE_CHIMP_MB);
3746         req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
3747         req.dir_idx = rte_cpu_to_le_16(index);
3748         req.offset = rte_cpu_to_le_32(offset);
3749         req.len = rte_cpu_to_le_32(length);
3750         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3751         if (rc == 0)
3752                 memcpy(data, buf, length);
3753
3754         rte_free(buf);
3755         HWRM_CHECK_RESULT();
3756         HWRM_UNLOCK();
3757
3758         return rc;
3759 }
3760
3761 int bnxt_hwrm_erase_nvram_directory(struct bnxt *bp, uint8_t index)
3762 {
3763         int rc;
3764         struct hwrm_nvm_erase_dir_entry_input req = {0};
3765         struct hwrm_nvm_erase_dir_entry_output *resp = bp->hwrm_cmd_resp_addr;
3766
3767         HWRM_PREP(req, NVM_ERASE_DIR_ENTRY, BNXT_USE_CHIMP_MB);
3768         req.dir_idx = rte_cpu_to_le_16(index);
3769         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3770         HWRM_CHECK_RESULT();
3771         HWRM_UNLOCK();
3772
3773         return rc;
3774 }
3777 int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
3778                           uint16_t dir_ordinal, uint16_t dir_ext,
3779                           uint16_t dir_attr, const uint8_t *data,
3780                           size_t data_len)
3781 {
3782         int rc;
3783         struct hwrm_nvm_write_input req = {0};
3784         struct hwrm_nvm_write_output *resp = bp->hwrm_cmd_resp_addr;
3785         rte_iova_t dma_handle;
3786         uint8_t *buf;
3787
        buf = rte_malloc("nvm_write", data_len, 0);
        if (!buf)
                return -ENOMEM;
        rte_mem_lock_page(buf);

        dma_handle = rte_mem_virt2iova(buf);
        if (dma_handle == RTE_BAD_IOVA) {
                rte_free(buf);
                PMD_DRV_LOG(ERR,
                        "unable to map response address to physical memory\n");
                return -ENOMEM;
        }
3799         memcpy(buf, data, data_len);
3800
3801         HWRM_PREP(req, NVM_WRITE, BNXT_USE_CHIMP_MB);
3802
3803         req.dir_type = rte_cpu_to_le_16(dir_type);
3804         req.dir_ordinal = rte_cpu_to_le_16(dir_ordinal);
3805         req.dir_ext = rte_cpu_to_le_16(dir_ext);
3806         req.dir_attr = rte_cpu_to_le_16(dir_attr);
3807         req.dir_data_length = rte_cpu_to_le_32(data_len);
3808         req.host_src_addr = rte_cpu_to_le_64(dma_handle);
3809
3810         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3811
3812         rte_free(buf);
3813         HWRM_CHECK_RESULT();
3814         HWRM_UNLOCK();
3815
3816         return rc;
3817 }
3818
3819 static void
3820 bnxt_vnic_count(struct bnxt_vnic_info *vnic __rte_unused, void *cbdata)
3821 {
3822         uint32_t *count = cbdata;
3823
3824         *count = *count + 1;
3825 }
3826
3827 static int bnxt_vnic_count_hwrm_stub(struct bnxt *bp __rte_unused,
3828                                      struct bnxt_vnic_info *vnic __rte_unused)
3829 {
3830         return 0;
3831 }
3832
3833 int bnxt_vf_vnic_count(struct bnxt *bp, uint16_t vf)
3834 {
3835         uint32_t count = 0;
3836
3837         bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf, bnxt_vnic_count,
3838             &count, bnxt_vnic_count_hwrm_stub);
3839
3840         return count;
3841 }
3842
3843 static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
3844                                         uint16_t *vnic_ids)
3845 {
3846         struct hwrm_func_vf_vnic_ids_query_input req = {0};
3847         struct hwrm_func_vf_vnic_ids_query_output *resp =
3848                                                 bp->hwrm_cmd_resp_addr;
3849         int rc;
3850
3851         /* First query all VNIC ids */
3852         HWRM_PREP(req, FUNC_VF_VNIC_IDS_QUERY, BNXT_USE_CHIMP_MB);
3853
3854         req.vf_id = rte_cpu_to_le_16(bp->pf.first_vf_id + vf);
3855         req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics);
3856         req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2iova(vnic_ids));
3857
3858         if (req.vnic_id_tbl_addr == RTE_BAD_IOVA) {
3859                 HWRM_UNLOCK();
3860                 PMD_DRV_LOG(ERR,
3861                 "unable to map VNIC ID table address to physical memory\n");
3862                 return -ENOMEM;
3863         }
3864         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3865         HWRM_CHECK_RESULT();
3866         rc = rte_le_to_cpu_32(resp->vnic_id_cnt);
3867
3868         HWRM_UNLOCK();
3869
3870         return rc;
3871 }
3872
3873 /*
 * This function queries the VNIC IDs for a specified VF. It calls vnic_cb
 * to update the necessary fields in each vnic_info with cbdata, then calls
 * hwrm_cb to program the new VNIC configuration. See bnxt_vf_vnic_count()
 * above for an example caller.
3877  */
3878 int bnxt_hwrm_func_vf_vnic_query_and_config(struct bnxt *bp, uint16_t vf,
3879         void (*vnic_cb)(struct bnxt_vnic_info *, void *), void *cbdata,
3880         int (*hwrm_cb)(struct bnxt *bp, struct bnxt_vnic_info *vnic))
3881 {
3882         struct bnxt_vnic_info vnic;
3883         int rc = 0;
3884         int i, num_vnic_ids;
3885         uint16_t *vnic_ids;
3886         size_t vnic_id_sz;
3887         size_t sz;
3888
3889         /* First query all VNIC ids */
3890         vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
3891         vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
3892                         RTE_CACHE_LINE_SIZE);
3893         if (vnic_ids == NULL)
3894                 return -ENOMEM;
3895
3896         for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
3897                 rte_mem_lock_page(((char *)vnic_ids) + sz);
3898
        num_vnic_ids = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);

        if (num_vnic_ids < 0) {
                rte_free(vnic_ids);
                return num_vnic_ids;
        }
3903
        /* Retrieve each VNIC, let vnic_cb update it, then reprogram it */
3905
3906         for (i = 0; i < num_vnic_ids; i++) {
3907                 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
3908                 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
3909                 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, bp->pf.first_vf_id + vf);
3910                 if (rc)
3911                         break;
3912                 if (vnic.mru <= 4)      /* Indicates unallocated */
3913                         continue;
3914
3915                 vnic_cb(&vnic, cbdata);
3916
3917                 rc = hwrm_cb(bp, &vnic);
3918                 if (rc)
3919                         break;
3920         }
3921
3922         rte_free(vnic_ids);
3923
3924         return rc;
3925 }
3926
3927 int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf,
3928                                               bool on)
3929 {
3930         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3931         struct hwrm_func_cfg_input req = {0};
3932         int rc;
3933
3934         HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
3935
3936         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3937         req.enables |= rte_cpu_to_le_32(
3938                         HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE);
3939         req.vlan_antispoof_mode = on ?
3940                 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN :
3941                 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK;
3942         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3943
3944         HWRM_CHECK_RESULT();
3945         HWRM_UNLOCK();
3946
3947         return rc;
3948 }
3949
3950 int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf)
3951 {
3952         struct bnxt_vnic_info vnic;
3953         uint16_t *vnic_ids;
3954         size_t vnic_id_sz;
3955         int num_vnic_ids, i;
3956         size_t sz;
3957         int rc;
3958
3959         vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
3960         vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
3961                         RTE_CACHE_LINE_SIZE);
3962         if (vnic_ids == NULL)
3963                 return -ENOMEM;
3964
3965         for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
3966                 rte_mem_lock_page(((char *)vnic_ids) + sz);
3967
3968         rc = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
3969         if (rc <= 0)
3970                 goto exit;
3971         num_vnic_ids = rc;
3972
3973         /*
3974          * Loop through to find the default VNIC ID.
3975          * TODO: The easier way would be to obtain the resp->dflt_vnic_id
3976          * by sending the hwrm_func_qcfg command to the firmware.
3977          */
3978         for (i = 0; i < num_vnic_ids; i++) {
3979                 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
3980                 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
3981                 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic,
3982                                         bp->pf.first_vf_id + vf);
3983                 if (rc)
3984                         goto exit;
3985                 if (vnic.func_default) {
3986                         rte_free(vnic_ids);
3987                         return vnic.fw_vnic_id;
3988                 }
3989         }
3990         /* Could not find a default VNIC. */
3991         PMD_DRV_LOG(ERR, "No default VNIC\n");
3992 exit:
3993         rte_free(vnic_ids);
3994         return rc;
3995 }
3996
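/*
 * Allocate an exact-match (EM) flow for 'filter', directed at dst_id.
 * Any previously allocated EM filter is freed first, and only the
 * fields selected by filter->enables are programmed.
 */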
3997 int bnxt_hwrm_set_em_filter(struct bnxt *bp,
3998                          uint16_t dst_id,
3999                          struct bnxt_filter_info *filter)
4000 {
4001         int rc = 0;
4002         struct hwrm_cfa_em_flow_alloc_input req = {.req_type = 0 };
4003         struct hwrm_cfa_em_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4004         uint32_t enables = 0;
4005
4006         if (filter->fw_em_filter_id != UINT64_MAX)
4007                 bnxt_hwrm_clear_em_filter(bp, filter);
4008
4009         HWRM_PREP(req, CFA_EM_FLOW_ALLOC, BNXT_USE_KONG(bp));
4010
4011         req.flags = rte_cpu_to_le_32(filter->flags);
4012
4013         enables = filter->enables |
4014               HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_ID;
4015         req.dst_id = rte_cpu_to_le_16(dst_id);
4016
4017         if (filter->ip_addr_type) {
4018                 req.ip_addr_type = filter->ip_addr_type;
4019                 enables |= HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
4020         }
4021         if (enables &
4022             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
4023                 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
4024         if (enables &
4025             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_MACADDR)
4026                 memcpy(req.src_macaddr, filter->src_macaddr,
4027                        RTE_ETHER_ADDR_LEN);
4028         if (enables &
4029             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_MACADDR)
4030                 memcpy(req.dst_macaddr, filter->dst_macaddr,
4031                        RTE_ETHER_ADDR_LEN);
4032         if (enables &
4033             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_OVLAN_VID)
4034                 req.ovlan_vid = filter->l2_ovlan;
4035         if (enables &
4036             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IVLAN_VID)
4037                 req.ivlan_vid = filter->l2_ivlan;
4038         if (enables &
4039             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ETHERTYPE)
4040                 req.ethertype = rte_cpu_to_be_16(filter->ethertype);
4041         if (enables &
4042             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
4043                 req.ip_protocol = filter->ip_protocol;
4044         if (enables &
4045             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_IPADDR)
4046                 req.src_ipaddr[0] = rte_cpu_to_be_32(filter->src_ipaddr[0]);
4047         if (enables &
4048             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_IPADDR)
4049                 req.dst_ipaddr[0] = rte_cpu_to_be_32(filter->dst_ipaddr[0]);
4050         if (enables &
4051             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_PORT)
4052                 req.src_port = rte_cpu_to_be_16(filter->src_port);
4053         if (enables &
4054             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_PORT)
4055                 req.dst_port = rte_cpu_to_be_16(filter->dst_port);
4056         if (enables &
4057             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
4058                 req.mirror_vnic_id = filter->mirror_vnic_id;
4059
4060         req.enables = rte_cpu_to_le_32(enables);
4061
4062         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
4063
4064         HWRM_CHECK_RESULT();
4065
4066         filter->fw_em_filter_id = rte_le_to_cpu_64(resp->em_filter_id);
4067         HWRM_UNLOCK();
4068
4069         return rc;
4070 }
4071
4072 int bnxt_hwrm_clear_em_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
4073 {
4074         int rc = 0;
4075         struct hwrm_cfa_em_flow_free_input req = {.req_type = 0 };
4076         struct hwrm_cfa_em_flow_free_output *resp = bp->hwrm_cmd_resp_addr;
4077
4078         if (filter->fw_em_filter_id == UINT64_MAX)
4079                 return 0;
4080
        PMD_DRV_LOG(DEBUG, "Clear EM filter\n");
4082         HWRM_PREP(req, CFA_EM_FLOW_FREE, BNXT_USE_KONG(bp));
4083
4084         req.em_filter_id = rte_cpu_to_le_64(filter->fw_em_filter_id);
4085
4086         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
4087
4088         HWRM_CHECK_RESULT();
4089         HWRM_UNLOCK();
4090
4091         filter->fw_em_filter_id = UINT64_MAX;
4092         filter->fw_l2_filter_id = UINT64_MAX;
4093
4094         return 0;
4095 }
4096
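/*
 * Allocate an n-tuple flow for 'filter'; this mirrors
 * bnxt_hwrm_set_em_filter() but builds a CFA_NTUPLE_FILTER_ALLOC
 * request, which additionally carries address and port masks.
 */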
4097 int bnxt_hwrm_set_ntuple_filter(struct bnxt *bp,
4098                          uint16_t dst_id,
4099                          struct bnxt_filter_info *filter)
4100 {
4101         int rc = 0;
4102         struct hwrm_cfa_ntuple_filter_alloc_input req = {.req_type = 0 };
4103         struct hwrm_cfa_ntuple_filter_alloc_output *resp =
4104                                                 bp->hwrm_cmd_resp_addr;
4105         uint32_t enables = 0;
4106
4107         if (filter->fw_ntuple_filter_id != UINT64_MAX)
4108                 bnxt_hwrm_clear_ntuple_filter(bp, filter);
4109
4110         HWRM_PREP(req, CFA_NTUPLE_FILTER_ALLOC, BNXT_USE_CHIMP_MB);
4111
4112         req.flags = rte_cpu_to_le_32(filter->flags);
4113
4114         enables = filter->enables |
4115               HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
4116         req.dst_id = rte_cpu_to_le_16(dst_id);
4119         if (filter->ip_addr_type) {
4120                 req.ip_addr_type = filter->ip_addr_type;
4121                 enables |=
4122                         HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
4123         }
4124         if (enables &
4125             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
4126                 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
4127         if (enables &
4128             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR)
4129                 memcpy(req.src_macaddr, filter->src_macaddr,
4130                        RTE_ETHER_ADDR_LEN);
4135         if (enables &
4136             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE)
4137                 req.ethertype = rte_cpu_to_be_16(filter->ethertype);
4138         if (enables &
4139             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
4140                 req.ip_protocol = filter->ip_protocol;
4141         if (enables &
4142             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR)
4143                 req.src_ipaddr[0] = rte_cpu_to_le_32(filter->src_ipaddr[0]);
4144         if (enables &
4145             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR_MASK)
4146                 req.src_ipaddr_mask[0] =
4147                         rte_cpu_to_le_32(filter->src_ipaddr_mask[0]);
4148         if (enables &
4149             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR)
4150                 req.dst_ipaddr[0] = rte_cpu_to_le_32(filter->dst_ipaddr[0]);
4151         if (enables &
4152             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR_MASK)
4153                 req.dst_ipaddr_mask[0] =
4154                         rte_cpu_to_le_32(filter->dst_ipaddr_mask[0]);
4155         if (enables &
4156             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT)
4157                 req.src_port = rte_cpu_to_le_16(filter->src_port);
4158         if (enables &
4159             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT_MASK)
4160                 req.src_port_mask = rte_cpu_to_le_16(filter->src_port_mask);
4161         if (enables &
4162             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT)
4163                 req.dst_port = rte_cpu_to_le_16(filter->dst_port);
4164         if (enables &
4165             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT_MASK)
4166                 req.dst_port_mask = rte_cpu_to_le_16(filter->dst_port_mask);
4167         if (enables &
4168             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
4169                 req.mirror_vnic_id = filter->mirror_vnic_id;
4170
4171         req.enables = rte_cpu_to_le_32(enables);
4172
4173         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4174
4175         HWRM_CHECK_RESULT();
4176
4177         filter->fw_ntuple_filter_id = rte_le_to_cpu_64(resp->ntuple_filter_id);
4178         HWRM_UNLOCK();
4179
4180         return rc;
4181 }
4182
4183 int bnxt_hwrm_clear_ntuple_filter(struct bnxt *bp,
4184                                 struct bnxt_filter_info *filter)
4185 {
4186         int rc = 0;
4187         struct hwrm_cfa_ntuple_filter_free_input req = {.req_type = 0 };
4188         struct hwrm_cfa_ntuple_filter_free_output *resp =
4189                                                 bp->hwrm_cmd_resp_addr;
4190
4191         if (filter->fw_ntuple_filter_id == UINT64_MAX)
4192                 return 0;
4193
4194         HWRM_PREP(req, CFA_NTUPLE_FILTER_FREE, BNXT_USE_CHIMP_MB);
4195
4196         req.ntuple_filter_id = rte_cpu_to_le_64(filter->fw_ntuple_filter_id);
4197
4198         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4199
4200         HWRM_CHECK_RESULT();
4201         HWRM_UNLOCK();
4202
4203         filter->fw_ntuple_filter_id = UINT64_MAX;
4204
4205         return 0;
4206 }
4207
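/*
 * Thor programs RSS per ring-table pair: each of the 64 slots in a
 * context takes an Rx ring id followed by its completion ring id, and
 * queues in the stopped state are skipped so every hash bucket points
 * at an active ring.
 */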
4208 static int
4209 bnxt_vnic_rss_configure_thor(struct bnxt *bp, struct bnxt_vnic_info *vnic)
4210 {
4211         struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
4212         uint8_t *rx_queue_state = bp->eth_dev->data->rx_queue_state;
4213         struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
4214         struct bnxt_rx_queue **rxqs = bp->rx_queues;
4215         uint16_t *ring_tbl = vnic->rss_table;
4216         int nr_ctxs = vnic->num_lb_ctxts;
4217         int max_rings = bp->rx_nr_rings;
4218         int i, j, k, cnt;
4219         int rc = 0;
4220
4221         for (i = 0, k = 0; i < nr_ctxs; i++) {
4222                 struct bnxt_rx_ring_info *rxr;
4223                 struct bnxt_cp_ring_info *cpr;
4224
4225                 HWRM_PREP(req, VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
4226
4227                 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
4228                 req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
4229                 req.hash_mode_flags = vnic->hash_mode;
4230
4231                 req.ring_grp_tbl_addr =
4232                     rte_cpu_to_le_64(vnic->rss_table_dma_addr +
4233                                      i * BNXT_RSS_ENTRIES_PER_CTX_THOR *
4234                                      2 * sizeof(*ring_tbl));
4235                 req.hash_key_tbl_addr =
4236                     rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
4237
4238                 req.ring_table_pair_index = i;
4239                 req.rss_ctx_idx = rte_cpu_to_le_16(vnic->fw_grp_ids[i]);
4240
4241                 for (j = 0; j < 64; j++) {
4242                         uint16_t ring_id;
4243
4244                         /* Find next active ring. */
4245                         for (cnt = 0; cnt < max_rings; cnt++) {
4246                                 if (rx_queue_state[k] !=
4247                                                 RTE_ETH_QUEUE_STATE_STOPPED)
4248                                         break;
4249                                 if (++k == max_rings)
4250                                         k = 0;
4251                         }
4252
4253                         /* Return if no rings are active. */
4254                         if (cnt == max_rings)
4255                                 return 0;
4256
4257                         /* Add rx/cp ring pair to RSS table. */
4258                         rxr = rxqs[k]->rx_ring;
4259                         cpr = rxqs[k]->cp_ring;
4260
4261                         ring_id = rxr->rx_ring_struct->fw_ring_id;
4262                         *ring_tbl++ = rte_cpu_to_le_16(ring_id);
4263                         ring_id = cpr->cp_ring_struct->fw_ring_id;
4264                         *ring_tbl++ = rte_cpu_to_le_16(ring_id);
4265
4266                         if (++k == max_rings)
4267                                 k = 0;
4268                 }
4269                 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req),
4270                                             BNXT_USE_CHIMP_MB);
4271
4272                 HWRM_CHECK_RESULT();
4273                 HWRM_UNLOCK();
4274         }
4275
4276         return rc;
4277 }
4278
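/*
 * Chip-agnostic RSS entry point: Thor chips take the ring-pair path
 * above; older chips fill vnic->rss_table with ring group ids
 * (skipping invalid groups) and push it with a single VNIC_RSS_CFG.
 */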
4279 int bnxt_vnic_rss_configure(struct bnxt *bp, struct bnxt_vnic_info *vnic)
4280 {
4281         unsigned int rss_idx, fw_idx, i;
4282
4283         if (!(vnic->rss_table && vnic->hash_type))
4284                 return 0;
4285
4286         if (BNXT_CHIP_THOR(bp))
4287                 return bnxt_vnic_rss_configure_thor(bp, vnic);
4288
4289         /*
4290          * Fill the RSS hash & redirection table with
4291          * ring group ids for all VNICs
4292          */
4293         for (rss_idx = 0, fw_idx = 0; rss_idx < HW_HASH_INDEX_SIZE;
4294                 rss_idx++, fw_idx++) {
4295                 for (i = 0; i < bp->rx_cp_nr_rings; i++) {
4296                         fw_idx %= bp->rx_cp_nr_rings;
4297                         if (vnic->fw_grp_ids[fw_idx] != INVALID_HW_RING_ID)
4298                                 break;
4299                         fw_idx++;
4300                 }
4301                 if (i == bp->rx_cp_nr_rings)
4302                         return 0;
4303                 vnic->rss_table[rss_idx] = vnic->fw_grp_ids[fw_idx];
4304         }
4305         return bnxt_hwrm_vnic_rss_cfg(bp, vnic);
4306 }
4307
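/*
 * Translate the generic bnxt_coal settings into AGGINT parameters for
 * HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS. The ratios noted in the body
 * (min timer = 1/2 of the interrupt timer, buf timer = 1/4) are
 * expected to have been applied when hw_coal was populated; e.g. an
 * int_lat_tmr_max of 150 would come with int_lat_tmr_min = 75 and
 * cmpl_aggr_dma_tmr = 37 (numbers illustrative, not from the spec).
 */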
4308 static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal,
4309         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
4310 {
4311         uint16_t flags;
4312
4313         req->num_cmpl_aggr_int = rte_cpu_to_le_16(hw_coal->num_cmpl_aggr_int);
4314
4315         /* This is a 6-bit value and must not be 0, or we'll get non-stop IRQs */
4316         req->num_cmpl_dma_aggr = rte_cpu_to_le_16(hw_coal->num_cmpl_dma_aggr);
4317
4318         /* This is a 6-bit value and must not be 0, or we'll get non-stop IRQs */
4319         req->num_cmpl_dma_aggr_during_int =
4320                 rte_cpu_to_le_16(hw_coal->num_cmpl_dma_aggr_during_int);
4321
4322         req->int_lat_tmr_max = rte_cpu_to_le_16(hw_coal->int_lat_tmr_max);
4323
4324         /* min timer set to 1/2 of interrupt timer */
4325         req->int_lat_tmr_min = rte_cpu_to_le_16(hw_coal->int_lat_tmr_min);
4326
4327         /* buf timer set to 1/4 of interrupt timer */
4328         req->cmpl_aggr_dma_tmr = rte_cpu_to_le_16(hw_coal->cmpl_aggr_dma_tmr);
4329
4330         req->cmpl_aggr_dma_tmr_during_int =
4331                 rte_cpu_to_le_16(hw_coal->cmpl_aggr_dma_tmr_during_int);
4332
4333         flags = HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET |
4334                 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_RING_IDLE;
4335         req->flags = rte_cpu_to_le_16(flags);
4336 }
4337
4338 static int bnxt_hwrm_set_coal_params_thor(struct bnxt *bp,
4339                 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *agg_req)
4340 {
4341         struct hwrm_ring_aggint_qcaps_input req = {0};
4342         struct hwrm_ring_aggint_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
4343         uint32_t enables;
4344         uint16_t flags;
4345         int rc;
4346
4347         HWRM_PREP(req, RING_AGGINT_QCAPS, BNXT_USE_CHIMP_MB);
4348         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4349         HWRM_CHECK_RESULT();
4350
4351         agg_req->num_cmpl_dma_aggr = resp->num_cmpl_dma_aggr_max;
4352         agg_req->cmpl_aggr_dma_tmr = resp->cmpl_aggr_dma_tmr_min;
4353
4354         flags = HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET |
4355                 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_RING_IDLE;
4356         agg_req->flags = rte_cpu_to_le_16(flags);
4357         enables =
4358          HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_CMPL_AGGR_DMA_TMR |
4359          HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_NUM_CMPL_DMA_AGGR;
4360         agg_req->enables = rte_cpu_to_le_32(enables);
4361
4362         HWRM_UNLOCK();
4363         return rc;
4364 }
4365
4366 int bnxt_hwrm_set_ring_coal(struct bnxt *bp,
4367                         struct bnxt_coal *coal, uint16_t ring_id)
4368 {
4369         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
4370         struct hwrm_ring_cmpl_ring_cfg_aggint_params_output *resp =
4371                                                 bp->hwrm_cmd_resp_addr;
4372         int rc;
4373
4374         /* Set ring coalesce parameters only for Thor-based and Stratus (100G) NICs */
4375         if (BNXT_CHIP_THOR(bp)) {
4376                 if (bnxt_hwrm_set_coal_params_thor(bp, &req))
4377                         return -1;
4378         } else if (bnxt_stratus_device(bp)) {
4379                 bnxt_hwrm_set_coal_params(coal, &req);
4380         } else {
4381                 return 0;
4382         }
4383
4384         HWRM_PREP(req, RING_CMPL_RING_CFG_AGGINT_PARAMS, BNXT_USE_CHIMP_MB);
4385         req.ring_id = rte_cpu_to_le_16(ring_id);
4386         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4387         HWRM_CHECK_RESULT();
4388         HWRM_UNLOCK();
4389         return 0;
4390 }
4391
4392 #define BNXT_RTE_MEMZONE_FLAG  (RTE_MEMZONE_1GB | RTE_MEMZONE_IOVA_CONTIG)
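/*
 * Query how much backing-store (context) memory the FW wants the host
 * to donate on Thor-class chips. Only meaningful on a PF running HWRM
 * 1.9.2 or newer; the maxima are cached in bp->ctx for the allocator.
 */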
4393 int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
4394 {
4395         struct hwrm_func_backing_store_qcaps_input req = {0};
4396         struct hwrm_func_backing_store_qcaps_output *resp =
4397                 bp->hwrm_cmd_resp_addr;
4398         struct bnxt_ctx_pg_info *ctx_pg;
4399         struct bnxt_ctx_mem_info *ctx;
4400         int total_alloc_len;
4401         int rc, i;
4402
4403         if (!BNXT_CHIP_THOR(bp) ||
4404             bp->hwrm_spec_code < HWRM_VERSION_1_9_2 ||
4405             BNXT_VF(bp) ||
4406             bp->ctx)
4407                 return 0;
4408
4409         HWRM_PREP(req, FUNC_BACKING_STORE_QCAPS, BNXT_USE_CHIMP_MB);
4410         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4411         HWRM_CHECK_RESULT_SILENT();
4412
4413         total_alloc_len = sizeof(*ctx);
4414         ctx = rte_zmalloc("bnxt_ctx_mem", total_alloc_len,
4415                           RTE_CACHE_LINE_SIZE);
4416         if (!ctx) {
4417                 rc = -ENOMEM;
4418                 goto ctx_err;
4419         }
4420
4421         ctx_pg = rte_malloc("bnxt_ctx_pg_mem",
4422                             sizeof(*ctx_pg) * BNXT_MAX_Q,
4423                             RTE_CACHE_LINE_SIZE);
        if (!ctx_pg) {
                rte_free(ctx);
                rc = -ENOMEM;
                goto ctx_err;
        }
4428         for (i = 0; i < BNXT_MAX_Q; i++, ctx_pg++)
4429                 ctx->tqm_mem[i] = ctx_pg;
4430
4431         bp->ctx = ctx;
4432         ctx->qp_max_entries = rte_le_to_cpu_32(resp->qp_max_entries);
4433         ctx->qp_min_qp1_entries =
4434                 rte_le_to_cpu_16(resp->qp_min_qp1_entries);
4435         ctx->qp_max_l2_entries =
4436                 rte_le_to_cpu_16(resp->qp_max_l2_entries);
4437         ctx->qp_entry_size = rte_le_to_cpu_16(resp->qp_entry_size);
4438         ctx->srq_max_l2_entries =
4439                 rte_le_to_cpu_16(resp->srq_max_l2_entries);
4440         ctx->srq_max_entries = rte_le_to_cpu_32(resp->srq_max_entries);
4441         ctx->srq_entry_size = rte_le_to_cpu_16(resp->srq_entry_size);
4442         ctx->cq_max_l2_entries =
4443                 rte_le_to_cpu_16(resp->cq_max_l2_entries);
4444         ctx->cq_max_entries = rte_le_to_cpu_32(resp->cq_max_entries);
4445         ctx->cq_entry_size = rte_le_to_cpu_16(resp->cq_entry_size);
4446         ctx->vnic_max_vnic_entries =
4447                 rte_le_to_cpu_16(resp->vnic_max_vnic_entries);
4448         ctx->vnic_max_ring_table_entries =
4449                 rte_le_to_cpu_16(resp->vnic_max_ring_table_entries);
4450         ctx->vnic_entry_size = rte_le_to_cpu_16(resp->vnic_entry_size);
4451         ctx->stat_max_entries =
4452                 rte_le_to_cpu_32(resp->stat_max_entries);
4453         ctx->stat_entry_size = rte_le_to_cpu_16(resp->stat_entry_size);
4454         ctx->tqm_entry_size = rte_le_to_cpu_16(resp->tqm_entry_size);
4455         ctx->tqm_min_entries_per_ring =
4456                 rte_le_to_cpu_32(resp->tqm_min_entries_per_ring);
4457         ctx->tqm_max_entries_per_ring =
4458                 rte_le_to_cpu_32(resp->tqm_max_entries_per_ring);
4459         ctx->tqm_entries_multiple = resp->tqm_entries_multiple;
4460         if (!ctx->tqm_entries_multiple)
4461                 ctx->tqm_entries_multiple = 1;
4462         ctx->mrav_max_entries =
4463                 rte_le_to_cpu_32(resp->mrav_max_entries);
4464         ctx->mrav_entry_size = rte_le_to_cpu_16(resp->mrav_entry_size);
4465         ctx->tim_entry_size = rte_le_to_cpu_16(resp->tim_entry_size);
4466         ctx->tim_max_entries = rte_le_to_cpu_32(resp->tim_max_entries);
4467 ctx_err:
4468         HWRM_UNLOCK();
4469         return rc;
4470 }
4471
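/*
 * Hand the backing-store memory carved out on the host back to the FW.
 * A plausible call order, hedged (the actual sequencing lives in
 * bnxt_alloc_ctx_mem()):
 *
 *	rc = bnxt_hwrm_func_backing_store_qcaps(bp);
 *	...allocate page tables for each context type in bp->ctx...
 *	rc = bnxt_hwrm_func_backing_store_cfg(bp, enables);
 */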
4472 int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, uint32_t enables)
4473 {
4474         struct hwrm_func_backing_store_cfg_input req = {0};
4475         struct hwrm_func_backing_store_cfg_output *resp =
4476                 bp->hwrm_cmd_resp_addr;
4477         struct bnxt_ctx_mem_info *ctx = bp->ctx;
4478         struct bnxt_ctx_pg_info *ctx_pg;
4479         uint32_t *num_entries;
4480         uint64_t *pg_dir;
4481         uint8_t *pg_attr;
4482         uint32_t ena;
4483         int i, rc;
4484
4485         if (!ctx)
4486                 return 0;
4487
4488         HWRM_PREP(req, FUNC_BACKING_STORE_CFG, BNXT_USE_CHIMP_MB);
4489         req.enables = rte_cpu_to_le_32(enables);
4490
4491         if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_QP) {
4492                 ctx_pg = &ctx->qp_mem;
4493                 req.qp_num_entries = rte_cpu_to_le_32(ctx_pg->entries);
4494                 req.qp_num_qp1_entries =
4495                         rte_cpu_to_le_16(ctx->qp_min_qp1_entries);
4496                 req.qp_num_l2_entries =
4497                         rte_cpu_to_le_16(ctx->qp_max_l2_entries);
4498                 req.qp_entry_size = rte_cpu_to_le_16(ctx->qp_entry_size);
4499                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
4500                                       &req.qpc_pg_size_qpc_lvl,
4501                                       &req.qpc_page_dir);
4502         }
4503
4504         if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_SRQ) {
4505                 ctx_pg = &ctx->srq_mem;
4506                 req.srq_num_entries = rte_cpu_to_le_32(ctx_pg->entries);
4507                 req.srq_num_l2_entries =
4508                                  rte_cpu_to_le_16(ctx->srq_max_l2_entries);
4509                 req.srq_entry_size = rte_cpu_to_le_16(ctx->srq_entry_size);
4510                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
4511                                       &req.srq_pg_size_srq_lvl,
4512                                       &req.srq_page_dir);
4513         }
4514
4515         if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_CQ) {
4516                 ctx_pg = &ctx->cq_mem;
4517                 req.cq_num_entries = rte_cpu_to_le_32(ctx_pg->entries);
4518                 req.cq_num_l2_entries =
4519                                 rte_cpu_to_le_16(ctx->cq_max_l2_entries);
4520                 req.cq_entry_size = rte_cpu_to_le_16(ctx->cq_entry_size);
4521                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
4522                                       &req.cq_pg_size_cq_lvl,
4523                                       &req.cq_page_dir);
4524         }
4525
4526         if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_VNIC) {
4527                 ctx_pg = &ctx->vnic_mem;
4528                 req.vnic_num_vnic_entries =
4529                         rte_cpu_to_le_16(ctx->vnic_max_vnic_entries);
4530                 req.vnic_num_ring_table_entries =
4531                         rte_cpu_to_le_16(ctx->vnic_max_ring_table_entries);
4532                 req.vnic_entry_size = rte_cpu_to_le_16(ctx->vnic_entry_size);
4533                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
4534                                       &req.vnic_pg_size_vnic_lvl,
4535                                       &req.vnic_page_dir);
4536         }
4537
4538         if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_STAT) {
4539                 ctx_pg = &ctx->stat_mem;
4540                 req.stat_num_entries = rte_cpu_to_le_32(ctx->stat_max_entries);
4541                 req.stat_entry_size = rte_cpu_to_le_16(ctx->stat_entry_size);
4542                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
4543                                       &req.stat_pg_size_stat_lvl,
4544                                       &req.stat_page_dir);
4545         }
4546
4547         req.tqm_entry_size = rte_cpu_to_le_16(ctx->tqm_entry_size);
4548         num_entries = &req.tqm_sp_num_entries;
4549         pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl;
4550         pg_dir = &req.tqm_sp_page_dir;
4551         ena = HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP;
4552         for (i = 0; i < 9; i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
4553                 if (!(enables & ena))
4554                         continue;
4555
4558                 ctx_pg = ctx->tqm_mem[i];
4559                 *num_entries = rte_cpu_to_le_32(ctx_pg->entries);
4560                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
4561         }
4562
4563         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4564         HWRM_CHECK_RESULT();
4565         HWRM_UNLOCK();
4566
4567         return rc;
4568 }
4569
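/*
 * Fetch the extended port statistics into the DMA buffers registered
 * during init. The FW reports how many bytes it actually wrote, and
 * those sizes are cached so only valid counters are decoded later.
 */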
4570 int bnxt_hwrm_ext_port_qstats(struct bnxt *bp)
4571 {
4572         struct hwrm_port_qstats_ext_input req = {0};
4573         struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
4574         struct bnxt_pf_info *pf = &bp->pf;
4575         int rc;
4576
4577         if (!(bp->flags & BNXT_FLAG_EXT_RX_PORT_STATS ||
4578               bp->flags & BNXT_FLAG_EXT_TX_PORT_STATS))
4579                 return 0;
4580
4581         HWRM_PREP(req, PORT_QSTATS_EXT, BNXT_USE_CHIMP_MB);
4582
4583         req.port_id = rte_cpu_to_le_16(pf->port_id);
4584         if (bp->flags & BNXT_FLAG_EXT_TX_PORT_STATS) {
4585                 req.tx_stat_host_addr =
4586                         rte_cpu_to_le_64(bp->hw_tx_port_stats_ext_map);
4587                 req.tx_stat_size =
4588                         rte_cpu_to_le_16(sizeof(struct tx_port_stats_ext));
4589         }
4590         if (bp->flags & BNXT_FLAG_EXT_RX_PORT_STATS) {
4591                 req.rx_stat_host_addr =
4592                         rte_cpu_to_le_64(bp->hw_rx_port_stats_ext_map);
4593                 req.rx_stat_size =
4594                         rte_cpu_to_le_16(sizeof(struct rx_port_stats_ext));
4595         }
4596         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4597
4598         if (rc) {
4599                 bp->fw_rx_port_stats_ext_size = 0;
4600                 bp->fw_tx_port_stats_ext_size = 0;
4601         } else {
4602                 bp->fw_rx_port_stats_ext_size =
4603                         rte_le_to_cpu_16(resp->rx_stat_size);
4604                 bp->fw_tx_port_stats_ext_size =
4605                         rte_le_to_cpu_16(resp->tx_stat_size);
4606         }
4607
4608         HWRM_CHECK_RESULT();
4609         HWRM_UNLOCK();
4610
4611         return rc;
4612 }
4613
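/*
 * CFA tunnel-redirect helpers: allocate or free a redirect of a given
 * tunnel type to this function, query the mask of redirected tunnel
 * types, and look up the destination fid for a single type.
 */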
4614 int
4615 bnxt_hwrm_tunnel_redirect(struct bnxt *bp, uint8_t type)
4616 {
4617         struct hwrm_cfa_redirect_tunnel_type_alloc_input req = {0};
4618         struct hwrm_cfa_redirect_tunnel_type_alloc_output *resp =
4619                 bp->hwrm_cmd_resp_addr;
4620         int rc = 0;
4621
4622         HWRM_PREP(req, CFA_REDIRECT_TUNNEL_TYPE_ALLOC, BNXT_USE_CHIMP_MB);
4623         req.tunnel_type = type;
4624         req.dest_fid = bp->fw_fid;
4625         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4626         HWRM_CHECK_RESULT();
4627
4628         HWRM_UNLOCK();
4629
4630         return rc;
4631 }
4632
4633 int
4634 bnxt_hwrm_tunnel_redirect_free(struct bnxt *bp, uint8_t type)
4635 {
4636         struct hwrm_cfa_redirect_tunnel_type_free_input req = {0};
4637         struct hwrm_cfa_redirect_tunnel_type_free_output *resp =
4638                 bp->hwrm_cmd_resp_addr;
4639         int rc = 0;
4640
4641         HWRM_PREP(req, CFA_REDIRECT_TUNNEL_TYPE_FREE, BNXT_USE_CHIMP_MB);
4642         req.tunnel_type = type;
4643         req.dest_fid = bp->fw_fid;
4644         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4645         HWRM_CHECK_RESULT();
4646
4647         HWRM_UNLOCK();
4648
4649         return rc;
4650 }
4651
4652 int bnxt_hwrm_tunnel_redirect_query(struct bnxt *bp, uint32_t *type)
4653 {
4654         struct hwrm_cfa_redirect_query_tunnel_type_input req = {0};
4655         struct hwrm_cfa_redirect_query_tunnel_type_output *resp =
4656                 bp->hwrm_cmd_resp_addr;
4657         int rc = 0;
4658
4659         HWRM_PREP(req, CFA_REDIRECT_QUERY_TUNNEL_TYPE, BNXT_USE_CHIMP_MB);
4660         req.src_fid = bp->fw_fid;
4661         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4662         HWRM_CHECK_RESULT();
4663
4664         if (type)
4665                 *type = rte_le_to_cpu_32(resp->tunnel_mask);
4666
4667         HWRM_UNLOCK();
4668
4669         return rc;
4670 }
4671
4672 int bnxt_hwrm_tunnel_redirect_info(struct bnxt *bp, uint8_t tun_type,
4673                                    uint16_t *dst_fid)
4674 {
4675         struct hwrm_cfa_redirect_tunnel_type_info_input req = {0};
4676         struct hwrm_cfa_redirect_tunnel_type_info_output *resp =
4677                 bp->hwrm_cmd_resp_addr;
4678         int rc = 0;
4679
4680         HWRM_PREP(req, CFA_REDIRECT_TUNNEL_TYPE_INFO, BNXT_USE_CHIMP_MB);
4681         req.src_fid = bp->fw_fid;
4682         req.tunnel_type = tun_type;
4683         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4684         HWRM_CHECK_RESULT();
4685
4686         if (dst_fid)
4687                 *dst_fid = rte_le_to_cpu_16(resp->dest_fid);
4688
4689         PMD_DRV_LOG(DEBUG, "dst_fid: %x\n", rte_le_to_cpu_16(resp->dest_fid));
4690
4691         HWRM_UNLOCK();
4692
4693         return rc;
4694 }
4695
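/*
 * VF-only: publish bp->mac_addr to the FW as the default MAC via
 * FUNC_VF_CFG. On a PF this is a no-op.
 */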
4696 int bnxt_hwrm_set_mac(struct bnxt *bp)
4697 {
4698         struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
4699         struct hwrm_func_vf_cfg_input req = {0};
4700         int rc = 0;
4701
4702         if (!BNXT_VF(bp))
4703                 return 0;
4704
4705         HWRM_PREP(req, FUNC_VF_CFG, BNXT_USE_CHIMP_MB);
4706
4707         req.enables =
4708                 rte_cpu_to_le_32(HWRM_FUNC_VF_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
4709         memcpy(req.dflt_mac_addr, bp->mac_addr, RTE_ETHER_ADDR_LEN);
4710
4711         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4712
4713         HWRM_CHECK_RESULT();
4714
4715         memcpy(bp->dflt_mac_addr, bp->mac_addr, RTE_ETHER_ADDR_LEN);
4716         HWRM_UNLOCK();
4717
4718         return rc;
4719 }
4720
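/*
 * Notify the FW that the driver's use of the port is changing. On the
 * "up" transition the response flags tell us whether a hot FW reset
 * completed while the port was down, in which case a flag is set so
 * the start path can reinitialize HW resources.
 */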
4721 int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
4722 {
4723         struct hwrm_func_drv_if_change_output *resp = bp->hwrm_cmd_resp_addr;
4724         struct hwrm_func_drv_if_change_input req = {0};
4725         uint32_t flags;
4726         int rc;
4727
4728         if (!(bp->flags & BNXT_FLAG_FW_CAP_IF_CHANGE))
4729                 return 0;
4730
4731         /* Do not issue FUNC_DRV_IF_CHANGE during reset recovery:
4732          * if it is sent with the "down" flag before FUNC_DRV_UNRGTR,
4733          * the FW may reset before the unregister is processed.
4734          */
4735         if (!up && (bp->flags & BNXT_FLAG_FW_RESET))
4736                 return 0;
4737
4738         HWRM_PREP(req, FUNC_DRV_IF_CHANGE, BNXT_USE_CHIMP_MB);
4739
4740         if (up)
4741                 req.flags =
4742                 rte_cpu_to_le_32(HWRM_FUNC_DRV_IF_CHANGE_INPUT_FLAGS_UP);
4743
4744         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4745
4746         HWRM_CHECK_RESULT();
4747         flags = rte_le_to_cpu_32(resp->flags);
4748         HWRM_UNLOCK();
4749
4750         if (flags & HWRM_FUNC_DRV_IF_CHANGE_OUTPUT_FLAGS_HOT_FW_RESET_DONE) {
4751                 PMD_DRV_LOG(INFO, "FW reset happened while port was down\n");
4752                 bp->flags |= BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE;
4753         }
4754
4755         return 0;
4756 }
4757
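/*
 * Query the FW error-recovery contract: whether recovery is driven by
 * the host or the co-processor, the polling and wait periods (FW
 * reports them in units of 100 ms; they are converted to ms below),
 * and the register lists used to detect and trigger resets.
 */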
4758 int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
4759 {
4760         struct hwrm_error_recovery_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
4761         struct bnxt_error_recovery_info *info = bp->recovery_info;
4762         struct hwrm_error_recovery_qcfg_input req = {0};
4763         uint32_t flags = 0;
4764         unsigned int i;
4765         int rc;
4766
4767         /* Older FW does not have error recovery support */
4768         if (!(bp->flags & BNXT_FLAG_FW_CAP_ERROR_RECOVERY))
4769                 return 0;
4770
4771         if (!info) {
4772                 info = rte_zmalloc("bnxt_hwrm_error_recovery_qcfg",
4773                                    sizeof(*info), 0);
4774                 bp->recovery_info = info;
4775                 if (info == NULL)
4776                         return -ENOMEM;
4777         } else {
4778                 memset(info, 0, sizeof(*info));
4779         }
4780
4781         HWRM_PREP(req, ERROR_RECOVERY_QCFG, BNXT_USE_CHIMP_MB);
4782
4783         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4784
4785         HWRM_CHECK_RESULT();
4786
4787         flags = rte_le_to_cpu_32(resp->flags);
4788         if (flags & HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FLAGS_HOST)
4789                 info->flags |= BNXT_FLAG_ERROR_RECOVERY_HOST;
4790         else if (flags & HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FLAGS_CO_CPU)
4791                 info->flags |= BNXT_FLAG_ERROR_RECOVERY_CO_CPU;
4792
4793         if ((info->flags & BNXT_FLAG_ERROR_RECOVERY_CO_CPU) &&
4794             !(bp->flags & BNXT_FLAG_KONG_MB_EN)) {
4795                 rc = -EINVAL;
4796                 goto err;
4797         }
4798
4799         /* FW returned values are in units of 100msec */
4800         info->driver_polling_freq =
4801                 rte_le_to_cpu_32(resp->driver_polling_freq) * 100;
4802         info->master_func_wait_period =
4803                 rte_le_to_cpu_32(resp->master_func_wait_period) * 100;
4804         info->normal_func_wait_period =
4805                 rte_le_to_cpu_32(resp->normal_func_wait_period) * 100;
4806         info->master_func_wait_period_after_reset =
4807                 rte_le_to_cpu_32(resp->master_func_wait_period_after_reset) * 100;
4808         info->max_bailout_time_after_reset =
4809                 rte_le_to_cpu_32(resp->max_bailout_time_after_reset) * 100;
4810         info->status_regs[BNXT_FW_STATUS_REG] =
4811                 rte_le_to_cpu_32(resp->fw_health_status_reg);
4812         info->status_regs[BNXT_FW_HEARTBEAT_CNT_REG] =
4813                 rte_le_to_cpu_32(resp->fw_heartbeat_reg);
4814         info->status_regs[BNXT_FW_RECOVERY_CNT_REG] =
4815                 rte_le_to_cpu_32(resp->fw_reset_cnt_reg);
4816         info->status_regs[BNXT_FW_RESET_INPROG_REG] =
4817                 rte_le_to_cpu_32(resp->reset_inprogress_reg);
4818         info->reg_array_cnt =
4819                 rte_le_to_cpu_32(resp->reg_array_cnt);
4820
4821         if (info->reg_array_cnt >= BNXT_NUM_RESET_REG) {
4822                 rc = -EINVAL;
4823                 goto err;
4824         }
4825
4826         for (i = 0; i < info->reg_array_cnt; i++) {
4827                 info->reset_reg[i] =
4828                         rte_le_to_cpu_32(resp->reset_reg[i]);
4829                 info->reset_reg_val[i] =
4830                         rte_le_to_cpu_32(resp->reset_reg_val[i]);
4831                 info->delay_after_reset[i] =
4832                         resp->delay_after_reset[i];
4833         }
4834 err:
4835         HWRM_UNLOCK();
4836
4837         /* Map the FW status registers */
4838         if (!rc)
4839                 rc = bnxt_map_fw_health_status_regs(bp);
4840
4841         if (rc) {
4842                 rte_free(bp->recovery_info);
4843                 bp->recovery_info = NULL;
4844         }
4845         return rc;
4846 }
4847
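/*
 * Request a graceful self-reset of the whole chip. PF-only (VFs get
 * -EOPNOTSUPP); the command goes over the KONG mailbox when enabled.
 */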
4848 int bnxt_hwrm_fw_reset(struct bnxt *bp)
4849 {
4850         struct hwrm_fw_reset_output *resp = bp->hwrm_cmd_resp_addr;
4851         struct hwrm_fw_reset_input req = {0};
4852         int rc;
4853
4854         if (!BNXT_PF(bp))
4855                 return -EOPNOTSUPP;
4856
4857         HWRM_PREP(req, FW_RESET, BNXT_USE_KONG(bp));
4858
4859         req.embedded_proc_type =
4860                 HWRM_FW_RESET_INPUT_EMBEDDED_PROC_TYPE_CHIP;
4861         req.selfrst_status =
4862                 HWRM_FW_RESET_INPUT_SELFRST_STATUS_SELFRSTASAP;
4863         req.flags = HWRM_FW_RESET_INPUT_FLAGS_RESET_GRACEFUL;
4864
4865         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req),
4866                                     BNXT_USE_KONG(bp));
4867
4868         HWRM_CHECK_RESULT();
4869         HWRM_UNLOCK();
4870
4871         return rc;
4872 }
4873
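/*
 * Read a PTP timestamp (Tx path, Rx path, or the free-running current
 * time) from the FW. The 64-bit value arrives as two little-endian
 * 32-bit words that are stitched together below. Hedged caller sketch
 * (the real users are the timesync ops):
 *
 *	uint64_t ns = 0;
 *	rc = bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_CURRENT_TIME, &ns);
 */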
4874 int bnxt_hwrm_port_ts_query(struct bnxt *bp, uint8_t path, uint64_t *timestamp)
4875 {
4876         struct hwrm_port_ts_query_output *resp = bp->hwrm_cmd_resp_addr;
4877         struct hwrm_port_ts_query_input req = {0};
4878         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
4879         uint32_t flags = 0;
4880         int rc;
4881
4882         if (!ptp)
4883                 return 0;
4884
4885         HWRM_PREP(req, PORT_TS_QUERY, BNXT_USE_CHIMP_MB);
4886
4887         switch (path) {
4888         case BNXT_PTP_FLAGS_PATH_TX:
4889                 flags |= HWRM_PORT_TS_QUERY_INPUT_FLAGS_PATH_TX;
4890                 break;
4891         case BNXT_PTP_FLAGS_PATH_RX:
4892                 flags |= HWRM_PORT_TS_QUERY_INPUT_FLAGS_PATH_RX;
4893                 break;
4894         case BNXT_PTP_FLAGS_CURRENT_TIME:
4895                 flags |= HWRM_PORT_TS_QUERY_INPUT_FLAGS_CURRENT_TIME;
4896                 break;
4897         }
4898
4899         req.flags = rte_cpu_to_le_32(flags);
4900         req.port_id = rte_cpu_to_le_16(bp->pf.port_id);
4901
4902         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4903
4904         HWRM_CHECK_RESULT();
4905
4906         if (timestamp) {
4907                 *timestamp = rte_le_to_cpu_32(resp->ptp_msg_ts[0]);
4908                 *timestamp |=
4909                         (uint64_t)(rte_le_to_cpu_32(resp->ptp_msg_ts[1])) << 32;
4910         }
4911         HWRM_UNLOCK();
4912
4913         return rc;
4914 }