net/bnxt: support creating SMAC and inner DMAC filters
drivers/net/bnxt/bnxt_hwrm.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Broadcom
 * All rights reserved.
 */

#include <unistd.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_version.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_ring.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"

#include <rte_io.h>

#define HWRM_CMD_TIMEOUT                6000000
#define HWRM_SHORT_CMD_TIMEOUT          50000
#define HWRM_SPEC_CODE_1_8_3            0x10803
#define HWRM_VERSION_1_9_1              0x10901
#define HWRM_VERSION_1_9_2              0x10903

struct bnxt_plcmodes_cfg {
        uint32_t        flags;
        uint16_t        jumbo_thresh;
        uint16_t        hds_offset;
        uint16_t        hds_threshold;
};

static int page_getenum(size_t size)
{
        if (size <= 1 << 4)
                return 4;
        if (size <= 1 << 12)
                return 12;
        if (size <= 1 << 13)
                return 13;
        if (size <= 1 << 16)
                return 16;
        if (size <= 1 << 21)
                return 21;
        if (size <= 1 << 22)
                return 22;
        if (size <= 1 << 30)
                return 30;
        PMD_DRV_LOG(ERR, "Page size %zu out of range\n", size);
        return sizeof(void *) * 8 - 1;
}

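/*
 * Round a size up to the next supported page size; e.g. page_roundup(3000)
 * returns 4096, since page_getenum(3000) yields 12 and 1 << 12 == 4096.
 */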
static int page_roundup(size_t size)
{
        return 1 << page_getenum(size);
}

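/*
 * Fill the page attribute/directory fields of a ring-allocation request:
 * rings that span multiple pages hand firmware the DMA address of the page
 * table, while single-page rings pass the page's DMA address directly.
 */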
static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem,
                                  uint8_t *pg_attr,
                                  uint64_t *pg_dir)
{
        if (rmem->nr_pages > 1) {
                *pg_attr = 1;
                *pg_dir = rte_cpu_to_le_64(rmem->pg_tbl_map);
        } else {
                *pg_dir = rte_cpu_to_le_64(rmem->dma_arr[0]);
        }
}

/*
 * HWRM Functions (sent to HWRM)
 * These are named bnxt_hwrm_*() and return a negative errno on failure:
 * -ETIMEDOUT if bnxt_hwrm_send_message() times out, or an errno translated
 * from the HWRM error code if the command was rejected by the ChiMP firmware
 * (see HWRM_CHECK_RESULT()).
 */

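/*
 * Requests are written 32 bits at a time into the ChiMP (or KONG) mailbox
 * window in BAR0, the remainder of the window is zeroed, and the channel
 * doorbell is rung. Completion is detected by polling the valid byte at the
 * end of the DMA response buffer.
 */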
static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
                                  uint32_t msg_len, bool use_kong_mb)
{
        unsigned int i;
        struct input *req = msg;
        struct output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t *data = msg;
        uint8_t *bar;
        uint8_t *valid;
        uint16_t max_req_len = bp->max_req_len;
        struct hwrm_short_input short_input = { 0 };
        uint16_t bar_offset = use_kong_mb ?
                GRCPF_REG_KONG_CHANNEL_OFFSET : GRCPF_REG_CHIMP_CHANNEL_OFFSET;
        uint16_t mb_trigger_offset = use_kong_mb ?
                GRCPF_REG_KONG_COMM_TRIGGER : GRCPF_REG_CHIMP_COMM_TRIGGER;
        uint32_t timeout;

        /* Do not send HWRM commands to firmware in error state */
        if (bp->flags & BNXT_FLAG_FATAL_ERROR)
                return 0;

        /* For the VER_GET command, use the shorter 50ms timeout */
        if (rte_cpu_to_le_16(req->req_type) == HWRM_VER_GET)
                timeout = HWRM_SHORT_CMD_TIMEOUT;
        else
                timeout = HWRM_CMD_TIMEOUT;

        if (bp->flags & BNXT_FLAG_SHORT_CMD ||
            msg_len > bp->max_req_len) {
                void *short_cmd_req = bp->hwrm_short_cmd_req_addr;

                memset(short_cmd_req, 0, bp->hwrm_max_ext_req_len);
                memcpy(short_cmd_req, req, msg_len);

                short_input.req_type = rte_cpu_to_le_16(req->req_type);
                short_input.signature = rte_cpu_to_le_16(
                                        HWRM_SHORT_INPUT_SIGNATURE_SHORT_CMD);
                short_input.size = rte_cpu_to_le_16(msg_len);
                short_input.req_addr =
                        rte_cpu_to_le_64(bp->hwrm_short_cmd_req_dma_addr);

                data = (uint32_t *)&short_input;
                msg_len = sizeof(short_input);

                max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
        }

        /* Write request msg to hwrm channel */
        for (i = 0; i < msg_len; i += 4) {
                bar = (uint8_t *)bp->bar0 + bar_offset + i;
                rte_write32(*data, bar);
                data++;
        }

        /* Zero the rest of the request space */
        for (; i < max_req_len; i += 4) {
                bar = (uint8_t *)bp->bar0 + bar_offset + i;
                rte_write32(0, bar);
        }

        /* Ring channel doorbell */
        bar = (uint8_t *)bp->bar0 + mb_trigger_offset;
        rte_write32(1, bar);
        /*
         * Make sure the channel doorbell ring command completes before
         * reading the response to avoid getting stale or invalid
         * responses.
         */
        rte_io_mb();

        /* Poll for the valid bit */
        for (i = 0; i < timeout; i++) {
                /* Sanity check on the resp->resp_len */
                rte_cio_rmb();
                if (resp->resp_len && resp->resp_len <= bp->max_resp_len) {
                        /* Last byte of resp contains the valid key */
                        valid = (uint8_t *)resp + resp->resp_len - 1;
                        if (*valid == HWRM_RESP_VALID_KEY)
                                break;
                }
                rte_delay_us(1);
        }

        if (i >= timeout) {
                /* Suppress VER_GET timeout messages during reset recovery */
                if (bp->flags & BNXT_FLAG_FW_RESET &&
                    rte_cpu_to_le_16(req->req_type) == HWRM_VER_GET)
                        return -ETIMEDOUT;

                PMD_DRV_LOG(ERR, "Error(timeout) sending msg 0x%04x\n",
                            req->req_type);
                return -ETIMEDOUT;
        }
        return 0;
}

/*
 * HWRM_PREP() should be used to prepare *ALL* HWRM commands. It grabs the
 * spinlock and does the initial processing.
 *
 * HWRM_CHECK_RESULT() returns from the calling function on failure,
 * releasing the spinlock first; on success the spinlock remains held. If a
 * function does not use the regular int return codes, HWRM_CHECK_RESULT()
 * should not be used directly; rather it should be copied and modified to
 * suit the function.
 *
 * HWRM_UNLOCK() must be called after all response processing is completed.
 */
#define HWRM_PREP(req, type, kong) do { \
        rte_spinlock_lock(&bp->hwrm_lock); \
        memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
        req.req_type = rte_cpu_to_le_16(HWRM_##type); \
        req.cmpl_ring = rte_cpu_to_le_16(-1); \
        req.seq_id = kong ? rte_cpu_to_le_16(bp->kong_cmd_seq++) :\
                rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
        req.target_id = rte_cpu_to_le_16(0xffff); \
        req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr); \
} while (0)

#define HWRM_CHECK_RESULT_SILENT() do {\
        if (rc) { \
                rte_spinlock_unlock(&bp->hwrm_lock); \
                return rc; \
        } \
        if (resp->error_code) { \
                rc = rte_le_to_cpu_16(resp->error_code); \
                rte_spinlock_unlock(&bp->hwrm_lock); \
                return rc; \
        } \
} while (0)

#define HWRM_CHECK_RESULT() do {\
        if (rc) { \
                PMD_DRV_LOG(ERR, "failed rc:%d\n", rc); \
                rte_spinlock_unlock(&bp->hwrm_lock); \
                if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
                        rc = -EACCES; \
                else if (rc == HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR) \
                        rc = -ENOSPC; \
                else if (rc == HWRM_ERR_CODE_INVALID_PARAMS) \
                        rc = -EINVAL; \
                else if (rc == HWRM_ERR_CODE_CMD_NOT_SUPPORTED) \
                        rc = -ENOTSUP; \
                else if (rc > 0) \
                        rc = -EIO; \
                return rc; \
        } \
        if (resp->error_code) { \
                rc = rte_le_to_cpu_16(resp->error_code); \
                if (resp->resp_len >= 16) { \
                        struct hwrm_err_output *tmp_hwrm_err_op = \
                                                (void *)resp; \
                        PMD_DRV_LOG(ERR, \
                                "error %d:%d:%08x:%04x\n", \
                                rc, tmp_hwrm_err_op->cmd_err, \
                                rte_le_to_cpu_32(\
                                        tmp_hwrm_err_op->opaque_0), \
                                rte_le_to_cpu_16(\
                                        tmp_hwrm_err_op->opaque_1)); \
                } else { \
                        PMD_DRV_LOG(ERR, "error %d\n", rc); \
                } \
                rte_spinlock_unlock(&bp->hwrm_lock); \
                if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
                        rc = -EACCES; \
                else if (rc == HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR) \
                        rc = -ENOSPC; \
                else if (rc == HWRM_ERR_CODE_INVALID_PARAMS) \
                        rc = -EINVAL; \
                else if (rc == HWRM_ERR_CODE_CMD_NOT_SUPPORTED) \
                        rc = -ENOTSUP; \
                else if (rc > 0) \
                        rc = -EIO; \
                return rc; \
        } \
} while (0)

#define HWRM_UNLOCK()           rte_spinlock_unlock(&bp->hwrm_lock)
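
/*
 * Canonical usage pattern for these macros (an illustrative sketch; the
 * concrete functions below all follow this shape; "FOO"/"hwrm_foo_*" are
 * placeholder names for this sketch only):
 *
 *      int bnxt_hwrm_foo(struct bnxt *bp)
 *      {
 *              int rc = 0;
 *              struct hwrm_foo_input req = {.req_type = 0 };
 *              struct hwrm_foo_output *resp = bp->hwrm_cmd_resp_addr;
 *
 *              HWRM_PREP(req, FOO, BNXT_USE_CHIMP_MB);
 *              // ...fill req fields...
 *              rc = bnxt_hwrm_send_message(bp, &req, sizeof(req),
 *                                          BNXT_USE_CHIMP_MB);
 *              HWRM_CHECK_RESULT();    // returns on error, unlocking first
 *              // ...read resp fields while the lock is still held...
 *              HWRM_UNLOCK();
 *              return rc;
 *      }
 */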

int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, CFA_L2_SET_RX_MASK, BNXT_USE_CHIMP_MB);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
        req.mask = 0;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
                                 struct bnxt_vnic_info *vnic,
                                 uint16_t vlan_count,
                                 struct bnxt_vlan_table_entry *vlan_table)
{
        int rc = 0;
        struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t mask = 0;

        if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
                return rc;

        HWRM_PREP(req, CFA_L2_SET_RX_MASK, BNXT_USE_CHIMP_MB);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

        /* FIXME: add the multicast flag once multicast add options are
         * supported by ethtool.
         */
        if (vnic->flags & BNXT_VNIC_INFO_BCAST)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST;
        if (vnic->flags & BNXT_VNIC_INFO_UNTAGGED)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN;
        if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
        if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
        if (vnic->flags & BNXT_VNIC_INFO_MCAST)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
        if (vnic->mc_addr_cnt) {
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
                req.num_mc_entries = rte_cpu_to_le_32(vnic->mc_addr_cnt);
                req.mc_tbl_addr = rte_cpu_to_le_64(vnic->mc_list_dma_addr);
        }
        if (vlan_table) {
                if (!(mask & HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN))
                        mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
                req.vlan_tag_tbl_addr = rte_cpu_to_le_64(
                         rte_mem_virt2iova(vlan_table));
                req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count);
        }
        req.mask = rte_cpu_to_le_32(mask);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
                        uint16_t vlan_count,
                        struct bnxt_vlan_antispoof_table_entry *vlan_table)
{
        int rc = 0;
        struct hwrm_cfa_vlan_antispoof_cfg_input req = {.req_type = 0 };
        struct hwrm_cfa_vlan_antispoof_cfg_output *resp =
                                                bp->hwrm_cmd_resp_addr;

        /*
         * Older HWRM versions did not support this command, and the
         * set_rx_mask list was used for anti-spoofing. In 1.8.0, the TX path
         * configuration was removed from the set_rx_mask call and this
         * command was added.
         *
         * The command is also present in 1.7.8.11 and higher, as well as in
         * 1.7.8.0 exactly.
         */
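        /*
         * bp->fw_ver packs the firmware version as
         * (maj << 24) | (min << 16) | (bld << 8) | rsvd (see
         * bnxt_hwrm_ver_get() below), so e.g. (1 << 24) | (8 << 16) is 1.8.0.
         */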
        if (bp->fw_ver < ((1 << 24) | (8 << 16))) {
                if (bp->fw_ver != ((1 << 24) | (7 << 16) | (8 << 8))) {
                        if (bp->fw_ver < ((1 << 24) | (7 << 16) | (8 << 8) |
                                        (11)))
                                return 0;
                }
        }
        HWRM_PREP(req, CFA_VLAN_ANTISPOOF_CFG, BNXT_USE_CHIMP_MB);
        req.fid = rte_cpu_to_le_16(fid);

        req.vlan_tag_mask_tbl_addr =
                rte_cpu_to_le_64(rte_mem_virt2iova(vlan_table));
        req.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_clear_l2_filter(struct bnxt *bp,
                           struct bnxt_filter_info *filter)
{
        int rc = 0;
        struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;

        if (filter->fw_l2_filter_id == UINT64_MAX)
                return 0;

        HWRM_PREP(req, CFA_L2_FILTER_FREE, BNXT_USE_CHIMP_MB);

        req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        filter->fw_l2_filter_id = UINT64_MAX;

        return 0;
}

int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
                         uint16_t dst_id,
                         struct bnxt_filter_info *filter)
{
        int rc = 0;
        struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
        struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
        const struct rte_eth_vmdq_rx_conf *conf =
                    &dev_conf->rx_adv_conf.vmdq_rx_conf;
        uint32_t enables = 0;
        uint16_t j = dst_id - 1;

        /* TODO: Is there a better way to add VLANs to each VNIC for VMDQ? */
        if ((dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) &&
            conf->pool_map[j].pools & (1UL << j)) {
                PMD_DRV_LOG(DEBUG,
                        "Add vlan %u to vmdq pool %u\n",
                        conf->pool_map[j].vlan_id, j);

                filter->l2_ivlan = conf->pool_map[j].vlan_id;
                filter->enables |=
                        HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN |
                        HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK;
        }

        if (filter->fw_l2_filter_id != UINT64_MAX)
                bnxt_hwrm_clear_l2_filter(bp, filter);

        HWRM_PREP(req, CFA_L2_FILTER_ALLOC, BNXT_USE_CHIMP_MB);

        req.flags = rte_cpu_to_le_32(filter->flags);

        enables = filter->enables |
              HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
        req.dst_id = rte_cpu_to_le_16(dst_id);

        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
                memcpy(req.l2_addr, filter->l2_addr,
                       RTE_ETHER_ADDR_LEN);
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
                memcpy(req.l2_addr_mask, filter->l2_addr_mask,
                       RTE_ETHER_ADDR_LEN);
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
                req.l2_ovlan = filter->l2_ovlan;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN)
                req.l2_ivlan = filter->l2_ivlan;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
                req.l2_ovlan_mask = filter->l2_ovlan_mask;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK)
                req.l2_ivlan_mask = filter->l2_ivlan_mask;
        if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID)
                req.src_id = rte_cpu_to_le_32(filter->src_id);
        if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE)
                req.src_type = filter->src_type;
        if (filter->pri_hint) {
                req.pri_hint = filter->pri_hint;
                req.l2_filter_id_hint =
                        rte_cpu_to_le_64(filter->l2_filter_id_hint);
        }

        req.enables = rte_cpu_to_le_32(enables);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

        filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
{
        struct hwrm_port_mac_cfg_input req = {.req_type = 0};
        struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
        uint32_t flags = 0;
        int rc;

        if (!ptp)
                return 0;

        HWRM_PREP(req, PORT_MAC_CFG, BNXT_USE_CHIMP_MB);

        if (ptp->rx_filter)
                flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_ENABLE;
        else
                flags |=
                        HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_DISABLE;
        if (ptp->tx_tstamp_en)
                flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_ENABLE;
        else
                flags |=
                        HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_DISABLE;
        req.flags = rte_cpu_to_le_32(flags);
        req.enables = rte_cpu_to_le_32
                (HWRM_PORT_MAC_CFG_INPUT_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE);
        req.rx_ts_capture_ptp_msg_type = rte_cpu_to_le_16(ptp->rxctl);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
        HWRM_UNLOCK();

        return rc;
}

static int bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_port_mac_ptp_qcfg_input req = {.req_type = 0};
        struct hwrm_port_mac_ptp_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
        struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;

/*      if (bp->hwrm_spec_code < 0x10801 || ptp)  TBD  */
        if (ptp)
                return 0;

        HWRM_PREP(req, PORT_MAC_PTP_QCFG, BNXT_USE_CHIMP_MB);

        req.port_id = rte_cpu_to_le_16(bp->pf.port_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

        if (!BNXT_CHIP_THOR(bp) &&
            !(resp->flags &
              HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_DIRECT_ACCESS)) {
                HWRM_UNLOCK();
                return 0;
        }

        if (resp->flags & HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_ONE_STEP_TX_TS)
                bp->flags |= BNXT_FLAG_FW_CAP_ONE_STEP_TX_TS;

        ptp = rte_zmalloc("ptp_cfg", sizeof(*ptp), 0);
        if (!ptp) {
                HWRM_UNLOCK();
                return -ENOMEM;
        }

        if (!BNXT_CHIP_THOR(bp)) {
                ptp->rx_regs[BNXT_PTP_RX_TS_L] =
                        rte_le_to_cpu_32(resp->rx_ts_reg_off_lower);
                ptp->rx_regs[BNXT_PTP_RX_TS_H] =
                        rte_le_to_cpu_32(resp->rx_ts_reg_off_upper);
                ptp->rx_regs[BNXT_PTP_RX_SEQ] =
                        rte_le_to_cpu_32(resp->rx_ts_reg_off_seq_id);
                ptp->rx_regs[BNXT_PTP_RX_FIFO] =
                        rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo);
                ptp->rx_regs[BNXT_PTP_RX_FIFO_ADV] =
                        rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo_adv);
                ptp->tx_regs[BNXT_PTP_TX_TS_L] =
                        rte_le_to_cpu_32(resp->tx_ts_reg_off_lower);
                ptp->tx_regs[BNXT_PTP_TX_TS_H] =
                        rte_le_to_cpu_32(resp->tx_ts_reg_off_upper);
                ptp->tx_regs[BNXT_PTP_TX_SEQ] =
                        rte_le_to_cpu_32(resp->tx_ts_reg_off_seq_id);
                ptp->tx_regs[BNXT_PTP_TX_FIFO] =
                        rte_le_to_cpu_32(resp->tx_ts_reg_off_fifo);
        }

        /* All resp fields have been consumed; release the HWRM lock */
        HWRM_UNLOCK();

        ptp->bp = bp;
        bp->ptp_cfg = ptp;

        return 0;
}

static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_func_qcaps_input req = {.req_type = 0 };
        struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
        uint16_t new_max_vfs;
        uint32_t flags;
        int i;

        HWRM_PREP(req, FUNC_QCAPS, BNXT_USE_CHIMP_MB);

        req.fid = rte_cpu_to_le_16(0xffff);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

        bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
        flags = rte_le_to_cpu_32(resp->flags);
        if (BNXT_PF(bp)) {
                bp->pf.port_id = resp->port_id;
                bp->pf.first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
                bp->pf.total_vfs = rte_le_to_cpu_16(resp->max_vfs);
                new_max_vfs = bp->pdev->max_vfs;
                if (new_max_vfs != bp->pf.max_vfs) {
                        if (bp->pf.vf_info)
                                rte_free(bp->pf.vf_info);
                        bp->pf.vf_info = rte_malloc("bnxt_vf_info",
                            sizeof(bp->pf.vf_info[0]) * new_max_vfs, 0);
                        bp->pf.max_vfs = new_max_vfs;
                        for (i = 0; i < new_max_vfs; i++) {
                                bp->pf.vf_info[i].fid = bp->pf.first_vf_id + i;
                                bp->pf.vf_info[i].vlan_table =
                                        rte_zmalloc("VF VLAN table",
                                                    getpagesize(),
                                                    getpagesize());
                                if (bp->pf.vf_info[i].vlan_table == NULL)
                                        PMD_DRV_LOG(ERR,
                                        "Failed to alloc VLAN table for VF %d\n",
                                        i);
                                else
                                        rte_mem_lock_page(
                                                bp->pf.vf_info[i].vlan_table);
                                bp->pf.vf_info[i].vlan_as_table =
                                        rte_zmalloc("VF VLAN AS table",
                                                    getpagesize(),
                                                    getpagesize());
                                if (bp->pf.vf_info[i].vlan_as_table == NULL)
                                        PMD_DRV_LOG(ERR,
                                        "Failed to alloc VLAN AS table for VF %d\n",
                                        i);
                                else
                                        rte_mem_lock_page(
                                               bp->pf.vf_info[i].vlan_as_table);
                                STAILQ_INIT(&bp->pf.vf_info[i].filter);
                        }
                }
        }

        bp->fw_fid = rte_le_to_cpu_32(resp->fid);
        memcpy(bp->dflt_mac_addr, &resp->mac_address, RTE_ETHER_ADDR_LEN);
        bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
        bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
        bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
        bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
        bp->first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
        bp->max_rx_em_flows = rte_le_to_cpu_16(resp->max_rx_em_flows);
        bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
        if (!BNXT_CHIP_THOR(bp))
                bp->max_l2_ctx += bp->max_rx_em_flows;
        /* TODO: For now, do not support VMDq/RFS on VFs. */
        if (BNXT_PF(bp)) {
                if (bp->pf.max_vfs)
                        bp->max_vnics = 1;
                else
                        bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
        } else {
                bp->max_vnics = 1;
        }
        bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
        if (BNXT_PF(bp))
                bp->pf.total_vnics = rte_le_to_cpu_16(resp->max_vnics);

        if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT_STATS_SUPPORTED)
                bp->flags |= BNXT_FLAG_EXT_STATS_SUPPORTED;

        if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ERROR_RECOVERY_CAPABLE) {
                bp->flags |= BNXT_FLAG_FW_CAP_ERROR_RECOVERY;
                PMD_DRV_LOG(DEBUG, "Adapter Error recovery SUPPORTED\n");
        } else {
                bp->flags &= ~BNXT_FLAG_FW_CAP_ERROR_RECOVERY;
        }

        if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ERR_RECOVER_RELOAD)
                bp->flags |= BNXT_FLAG_FW_CAP_ERR_RECOVER_RELOAD;
        else
                bp->flags &= ~BNXT_FLAG_FW_CAP_ERR_RECOVER_RELOAD;

        HWRM_UNLOCK();

        /*
         * Query the PTP configuration only after releasing the HWRM lock:
         * bnxt_hwrm_ptp_qcfg() issues its own HWRM command and takes (and
         * releases) the lock itself.
         */
        if (BNXT_PF(bp) &&
            (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED)) {
                bp->flags |= BNXT_FLAG_PTP_SUPPORTED;
                PMD_DRV_LOG(DEBUG, "PTP SUPPORTED\n");
                bnxt_hwrm_ptp_qcfg(bp);
        }

        return rc;
}

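/*
 * On firmware 1.8.3 and newer, per-function resource reservations
 * (HWRM_FUNC_RESOURCE_QCAPS) supersede the static maxima reported by
 * HWRM_FUNC_QCAPS; BNXT_FLAG_NEW_RM records that the new resource manager
 * is in use.
 */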
int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
        int rc;

        rc = __bnxt_hwrm_func_qcaps(bp);
        if (!rc && bp->hwrm_spec_code >= HWRM_SPEC_CODE_1_8_3) {
                rc = bnxt_alloc_ctx_mem(bp);
                if (rc)
                        return rc;

                rc = bnxt_hwrm_func_resc_qcaps(bp);
                if (!rc)
                        bp->flags |= BNXT_FLAG_NEW_RM;
        }

        return rc;
}

int bnxt_hwrm_func_reset(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_func_reset_input req = {.req_type = 0 };
        struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, FUNC_RESET, BNXT_USE_CHIMP_MB);

        req.enables = rte_cpu_to_le_32(0);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_func_driver_register(struct bnxt *bp)
{
        int rc;
        uint32_t flags = 0;
        struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
        struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;

        if (bp->flags & BNXT_FLAG_REGISTERED)
                return 0;

        flags = HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_HOT_RESET_SUPPORT;
        if (bp->flags & BNXT_FLAG_FW_CAP_ERROR_RECOVERY)
                flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_ERROR_RECOVERY_SUPPORT;

        /* PFs and trusted VFs should indicate support for the Master
         * capability on non-Stingray platforms.
         */
        if ((BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) && !BNXT_STINGRAY(bp))
                flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_MASTER_SUPPORT;

        HWRM_PREP(req, FUNC_DRV_RGTR, BNXT_USE_CHIMP_MB);
        req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
                        HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
        req.ver_maj = RTE_VER_YEAR;
        req.ver_min = RTE_VER_MONTH;
        req.ver_upd = RTE_VER_MINOR;

        if (BNXT_PF(bp)) {
                req.enables |= rte_cpu_to_le_32(
                        HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_REQ_FWD);
                memcpy(req.vf_req_fwd, bp->pf.vf_req_fwd,
                       RTE_MIN(sizeof(req.vf_req_fwd),
                               sizeof(bp->pf.vf_req_fwd)));

                /*
                 * The PF can sniff HWRM API calls issued by a VF. This can be
                 * set up by the Linux driver and inherited by the DPDK PF
                 * driver. Clear this HWRM sniffer list in FW because the DPDK
                 * PF driver does not support it.
                 */
                flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FWD_NONE_MODE;
        }

        req.flags = rte_cpu_to_le_32(flags);

        req.async_event_fwd[0] |=
                rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_LINK_STATUS_CHANGE |
                                 ASYNC_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED |
                                 ASYNC_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE |
                                 ASYNC_CMPL_EVENT_ID_LINK_SPEED_CHANGE |
                                 ASYNC_CMPL_EVENT_ID_RESET_NOTIFY);
        if (bp->flags & BNXT_FLAG_FW_CAP_ERROR_RECOVERY)
                req.async_event_fwd[0] |=
                        rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_ERROR_RECOVERY);
        req.async_event_fwd[1] |=
                rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_PF_DRVR_UNLOAD |
                                 ASYNC_CMPL_EVENT_ID_VF_CFG_CHANGE);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

        flags = rte_le_to_cpu_32(resp->flags);
        if (flags & HWRM_FUNC_DRV_RGTR_OUTPUT_FLAGS_IF_CHANGE_SUPPORTED)
                bp->flags |= BNXT_FLAG_FW_CAP_IF_CHANGE;

        HWRM_UNLOCK();

        bp->flags |= BNXT_FLAG_REGISTERED;

        return rc;
}

int bnxt_hwrm_check_vf_rings(struct bnxt *bp)
{
        if (!(BNXT_VF(bp) && (bp->flags & BNXT_FLAG_NEW_RM)))
                return 0;

        return bnxt_hwrm_func_reserve_vf_resc(bp, true);
}

int bnxt_hwrm_func_reserve_vf_resc(struct bnxt *bp, bool test)
{
        int rc;
        uint32_t flags = 0;
        uint32_t enables;
        struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        struct hwrm_func_vf_cfg_input req = {0};

        HWRM_PREP(req, FUNC_VF_CFG, BNXT_USE_CHIMP_MB);

        enables = HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RX_RINGS  |
                  HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_TX_RINGS   |
                  HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_STAT_CTXS  |
                  HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
                  HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS;

        if (BNXT_HAS_RING_GRPS(bp)) {
                enables |= HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS;
                req.num_hw_ring_grps = rte_cpu_to_le_16(bp->rx_nr_rings);
        }

        req.num_tx_rings = rte_cpu_to_le_16(bp->tx_nr_rings);
        req.num_rx_rings = rte_cpu_to_le_16(bp->rx_nr_rings *
                                            AGG_RING_MULTIPLIER);
        req.num_stat_ctxs = rte_cpu_to_le_16(bp->rx_nr_rings +
                                             bp->tx_nr_rings +
                                             BNXT_NUM_ASYNC_CPR(bp));
        req.num_cmpl_rings = rte_cpu_to_le_16(bp->rx_nr_rings +
                                              bp->tx_nr_rings +
                                              BNXT_NUM_ASYNC_CPR(bp));
        req.num_vnics = rte_cpu_to_le_16(bp->rx_nr_rings);
        if (bp->vf_resv_strategy ==
            HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
                enables |= HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS |
                           HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_L2_CTXS |
                           HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS;
                req.num_rsscos_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_RSS_CTX);
                req.num_l2_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_L2_CTX);
                req.num_vnics = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_VNIC);
        }

        if (test)
                flags = HWRM_FUNC_VF_CFG_INPUT_FLAGS_TX_ASSETS_TEST |
                        HWRM_FUNC_VF_CFG_INPUT_FLAGS_RX_ASSETS_TEST |
                        HWRM_FUNC_VF_CFG_INPUT_FLAGS_CMPL_ASSETS_TEST |
                        HWRM_FUNC_VF_CFG_INPUT_FLAGS_STAT_CTX_ASSETS_TEST |
                        HWRM_FUNC_VF_CFG_INPUT_FLAGS_VNIC_ASSETS_TEST;

        /* Ring group assets only exist on chips that have ring groups */
        if (test && BNXT_HAS_RING_GRPS(bp))
                flags |= HWRM_FUNC_VF_CFG_INPUT_FLAGS_RING_GRP_ASSETS_TEST;

        req.flags = rte_cpu_to_le_32(flags);
        req.enables |= rte_cpu_to_le_32(enables);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        if (test)
                HWRM_CHECK_RESULT_SILENT();
        else
                HWRM_CHECK_RESULT();

        HWRM_UNLOCK();
        return rc;
}

int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp)
{
        int rc;
        struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
        struct hwrm_func_resource_qcaps_input req = {0};

        HWRM_PREP(req, FUNC_RESOURCE_QCAPS, BNXT_USE_CHIMP_MB);
        req.fid = rte_cpu_to_le_16(0xffff);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

        if (BNXT_VF(bp)) {
                bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
                bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
                bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
                bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
                bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
                /* func_resource_qcaps does not return max_rx_em_flows.
                 * So use the value provided by func_qcaps.
                 */
                bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
                if (!BNXT_CHIP_THOR(bp))
                        bp->max_l2_ctx += bp->max_rx_em_flows;
                bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
                bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
        }
        bp->max_nq_rings = rte_le_to_cpu_16(resp->max_msix);
        bp->vf_resv_strategy = rte_le_to_cpu_16(resp->vf_reservation_strategy);
        if (bp->vf_resv_strategy >
            HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC)
                bp->vf_resv_strategy =
                HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_MAXIMAL;

        HWRM_UNLOCK();
        return rc;
}

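/*
 * VER_GET negotiates the HWRM interface with firmware: it validates the
 * major version, (re)sizes the DMA buffers used for responses and for the
 * short-command format, and records capability flags such as short command,
 * KONG mailbox and advanced flow management support.
 */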
int bnxt_hwrm_ver_get(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_ver_get_input req = {.req_type = 0 };
        struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t fw_version;
        uint16_t max_resp_len;
        char type[RTE_MEMZONE_NAMESIZE];
        uint32_t dev_caps_cfg;

        bp->max_req_len = HWRM_MAX_REQ_LEN;
        HWRM_PREP(req, VER_GET, BNXT_USE_CHIMP_MB);

        req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
        req.hwrm_intf_min = HWRM_VERSION_MINOR;
        req.hwrm_intf_upd = HWRM_VERSION_UPDATE;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        if (bp->flags & BNXT_FLAG_FW_RESET)
                HWRM_CHECK_RESULT_SILENT();
        else
                HWRM_CHECK_RESULT();

        PMD_DRV_LOG(INFO, "%d.%d.%d:%d.%d.%d\n",
                resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
                resp->hwrm_intf_upd_8b, resp->hwrm_fw_maj_8b,
                resp->hwrm_fw_min_8b, resp->hwrm_fw_bld_8b);
        bp->fw_ver = (resp->hwrm_fw_maj_8b << 24) |
                     (resp->hwrm_fw_min_8b << 16) |
                     (resp->hwrm_fw_bld_8b << 8) |
                     resp->hwrm_fw_rsvd_8b;
        PMD_DRV_LOG(INFO, "Driver HWRM version: %d.%d.%d\n",
                HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);

        fw_version = resp->hwrm_intf_maj_8b << 16;
        fw_version |= resp->hwrm_intf_min_8b << 8;
        fw_version |= resp->hwrm_intf_upd_8b;
        bp->hwrm_spec_code = fw_version;

        if (resp->hwrm_intf_maj_8b != HWRM_VERSION_MAJOR) {
                PMD_DRV_LOG(ERR, "Unsupported firmware API version\n");
                rc = -EINVAL;
                goto error;
        }

        if (bp->max_req_len > resp->max_req_win_len) {
                PMD_DRV_LOG(ERR, "Unsupported request length\n");
                rc = -EINVAL;
        }
        bp->max_req_len = rte_le_to_cpu_16(resp->max_req_win_len);
        bp->hwrm_max_ext_req_len = rte_le_to_cpu_16(resp->max_ext_req_len);
        if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
                bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;

        max_resp_len = rte_le_to_cpu_16(resp->max_resp_len);
        dev_caps_cfg = rte_le_to_cpu_32(resp->dev_caps_cfg);

        if (bp->max_resp_len != max_resp_len) {
                sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x",
                        bp->pdev->addr.domain, bp->pdev->addr.bus,
                        bp->pdev->addr.devid, bp->pdev->addr.function);

                rte_free(bp->hwrm_cmd_resp_addr);

                bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
                if (bp->hwrm_cmd_resp_addr == NULL) {
                        rc = -ENOMEM;
                        goto error;
                }
                rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
                bp->hwrm_cmd_resp_dma_addr =
                        rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
                if (bp->hwrm_cmd_resp_dma_addr == RTE_BAD_IOVA) {
                        PMD_DRV_LOG(ERR,
                        "Unable to map response buffer to physical memory.\n");
                        rc = -ENOMEM;
                        goto error;
                }
                bp->max_resp_len = max_resp_len;
        }

        if ((dev_caps_cfg &
                HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
            (dev_caps_cfg &
             HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) {
                PMD_DRV_LOG(DEBUG, "Short command supported\n");
                bp->flags |= BNXT_FLAG_SHORT_CMD;
        }

        if (((dev_caps_cfg &
              HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
             (dev_caps_cfg &
              HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) ||
            bp->hwrm_max_ext_req_len > HWRM_MAX_REQ_LEN) {
                sprintf(type, "bnxt_hwrm_short_%04x:%02x:%02x:%02x",
                        bp->pdev->addr.domain, bp->pdev->addr.bus,
                        bp->pdev->addr.devid, bp->pdev->addr.function);

                rte_free(bp->hwrm_short_cmd_req_addr);

                bp->hwrm_short_cmd_req_addr =
                                rte_malloc(type, bp->hwrm_max_ext_req_len, 0);
                if (bp->hwrm_short_cmd_req_addr == NULL) {
                        rc = -ENOMEM;
                        goto error;
                }
                rte_mem_lock_page(bp->hwrm_short_cmd_req_addr);
                bp->hwrm_short_cmd_req_dma_addr =
                        rte_mem_virt2iova(bp->hwrm_short_cmd_req_addr);
                if (bp->hwrm_short_cmd_req_dma_addr == RTE_BAD_IOVA) {
                        rte_free(bp->hwrm_short_cmd_req_addr);
                        PMD_DRV_LOG(ERR,
                                "Unable to map buffer to physical memory.\n");
                        rc = -ENOMEM;
                        goto error;
                }
        }
        if (dev_caps_cfg &
            HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED) {
                bp->flags |= BNXT_FLAG_KONG_MB_EN;
                PMD_DRV_LOG(DEBUG, "Kong mailbox channel enabled\n");
        }
        if (dev_caps_cfg &
            HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
                PMD_DRV_LOG(DEBUG, "FW supports Trusted VFs\n");
        if (dev_caps_cfg &
            HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED) {
                bp->flags |= BNXT_FLAG_ADV_FLOW_MGMT;
                PMD_DRV_LOG(DEBUG, "FW supports advanced flow management\n");
        }

error:
        HWRM_UNLOCK();
        return rc;
}

int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
{
        int rc;
        struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
        struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;

        if (!(bp->flags & BNXT_FLAG_REGISTERED))
                return 0;

        HWRM_PREP(req, FUNC_DRV_UNRGTR, BNXT_USE_CHIMP_MB);
        req.flags = flags;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
{
        int rc = 0;
        struct hwrm_port_phy_cfg_input req = {0};
        struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t enables = 0;

        HWRM_PREP(req, PORT_PHY_CFG, BNXT_USE_CHIMP_MB);

        if (conf->link_up) {
                /* Setting a fixed speed while autoneg is on, so disable it */
                if (bp->link_info.auto_mode && conf->link_speed) {
                        req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE;
                        PMD_DRV_LOG(DEBUG, "Disabling AutoNeg\n");
                }

                req.flags = rte_cpu_to_le_32(conf->phy_flags);
                req.force_link_speed = rte_cpu_to_le_16(conf->link_speed);
                enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
                /*
                 * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
                 * any auto mode, even "none".
                 */
                if (!conf->link_speed) {
                        /* No speeds specified. Enable AutoNeg - all speeds */
                        req.auto_mode =
                                HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS;
                }
                /* AutoNeg - Advertise speeds specified. */
                if (conf->auto_link_speed_mask &&
                    !(conf->phy_flags & HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE)) {
                        req.auto_mode =
                                HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
                        req.auto_link_speed_mask =
                                conf->auto_link_speed_mask;
                        enables |=
                        HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
                }

                req.auto_duplex = conf->duplex;
                enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
                req.auto_pause = conf->auto_pause;
                req.force_pause = conf->force_pause;
                /* Prefer autoneg pause; fall back to forced pause otherwise */
                if (req.auto_pause && !req.force_pause)
                        enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
                else
                        enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;

                req.enables = rte_cpu_to_le_32(enables);
        } else {
                req.flags =
                rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN);
                PMD_DRV_LOG(INFO, "Force Link Down\n");
        }

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
                                   struct bnxt_link_info *link_info)
{
        int rc = 0;
        struct hwrm_port_phy_qcfg_input req = {0};
        struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, PORT_PHY_QCFG, BNXT_USE_CHIMP_MB);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

        link_info->phy_link_status = resp->link;
        link_info->link_up =
                (link_info->phy_link_status ==
                 HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) ? 1 : 0;
        link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
        link_info->duplex = resp->duplex_cfg;
        link_info->pause = resp->pause;
        link_info->auto_pause = resp->auto_pause;
        link_info->force_pause = resp->force_pause;
        link_info->auto_mode = resp->auto_mode;
        link_info->phy_type = resp->phy_type;
        link_info->media_type = resp->media_type;

        link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
        link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
        link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
        link_info->force_link_speed = rte_le_to_cpu_16(resp->force_link_speed);
        link_info->phy_ver[0] = resp->phy_maj;
        link_info->phy_ver[1] = resp->phy_min;
        link_info->phy_ver[2] = resp->phy_bld;

        HWRM_UNLOCK();

        PMD_DRV_LOG(DEBUG, "Link Speed %d\n", link_info->link_speed);
        PMD_DRV_LOG(DEBUG, "Auto Mode %d\n", link_info->auto_mode);
        PMD_DRV_LOG(DEBUG, "Support Speeds %x\n", link_info->support_speeds);
        PMD_DRV_LOG(DEBUG, "Auto Link Speed %x\n", link_info->auto_link_speed);
        PMD_DRV_LOG(DEBUG, "Auto Link Speed Mask %x\n",
                    link_info->auto_link_speed_mask);
        PMD_DRV_LOG(DEBUG, "Forced Link Speed %x\n",
                    link_info->force_link_speed);

        return rc;
}

int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
        struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
        int i;

        HWRM_PREP(req, QUEUE_QPORTCFG, BNXT_USE_CHIMP_MB);

        req.flags = HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX;
        /* HWRM Version >= 1.9.1 */
        if (bp->hwrm_spec_code >= HWRM_VERSION_1_9_1)
                req.drv_qmap_cap =
                        HWRM_QUEUE_QPORTCFG_INPUT_DRV_QMAP_CAP_ENABLED;
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

#define GET_QUEUE_INFO(x) \
        bp->cos_queue[x].id = resp->queue_id##x; \
        bp->cos_queue[x].profile = resp->queue_id##x##_service_profile
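
        /*
         * The ## token pasting above expands GET_QUEUE_INFO(0) into reads of
         * resp->queue_id0 and resp->queue_id0_service_profile, and so on for
         * each of the eight CoS queues.
         */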

        GET_QUEUE_INFO(0);
        GET_QUEUE_INFO(1);
        GET_QUEUE_INFO(2);
        GET_QUEUE_INFO(3);
        GET_QUEUE_INFO(4);
        GET_QUEUE_INFO(5);
        GET_QUEUE_INFO(6);
        GET_QUEUE_INFO(7);

        bp->max_tc = resp->max_configurable_queues;
        bp->max_lltc = resp->max_configurable_lossless_queues;
        if (bp->max_tc > BNXT_MAX_QUEUE)
                bp->max_tc = BNXT_MAX_QUEUE;
        bp->max_q = bp->max_tc;

        /* All resp fields have been consumed; the code below only touches
         * bp->cos_queue, so the HWRM lock can be released here.
         */
        HWRM_UNLOCK();

        if (bp->hwrm_spec_code < HWRM_VERSION_1_9_1) {
                bp->tx_cosq_id = bp->cos_queue[0].id;
        } else {
                /* iterate and find the COSq profile to use for Tx */
                for (i = 0; i < BNXT_COS_QUEUE_COUNT; i++) {
                        if (bp->cos_queue[i].profile ==
                                HWRM_QUEUE_SERVICE_PROFILE_LOSSY) {
                                bp->tx_cosq_id = bp->cos_queue[i].id;
                                break;
                        }
                }
        }

        PMD_DRV_LOG(DEBUG, "Tx Cos Queue to use: %d\n", bp->tx_cosq_id);

        return rc;
}

1223 int bnxt_hwrm_ring_alloc(struct bnxt *bp,
1224                          struct bnxt_ring *ring,
1225                          uint32_t ring_type, uint32_t map_index,
1226                          uint32_t stats_ctx_id, uint32_t cmpl_ring_id)
1227 {
1228         int rc = 0;
1229         uint32_t enables = 0;
1230         struct hwrm_ring_alloc_input req = {.req_type = 0 };
1231         struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1232         struct rte_mempool *mb_pool;
1233         uint16_t rx_buf_size;
1234
1235         HWRM_PREP(req, RING_ALLOC, BNXT_USE_CHIMP_MB);
1236
1237         req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
1238         req.fbo = rte_cpu_to_le_32(0);
1239         /* Association of ring index with doorbell index */
1240         req.logical_id = rte_cpu_to_le_16(map_index);
1241         req.length = rte_cpu_to_le_32(ring->ring_size);
1242
1243         switch (ring_type) {
1244         case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
1245                 req.ring_type = ring_type;
1246                 req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
1247                 req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id);
1248                 req.queue_id = rte_cpu_to_le_16(bp->tx_cosq_id);
1249                 if (stats_ctx_id != INVALID_STATS_CTX_ID)
1250                         enables |=
1251                         HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
1252                 break;
1253         case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
1254                 req.ring_type = ring_type;
1255                 req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
1256                 req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id);
1257                 if (BNXT_CHIP_THOR(bp)) {
1258                         mb_pool = bp->rx_queues[0]->mb_pool;
1259                         rx_buf_size = rte_pktmbuf_data_room_size(mb_pool) -
1260                                       RTE_PKTMBUF_HEADROOM;
1261                         rx_buf_size = RTE_MIN(BNXT_MAX_PKT_LEN, rx_buf_size);
1262                         req.rx_buf_size = rte_cpu_to_le_16(rx_buf_size);
1263                         enables |=
1264                                 HWRM_RING_ALLOC_INPUT_ENABLES_RX_BUF_SIZE_VALID;
1265                 }
1266                 if (stats_ctx_id != INVALID_STATS_CTX_ID)
1267                         enables |=
1268                                 HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
1269                 break;
1270         case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
1271                 req.ring_type = ring_type;
1272                 if (BNXT_HAS_NQ(bp)) {
1273                         /* Association of cp ring with nq */
1274                         req.nq_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
1275                         enables |=
1276                                 HWRM_RING_ALLOC_INPUT_ENABLES_NQ_RING_ID_VALID;
1277                 }
1278                 req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
1279                 break;
1280         case HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ:
1281                 req.ring_type = ring_type;
1282                 req.page_size = BNXT_PAGE_SHFT;
1283                 req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
1284                 break;
1285         case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG:
1286                 req.ring_type = ring_type;
1287                 req.rx_ring_id = rte_cpu_to_le_16(ring->fw_rx_ring_id);
1288
1289                 mb_pool = bp->rx_queues[0]->mb_pool;
1290                 rx_buf_size = rte_pktmbuf_data_room_size(mb_pool) -
1291                               RTE_PKTMBUF_HEADROOM;
1292                 rx_buf_size = RTE_MIN(BNXT_MAX_PKT_LEN, rx_buf_size);
1293                 req.rx_buf_size = rte_cpu_to_le_16(rx_buf_size);
1294
1295                 req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id);
1296                 enables |= HWRM_RING_ALLOC_INPUT_ENABLES_RX_RING_ID_VALID |
1297                            HWRM_RING_ALLOC_INPUT_ENABLES_RX_BUF_SIZE_VALID |
1298                            HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
1299                 break;
1300         default:
1301                 PMD_DRV_LOG(ERR, "hwrm alloc invalid ring type %d\n",
1302                         ring_type);
1303                 HWRM_UNLOCK();
1304                 return -EINVAL;
1305         }
1306         req.enables = rte_cpu_to_le_32(enables);
1307
1308         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1309
1310         if (rc || resp->error_code) {
1311                 if (rc == 0 && resp->error_code)
1312                         rc = rte_le_to_cpu_16(resp->error_code);
1313                 switch (ring_type) {
1314                 case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
1315                         PMD_DRV_LOG(ERR,
1316                                 "hwrm_ring_alloc cp failed. rc:%d\n", rc);
1317                         HWRM_UNLOCK();
1318                         return rc;
1319                 case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
1320                         PMD_DRV_LOG(ERR,
1321                                     "hwrm_ring_alloc rx failed. rc:%d\n", rc);
1322                         HWRM_UNLOCK();
1323                         return rc;
1324                 case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG:
1325                         PMD_DRV_LOG(ERR,
1326                                     "hwrm_ring_alloc rx agg failed. rc:%d\n",
1327                                     rc);
1328                         HWRM_UNLOCK();
1329                         return rc;
1330                 case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
1331                         PMD_DRV_LOG(ERR,
1332                                     "hwrm_ring_alloc tx failed. rc:%d\n", rc);
1333                         HWRM_UNLOCK();
1334                         return rc;
1335                 case HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ:
1336                         PMD_DRV_LOG(ERR,
1337                                     "hwrm_ring_alloc nq failed. rc:%d\n", rc);
1338                         HWRM_UNLOCK();
1339                         return rc;
1340                 default:
1341                         PMD_DRV_LOG(ERR, "Invalid ring. rc:%d\n", rc);
1342                         HWRM_UNLOCK();
1343                         return rc;
1344                 }
1345         }
1346
1347         ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
1348         HWRM_UNLOCK();
1349         return rc;
1350 }
1351
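/*
 * Illustrative sketch, not driver code: how a caller drives the ring
 * allocator above for a TX ring.  The bnxt_hwrm_ring_alloc() signature
 * assumed here is inferred from the parameters referenced in its body
 * (bp, ring, ring_type, map_index, stats_ctx_id, cmpl_ring_id); the
 * completion ring and stats context must already exist.
 */
static __rte_unused int example_alloc_tx_ring(struct bnxt *bp,
                                              struct bnxt_tx_queue *txq,
                                              unsigned int idx)
{
        struct bnxt_ring *ring = txq->tx_ring->tx_ring_struct;
        struct bnxt_cp_ring_info *cpr = txq->cp_ring;

        return bnxt_hwrm_ring_alloc(bp, ring,
                                    HWRM_RING_ALLOC_INPUT_RING_TYPE_TX,
                                    idx, cpr->hw_stats_ctx_id,
                                    cpr->cp_ring_struct->fw_ring_id);
}
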
1352 int bnxt_hwrm_ring_free(struct bnxt *bp,
1353                         struct bnxt_ring *ring, uint32_t ring_type)
1354 {
1355         int rc;
1356         struct hwrm_ring_free_input req = {.req_type = 0 };
1357         struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
1358
1359         HWRM_PREP(req, RING_FREE, BNXT_USE_CHIMP_MB);
1360
1361         req.ring_type = ring_type;
1362         req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);
1363
1364         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1365
1366         if (rc || resp->error_code) {
1367                 if (rc == 0 && resp->error_code)
1368                         rc = rte_le_to_cpu_16(resp->error_code);
1369                 HWRM_UNLOCK();
1370
1371                 switch (ring_type) {
1372                 case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
1373                         PMD_DRV_LOG(ERR, "hwrm_ring_free cp failed. rc:%d\n",
1374                                 rc);
1375                         return rc;
1376                 case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
1377                         PMD_DRV_LOG(ERR, "hwrm_ring_free rx failed. rc:%d\n",
1378                                 rc);
1379                         return rc;
1380                 case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
1381                         PMD_DRV_LOG(ERR, "hwrm_ring_free tx failed. rc:%d\n",
1382                                 rc);
1383                         return rc;
1384                 case HWRM_RING_FREE_INPUT_RING_TYPE_NQ:
1385                         PMD_DRV_LOG(ERR,
1386                                     "hwrm_ring_free nq failed. rc:%d\n", rc);
1387                         return rc;
1388                 case HWRM_RING_FREE_INPUT_RING_TYPE_RX_AGG:
1389                         PMD_DRV_LOG(ERR,
1390                                     "hwrm_ring_free agg failed. rc:%d\n", rc);
1391                         return rc;
1392                 default:
1393                         PMD_DRV_LOG(ERR, "Invalid ring, rc:%d\n", rc);
1394                         return rc;
1395                 }
1396         }
1397         HWRM_UNLOCK();
1398         return 0;
1399 }
1400
1401 int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
1402 {
1403         int rc = 0;
1404         struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
1405         struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1406
1407         HWRM_PREP(req, RING_GRP_ALLOC, BNXT_USE_CHIMP_MB);
1408
1409         req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
1410         req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
1411         req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
1412         req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);
1413
1414         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1415
1416         HWRM_CHECK_RESULT();
1417
1418         bp->grp_info[idx].fw_grp_id =
1419             rte_le_to_cpu_16(resp->ring_group_id);
1420
1421         HWRM_UNLOCK();
1422
1423         return rc;
1424 }
1425
1426 int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
1427 {
1428         int rc;
1429         struct hwrm_ring_grp_free_input req = {.req_type = 0 };
1430         struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;
1431
1432         HWRM_PREP(req, RING_GRP_FREE, BNXT_USE_CHIMP_MB);
1433
1434         req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);
1435
1436         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1437
1438         HWRM_CHECK_RESULT();
1439         HWRM_UNLOCK();
1440
1441         bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
1442         return rc;
1443 }
1444
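/*
 * Sketch (assumption, not driver code): bp->grp_info[idx] must already
 * hold valid cp/rx/ag ring IDs and a stats context before
 * bnxt_hwrm_ring_grp_alloc() is called, since the request above simply
 * snapshots those four fields.
 */
static __rte_unused int example_alloc_one_ring_grp(struct bnxt *bp,
                                                   unsigned int idx)
{
        if (bp->grp_info[idx].cp_fw_ring_id == INVALID_HW_RING_ID ||
            bp->grp_info[idx].rx_fw_ring_id == INVALID_HW_RING_ID)
                return -EINVAL; /* rings not allocated yet */

        return bnxt_hwrm_ring_grp_alloc(bp, idx);
}
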
1445 int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
1446 {
1447         int rc = 0;
1448         struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
1449         struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
1450
1451         if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
1452                 return rc;
1453
1454         HWRM_PREP(req, STAT_CTX_CLR_STATS, BNXT_USE_CHIMP_MB);
1455
1456         req.stat_ctx_id = rte_cpu_to_le_32(cpr->hw_stats_ctx_id);
1457
1458         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1459
1460         HWRM_CHECK_RESULT();
1461         HWRM_UNLOCK();
1462
1463         return rc;
1464 }
1465
1466 int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1467                                 unsigned int idx __rte_unused)
1468 {
1469         int rc;
1470         struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
1471         struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1472
1473         HWRM_PREP(req, STAT_CTX_ALLOC, BNXT_USE_CHIMP_MB);
1474
1475         req.update_period_ms = rte_cpu_to_le_32(0);
1476
1477         req.stats_dma_addr =
1478             rte_cpu_to_le_64(cpr->hw_stats_map);
1479
1480         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1481
1482         HWRM_CHECK_RESULT();
1483
1484         cpr->hw_stats_ctx_id = rte_le_to_cpu_32(resp->stat_ctx_id);
1485
1486         HWRM_UNLOCK();
1487
1488         return rc;
1489 }
1490
1491 int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1492                                 unsigned int idx __rte_unused)
1493 {
1494         int rc;
1495         struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
1496         struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;
1497
1498         HWRM_PREP(req, STAT_CTX_FREE, BNXT_USE_CHIMP_MB);
1499
1500         req.stat_ctx_id = rte_cpu_to_le_32(cpr->hw_stats_ctx_id);
1501
1502         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1503
1504         HWRM_CHECK_RESULT();
1505         HWRM_UNLOCK();
1506
1507         return rc;
1508 }
1509
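/*
 * Sketch of the per-queue stats ordering implied by this file
 * (assumption: error unwinding elided): allocate the context first so
 * that ring allocation can reference cpr->hw_stats_ctx_id, then clear
 * the counters to start from zero.
 */
static __rte_unused int example_queue_stats_setup(struct bnxt *bp,
                                                  struct bnxt_cp_ring_info *cpr,
                                                  unsigned int idx)
{
        int rc;

        rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, idx);
        if (rc)
                return rc;
        /* ... ring allocation consumes cpr->hw_stats_ctx_id here ... */
        return bnxt_hwrm_stat_clear(bp, cpr);
}
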
1510 int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1511 {
1512         int rc = 0, i, j;
1513         struct hwrm_vnic_alloc_input req = { 0 };
1514         struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1515
1516         if (!BNXT_HAS_RING_GRPS(bp))
1517                 goto skip_ring_grps;
1518
1519         /* map ring groups to this vnic */
1520         PMD_DRV_LOG(DEBUG, "Alloc VNIC. Start %x, End %x\n",
1521                 vnic->start_grp_id, vnic->end_grp_id);
1522         for (i = vnic->start_grp_id, j = 0; i < vnic->end_grp_id; i++, j++)
1523                 vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
1524
1525         vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
1526         vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
1527         vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
1528         vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
1529
1530 skip_ring_grps:
1531         vnic->mru = bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
1532                                 RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE;
1533         HWRM_PREP(req, VNIC_ALLOC, BNXT_USE_CHIMP_MB);
1534
1535         if (vnic->func_default)
1536                 req.flags =
1537                         rte_cpu_to_le_32(HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT);
1538         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1539
1540         HWRM_CHECK_RESULT();
1541
1542         vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
1543         HWRM_UNLOCK();
1544         PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
1545         return rc;
1546 }
1547
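/*
 * Sketch of the non-Thor VNIC bring-up sequence as implied by the
 * helpers in this file (assumption: error unwinding elided; Thor
 * allocates one context per ring pair instead, as done in
 * bnxt_hwrm_vnic_rss_cfg_thor() below).
 */
static __rte_unused int example_vnic_bringup(struct bnxt *bp,
                                             struct bnxt_vnic_info *vnic)
{
        int rc;

        rc = bnxt_hwrm_vnic_alloc(bp, vnic);        /* sets fw_vnic_id */
        if (rc)
                return rc;
        rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 0); /* sets rss_rule */
        if (rc)
                return rc;
        rc = bnxt_hwrm_vnic_cfg(bp, vnic);          /* MRU, rings, rules */
        if (rc)
                return rc;
        return bnxt_hwrm_vnic_rss_cfg(bp, vnic);    /* hash key and table */
}
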
1548 static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
1549                                         struct bnxt_vnic_info *vnic,
1550                                         struct bnxt_plcmodes_cfg *pmode)
1551 {
1552         int rc = 0;
1553         struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 };
1554         struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1555
1556         HWRM_PREP(req, VNIC_PLCMODES_QCFG, BNXT_USE_CHIMP_MB);
1557
1558         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1559
1560         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1561
1562         HWRM_CHECK_RESULT();
1563
1564         pmode->flags = rte_le_to_cpu_32(resp->flags);
1565         /* dflt_vnic bit doesn't exist in the _cfg command */
1566         pmode->flags &= ~(HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC);
1567         pmode->jumbo_thresh = rte_le_to_cpu_16(resp->jumbo_thresh);
1568         pmode->hds_offset = rte_le_to_cpu_16(resp->hds_offset);
1569         pmode->hds_threshold = rte_le_to_cpu_16(resp->hds_threshold);
1570
1571         HWRM_UNLOCK();
1572
1573         return rc;
1574 }
1575
1576 static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
1577                                        struct bnxt_vnic_info *vnic,
1578                                        struct bnxt_plcmodes_cfg *pmode)
1579 {
1580         int rc = 0;
1581         struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
1582         struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1583
1584         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1585                 PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
1586                 return rc;
1587         }
1588
1589         HWRM_PREP(req, VNIC_PLCMODES_CFG, BNXT_USE_CHIMP_MB);
1590
1591         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1592         req.flags = rte_cpu_to_le_32(pmode->flags);
1593         req.jumbo_thresh = rte_cpu_to_le_16(pmode->jumbo_thresh);
1594         req.hds_offset = rte_cpu_to_le_16(pmode->hds_offset);
1595         req.hds_threshold = rte_cpu_to_le_16(pmode->hds_threshold);
1596         req.enables = rte_cpu_to_le_32(
1597             HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID |
1598             HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID |
1599             HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID
1600         );
1601
1602         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1603
1604         HWRM_CHECK_RESULT();
1605         HWRM_UNLOCK();
1606
1607         return rc;
1608 }
1609
1610 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1611 {
1612         int rc = 0;
1613         struct hwrm_vnic_cfg_input req = {.req_type = 0 };
1614         struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1615         struct bnxt_plcmodes_cfg pmodes = { 0 };
1616         uint32_t ctx_enable_flag = 0;
1617         uint32_t enables = 0;
1618
1619         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1620                 PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
1621                 return rc;
1622         }
1623
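        /*
         * Snapshot the placement modes first: VNIC_CFG appears to reset
         * them, so they are restored once the command completes (see the
         * trailing bnxt_hwrm_vnic_plcmodes_cfg() call below).
         */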
1624         rc = bnxt_hwrm_vnic_plcmodes_qcfg(bp, vnic, &pmodes);
1625         if (rc)
1626                 return rc;
1627
1628         HWRM_PREP(req, VNIC_CFG, BNXT_USE_CHIMP_MB);
1629
1630         if (BNXT_CHIP_THOR(bp)) {
1631                 struct bnxt_rx_queue *rxq = bp->eth_dev->data->rx_queues[0];
1632                 struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
1633                 struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
1634
1635                 req.default_rx_ring_id =
1636                         rte_cpu_to_le_16(rxr->rx_ring_struct->fw_ring_id);
1637                 req.default_cmpl_ring_id =
1638                         rte_cpu_to_le_16(cpr->cp_ring_struct->fw_ring_id);
1639                 enables = HWRM_VNIC_CFG_INPUT_ENABLES_DEFAULT_RX_RING_ID |
1640                           HWRM_VNIC_CFG_INPUT_ENABLES_DEFAULT_CMPL_RING_ID;
1641                 goto config_mru;
1642         }
1643
1644         /* Only RSS is supported for now; COS and LB are TBD. */
1645         enables = HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP;
1646         if (vnic->lb_rule != (uint16_t)HWRM_NA_SIGNATURE)
1647                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE;
1648         if (vnic->cos_rule != (uint16_t)HWRM_NA_SIGNATURE)
1649                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE;
1650         if (vnic->rss_rule != (uint16_t)HWRM_NA_SIGNATURE) {
1651                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_MRU;
1652                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
1653         }
1654         enables |= ctx_enable_flag;
1655         req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);
1656         req.rss_rule = rte_cpu_to_le_16(vnic->rss_rule);
1657         req.cos_rule = rte_cpu_to_le_16(vnic->cos_rule);
1658         req.lb_rule = rte_cpu_to_le_16(vnic->lb_rule);
1659
1660 config_mru:
1661         req.enables = rte_cpu_to_le_32(enables);
1662         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1663         req.mru = rte_cpu_to_le_16(vnic->mru);
1664         /* Configure default VNIC only once. */
1665         if (vnic->func_default && !(bp->flags & BNXT_FLAG_DFLT_VNIC_SET)) {
1666                 req.flags |=
1667                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
1668                 bp->flags |= BNXT_FLAG_DFLT_VNIC_SET;
1669         }
1670         if (vnic->vlan_strip)
1671                 req.flags |=
1672                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
1673         if (vnic->bd_stall)
1674                 req.flags |=
1675                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
1676         if (vnic->roce_dual)
1677                 req.flags |= rte_cpu_to_le_32(
1678                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE);
1679         if (vnic->roce_only)
1680                 req.flags |= rte_cpu_to_le_32(
1681                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE);
1682         if (vnic->rss_dflt_cr)
1683                 req.flags |= rte_cpu_to_le_32(
1684                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE);
1685
1686         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1687
1688         HWRM_CHECK_RESULT();
1689         HWRM_UNLOCK();
1690
1691         rc = bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes);
1692
1693         return rc;
1694 }
1695
1696 int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
1697                 int16_t fw_vf_id)
1698 {
1699         int rc = 0;
1700         struct hwrm_vnic_qcfg_input req = {.req_type = 0 };
1701         struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1702
1703         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1704                 PMD_DRV_LOG(DEBUG, "VNIC QCFG ID %d\n", vnic->fw_vnic_id);
1705                 return rc;
1706         }
1707         HWRM_PREP(req, VNIC_QCFG, BNXT_USE_CHIMP_MB);
1708
1709         req.enables =
1710                 rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID);
1711         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1712         req.vf_id = rte_cpu_to_le_16(fw_vf_id);
1713
1714         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1715
1716         HWRM_CHECK_RESULT();
1717
1718         vnic->dflt_ring_grp = rte_le_to_cpu_16(resp->dflt_ring_grp);
1719         vnic->rss_rule = rte_le_to_cpu_16(resp->rss_rule);
1720         vnic->cos_rule = rte_le_to_cpu_16(resp->cos_rule);
1721         vnic->lb_rule = rte_le_to_cpu_16(resp->lb_rule);
1722         vnic->mru = rte_le_to_cpu_16(resp->mru);
1723         vnic->func_default = rte_le_to_cpu_32(
1724                         resp->flags) & HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT;
1725         vnic->vlan_strip = rte_le_to_cpu_32(resp->flags) &
1726                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE;
1727         vnic->bd_stall = rte_le_to_cpu_32(resp->flags) &
1728                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE;
1729         vnic->roce_dual = rte_le_to_cpu_32(resp->flags) &
1730                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE;
1731         vnic->roce_only = rte_le_to_cpu_32(resp->flags) &
1732                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE;
1733         vnic->rss_dflt_cr = rte_le_to_cpu_32(resp->flags) &
1734                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE;
1735
1736         HWRM_UNLOCK();
1737
1738         return rc;
1739 }
1740
1741 int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp,
1742                              struct bnxt_vnic_info *vnic, uint16_t ctx_idx)
1743 {
1744         int rc = 0;
1745         uint16_t ctx_id;
1746         struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
1747         struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
1748                                                 bp->hwrm_cmd_resp_addr;
1749
1750         HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC, BNXT_USE_CHIMP_MB);
1751
1752         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1753         HWRM_CHECK_RESULT();
1754
1755         ctx_id = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
1756         if (!BNXT_HAS_RING_GRPS(bp))
1757                 vnic->fw_grp_ids[ctx_idx] = ctx_id;
1758         else if (ctx_idx == 0)
1759                 vnic->rss_rule = ctx_id;
1760
1761         HWRM_UNLOCK();
1762
1763         return rc;
1764 }
1765
1766 int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp,
1767                             struct bnxt_vnic_info *vnic, uint16_t ctx_idx)
1768 {
1769         int rc = 0;
1770         struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
1771         struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
1772                                                 bp->hwrm_cmd_resp_addr;
1773
1774         if (ctx_idx == (uint16_t)HWRM_NA_SIGNATURE) {
1775                 PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
1776                 return rc;
1777         }
1778         HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE, BNXT_USE_CHIMP_MB);
1779
1780         req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(ctx_idx);
1781
1782         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1783
1784         HWRM_CHECK_RESULT();
1785         HWRM_UNLOCK();
1786
1787         return rc;
1788 }
1789
1790 int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1791 {
1792         int rc = 0;
1793         struct hwrm_vnic_free_input req = {.req_type = 0 };
1794         struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;
1795
1796         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1797                 PMD_DRV_LOG(DEBUG, "VNIC FREE ID %x\n", vnic->fw_vnic_id);
1798                 return rc;
1799         }
1800
1801         HWRM_PREP(req, VNIC_FREE, BNXT_USE_CHIMP_MB);
1802
1803         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1804
1805         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1806
1807         HWRM_CHECK_RESULT();
1808         HWRM_UNLOCK();
1809
1810         vnic->fw_vnic_id = INVALID_HW_RING_ID;
1811         /* Clear the flag so the default VNIC is reconfigured next time. */
1812         if (vnic->func_default && (bp->flags & BNXT_FLAG_DFLT_VNIC_SET))
1813                 bp->flags &= ~BNXT_FLAG_DFLT_VNIC_SET;
1814
1815         return rc;
1816 }
1817
1818 static int
1819 bnxt_hwrm_vnic_rss_cfg_thor(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1820 {
1821         int i;
1822         int rc = 0;
1823         int nr_ctxs = vnic->num_lb_ctxts;
1824         struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
1825         struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1826
1827         for (i = 0; i < nr_ctxs; i++) {
1828                 HWRM_PREP(req, VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
1829
1830                 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1831                 req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
1832                 req.hash_mode_flags = vnic->hash_mode;
1833
1834                 req.hash_key_tbl_addr =
1835                         rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
1836
1837                 req.ring_grp_tbl_addr =
1838                         rte_cpu_to_le_64(vnic->rss_table_dma_addr +
1839                                          i * HW_HASH_INDEX_SIZE);
1840                 req.ring_table_pair_index = i;
1841                 req.rss_ctx_idx = rte_cpu_to_le_16(vnic->fw_grp_ids[i]);
1842
1843                 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req),
1844                                             BNXT_USE_CHIMP_MB);
1845
1846                 HWRM_CHECK_RESULT();
1847                 HWRM_UNLOCK();
1848         }
1849
1850         return rc;
1851 }
1852
1853 int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
1854                            struct bnxt_vnic_info *vnic)
1855 {
1856         int rc = 0;
1857         struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
1858         struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1859
1860         if (!vnic->rss_table)
1861                 return 0;
1862
1863         if (BNXT_CHIP_THOR(bp))
1864                 return bnxt_hwrm_vnic_rss_cfg_thor(bp, vnic);
1865
1866         HWRM_PREP(req, VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
1867
1868         req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
1869         req.hash_mode_flags = vnic->hash_mode;
1870
1871         req.ring_grp_tbl_addr =
1872             rte_cpu_to_le_64(vnic->rss_table_dma_addr);
1873         req.hash_key_tbl_addr =
1874             rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
1875         req.rss_ctx_idx = rte_cpu_to_le_16(vnic->rss_rule);
1876         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1877
1878         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1879
1880         HWRM_CHECK_RESULT();
1881         HWRM_UNLOCK();
1882
1883         return rc;
1884 }
1885
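/*
 * Sketch (assumption: HW_HASH_KEY_SIZE-byte Toeplitz key, as allocated in
 * bnxt_vnic.c): the caller fills the DMA-able key buffer and hash type
 * before invoking bnxt_hwrm_vnic_rss_cfg(); the command itself only
 * carries buffer addresses.
 */
static __rte_unused int example_rss_setup(struct bnxt *bp,
                                          struct bnxt_vnic_info *vnic,
                                          const uint8_t *key, size_t key_len)
{
        if (vnic->rss_hash_key == NULL || key_len > HW_HASH_KEY_SIZE)
                return -EINVAL;

        memcpy(vnic->rss_hash_key, key, key_len);
        vnic->hash_type = HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4 |
                          HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
        return bnxt_hwrm_vnic_rss_cfg(bp, vnic);
}
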
1886 int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
1887                         struct bnxt_vnic_info *vnic)
1888 {
1889         int rc = 0;
1890         struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
1891         struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1892         uint16_t size;
1893
1894         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1895                 PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
1896                 return rc;
1897         }
1898
1899         HWRM_PREP(req, VNIC_PLCMODES_CFG, BNXT_USE_CHIMP_MB);
1900
1901         req.flags = rte_cpu_to_le_32(
1902                         HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);
1903
1904         req.enables = rte_cpu_to_le_32(
1905                 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID);
1906
1907         size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
1908         size -= RTE_PKTMBUF_HEADROOM;
1909         size = RTE_MIN(BNXT_MAX_PKT_LEN, size);
1910
1911         req.jumbo_thresh = rte_cpu_to_le_16(size);
1912         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1913
1914         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1915
1916         HWRM_CHECK_RESULT();
1917         HWRM_UNLOCK();
1918
1919         return rc;
1920 }
1921
1922 int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
1923                         struct bnxt_vnic_info *vnic, bool enable)
1924 {
1925         int rc = 0;
1926         struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
1927         struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1928
1929         if (BNXT_CHIP_THOR(bp))
1930                 return 0;
1931
1932         HWRM_PREP(req, VNIC_TPA_CFG, BNXT_USE_CHIMP_MB);
1933
1934         if (enable) {
1935                 req.enables = rte_cpu_to_le_32(
1936                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
1937                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS |
1938                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN);
1939                 req.flags = rte_cpu_to_le_32(
1940                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
1941                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
1942                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE |
1943                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO |
1944                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
1945                         HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ);
1946                 req.max_agg_segs = rte_cpu_to_le_16(5);
1947                 req.max_aggs =
1948                         rte_cpu_to_le_16(HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX);
1949                 req.min_agg_len = rte_cpu_to_le_32(512);
1950         }
1951         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1952
1953         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1954
1955         HWRM_CHECK_RESULT();
1956         HWRM_UNLOCK();
1957
1958         return rc;
1959 }
1960
1961 int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
1962 {
1963         struct hwrm_func_cfg_input req = {0};
1964         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1965         int rc;
1966
1967         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
1968         req.enables = rte_cpu_to_le_32(
1969                         HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
1970         memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
1971         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
1972
1973         HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
1974
1975         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1976         HWRM_CHECK_RESULT();
1977         HWRM_UNLOCK();
1978
1979         bp->pf.vf_info[vf].random_mac = false;
1980
1981         return rc;
1982 }
1983
1984 int bnxt_hwrm_func_qstats_tx_drop(struct bnxt *bp, uint16_t fid,
1985                                   uint64_t *dropped)
1986 {
1987         int rc = 0;
1988         struct hwrm_func_qstats_input req = {.req_type = 0};
1989         struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
1990
1991         HWRM_PREP(req, FUNC_QSTATS, BNXT_USE_CHIMP_MB);
1992
1993         req.fid = rte_cpu_to_le_16(fid);
1994
1995         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1996
1997         HWRM_CHECK_RESULT();
1998
1999         if (dropped)
2000                 *dropped = rte_le_to_cpu_64(resp->tx_drop_pkts);
2001
2002         HWRM_UNLOCK();
2003
2004         return rc;
2005 }
2006
2007 int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
2008                           struct rte_eth_stats *stats)
2009 {
2010         int rc = 0;
2011         struct hwrm_func_qstats_input req = {.req_type = 0};
2012         struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
2013
2014         HWRM_PREP(req, FUNC_QSTATS, BNXT_USE_CHIMP_MB);
2015
2016         req.fid = rte_cpu_to_le_16(fid);
2017
2018         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2019
2020         HWRM_CHECK_RESULT();
2021
2022         stats->ipackets = rte_le_to_cpu_64(resp->rx_ucast_pkts);
2023         stats->ipackets += rte_le_to_cpu_64(resp->rx_mcast_pkts);
2024         stats->ipackets += rte_le_to_cpu_64(resp->rx_bcast_pkts);
2025         stats->ibytes = rte_le_to_cpu_64(resp->rx_ucast_bytes);
2026         stats->ibytes += rte_le_to_cpu_64(resp->rx_mcast_bytes);
2027         stats->ibytes += rte_le_to_cpu_64(resp->rx_bcast_bytes);
2028
2029         stats->opackets = rte_le_to_cpu_64(resp->tx_ucast_pkts);
2030         stats->opackets += rte_le_to_cpu_64(resp->tx_mcast_pkts);
2031         stats->opackets += rte_le_to_cpu_64(resp->tx_bcast_pkts);
2032         stats->obytes = rte_le_to_cpu_64(resp->tx_ucast_bytes);
2033         stats->obytes += rte_le_to_cpu_64(resp->tx_mcast_bytes);
2034         stats->obytes += rte_le_to_cpu_64(resp->tx_bcast_bytes);
2035
2036         stats->imissed = rte_le_to_cpu_64(resp->rx_discard_pkts);
2037         stats->ierrors = rte_le_to_cpu_64(resp->rx_drop_pkts);
2038         stats->oerrors = rte_le_to_cpu_64(resp->tx_discard_pkts);
2039
2040         HWRM_UNLOCK();
2041
2042         return rc;
2043 }
2044
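/*
 * Sketch (assumption): a function queries its own counters by passing
 * the "self" fid 0xffff used elsewhere in this driver, while a PF reads
 * a VF's counters via bp->pf.vf_info[vf].fid.
 */
static __rte_unused int example_query_own_stats(struct bnxt *bp,
                                                struct rte_eth_stats *st)
{
        return bnxt_hwrm_func_qstats(bp, 0xffff, st);
}
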
2045 int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid)
2046 {
2047         int rc = 0;
2048         struct hwrm_func_clr_stats_input req = {.req_type = 0};
2049         struct hwrm_func_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
2050
2051         HWRM_PREP(req, FUNC_CLR_STATS, BNXT_USE_CHIMP_MB);
2052
2053         req.fid = rte_cpu_to_le_16(fid);
2054
2055         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2056
2057         HWRM_CHECK_RESULT();
2058         HWRM_UNLOCK();
2059
2060         return rc;
2061 }
2062
2063 /*
2064  * HWRM utility functions
2065  */
2066
2067 int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
2068 {
2069         unsigned int i;
2070         int rc = 0;
2071
2072         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
2073                 struct bnxt_tx_queue *txq;
2074                 struct bnxt_rx_queue *rxq;
2075                 struct bnxt_cp_ring_info *cpr;
2076
2077                 if (i >= bp->rx_cp_nr_rings) {
2078                         txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
2079                         cpr = txq->cp_ring;
2080                 } else {
2081                         rxq = bp->rx_queues[i];
2082                         cpr = rxq->cp_ring;
2083                 }
2084
2085                 rc = bnxt_hwrm_stat_clear(bp, cpr);
2086                 if (rc)
2087                         return rc;
2088         }
2089         return 0;
2090 }
2091
2092 int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
2093 {
2094         int rc;
2095         unsigned int i;
2096         struct bnxt_cp_ring_info *cpr;
2097
2098         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
2099
2100                 if (i >= bp->rx_cp_nr_rings) {
2101                         cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
2102                 } else {
2103                         cpr = bp->rx_queues[i]->cp_ring;
2104                         if (BNXT_HAS_RING_GRPS(bp))
2105                                 bp->grp_info[i].fw_stats_ctx = -1;
2106                 }
2107                 if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
2108                         rc = bnxt_hwrm_stat_ctx_free(bp, cpr, i);
2109                         cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
2110                         if (rc)
2111                                 return rc;
2112                 }
2113         }
2114         return 0;
2115 }
2116
2117 int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
2118 {
2119         unsigned int i;
2120         int rc = 0;
2121
2122         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
2123                 struct bnxt_tx_queue *txq;
2124                 struct bnxt_rx_queue *rxq;
2125                 struct bnxt_cp_ring_info *cpr;
2126
2127                 if (i >= bp->rx_cp_nr_rings) {
2128                         txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
2129                         cpr = txq->cp_ring;
2130                 } else {
2131                         rxq = bp->rx_queues[i];
2132                         cpr = rxq->cp_ring;
2133                 }
2134
2135                 rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, i);
2136
2137                 if (rc)
2138                         return rc;
2139         }
2140         return rc;
2141 }
2142
2143 int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
2144 {
2145         uint16_t idx;
2146         int rc = 0;
2147
2148         if (!BNXT_HAS_RING_GRPS(bp))
2149                 return 0;
2150
2151         for (idx = 0; idx < bp->rx_cp_nr_rings; idx++) {
2152
2153                 if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID)
2154                         continue;
2155
2156                 rc = bnxt_hwrm_ring_grp_free(bp, idx);
2157
2158                 if (rc)
2159                         return rc;
2160         }
2161         return rc;
2162 }
2163
2164 void bnxt_free_nq_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
2165 {
2166         struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
2167
2168         bnxt_hwrm_ring_free(bp, cp_ring,
2169                             HWRM_RING_FREE_INPUT_RING_TYPE_NQ);
2170         cp_ring->fw_ring_id = INVALID_HW_RING_ID;
2171         memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
2172                                      sizeof(*cpr->cp_desc_ring));
2173         cpr->cp_raw_cons = 0;
2174         cpr->valid = 0;
2175 }
2176
2177 void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
2178 {
2179         struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
2180
2181         bnxt_hwrm_ring_free(bp, cp_ring,
2182                         HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL);
2183         cp_ring->fw_ring_id = INVALID_HW_RING_ID;
2184         memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
2185                         sizeof(*cpr->cp_desc_ring));
2186         cpr->cp_raw_cons = 0;
2187         cpr->valid = 0;
2188 }
2189
2190 void bnxt_free_hwrm_rx_ring(struct bnxt *bp, int queue_index)
2191 {
2192         struct bnxt_rx_queue *rxq = bp->rx_queues[queue_index];
2193         struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
2194         struct bnxt_ring *ring = rxr->rx_ring_struct;
2195         struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
2196
2197         if (ring->fw_ring_id != INVALID_HW_RING_ID) {
2198                 bnxt_hwrm_ring_free(bp, ring,
2199                                     HWRM_RING_FREE_INPUT_RING_TYPE_RX);
2200                 ring->fw_ring_id = INVALID_HW_RING_ID;
2201                 if (BNXT_HAS_RING_GRPS(bp))
2202                         bp->grp_info[queue_index].rx_fw_ring_id =
2203                                                         INVALID_HW_RING_ID;
2204                 memset(rxr->rx_desc_ring, 0,
2205                        rxr->rx_ring_struct->ring_size *
2206                        sizeof(*rxr->rx_desc_ring));
2207                 memset(rxr->rx_buf_ring, 0,
2208                        rxr->rx_ring_struct->ring_size *
2209                        sizeof(*rxr->rx_buf_ring));
2210                 rxr->rx_prod = 0;
2211         }
2212         ring = rxr->ag_ring_struct;
2213         if (ring->fw_ring_id != INVALID_HW_RING_ID) {
2214                 bnxt_hwrm_ring_free(bp, ring,
2215                                     BNXT_CHIP_THOR(bp) ?
2216                                     HWRM_RING_FREE_INPUT_RING_TYPE_RX_AGG :
2217                                     HWRM_RING_FREE_INPUT_RING_TYPE_RX);
2218                 ring->fw_ring_id = INVALID_HW_RING_ID;
2219                 memset(rxr->ag_buf_ring, 0,
2220                        rxr->ag_ring_struct->ring_size *
2221                        sizeof(*rxr->ag_buf_ring));
2222                 rxr->ag_prod = 0;
2223                 if (BNXT_HAS_RING_GRPS(bp))
2224                         bp->grp_info[queue_index].ag_fw_ring_id =
2225                                                         INVALID_HW_RING_ID;
2226         }
2227         if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
2228                 bnxt_free_cp_ring(bp, cpr);
2229                 if (rxq->nq_ring)
2230                         bnxt_free_nq_ring(bp, rxq->nq_ring);
2231         }
2232
2233         if (BNXT_HAS_RING_GRPS(bp))
2234                 bp->grp_info[queue_index].cp_fw_ring_id = INVALID_HW_RING_ID;
2235 }
2236
2237 int bnxt_free_all_hwrm_rings(struct bnxt *bp)
2238 {
2239         unsigned int i;
2240
2241         for (i = 0; i < bp->tx_cp_nr_rings; i++) {
2242                 struct bnxt_tx_queue *txq = bp->tx_queues[i];
2243                 struct bnxt_tx_ring_info *txr = txq->tx_ring;
2244                 struct bnxt_ring *ring = txr->tx_ring_struct;
2245                 struct bnxt_cp_ring_info *cpr = txq->cp_ring;
2246
2247                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
2248                         bnxt_hwrm_ring_free(bp, ring,
2249                                         HWRM_RING_FREE_INPUT_RING_TYPE_TX);
2250                         ring->fw_ring_id = INVALID_HW_RING_ID;
2251                         memset(txr->tx_desc_ring, 0,
2252                                         txr->tx_ring_struct->ring_size *
2253                                         sizeof(*txr->tx_desc_ring));
2254                         memset(txr->tx_buf_ring, 0,
2255                                         txr->tx_ring_struct->ring_size *
2256                                         sizeof(*txr->tx_buf_ring));
2257                         txr->tx_prod = 0;
2258                         txr->tx_cons = 0;
2259                 }
2260                 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
2261                         bnxt_free_cp_ring(bp, cpr);
2262                         cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
2263                         if (txq->nq_ring)
2264                                 bnxt_free_nq_ring(bp, txq->nq_ring);
2265                 }
2266         }
2267
2268         for (i = 0; i < bp->rx_cp_nr_rings; i++)
2269                 bnxt_free_hwrm_rx_ring(bp, i);
2270
2271         return 0;
2272 }
2273
2274 int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
2275 {
2276         uint16_t i;
2277         int rc = 0;
2278
2279         if (!BNXT_HAS_RING_GRPS(bp))
2280                 return 0;
2281
2282         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
2283                 rc = bnxt_hwrm_ring_grp_alloc(bp, i);
2284                 if (rc)
2285                         return rc;
2286         }
2287         return rc;
2288 }
2289
2290 void bnxt_free_hwrm_resources(struct bnxt *bp)
2291 {
2292         /* Release the rte_malloc'd HWRM request/response buffers. */
2293         rte_free(bp->hwrm_cmd_resp_addr);
2294         rte_free(bp->hwrm_short_cmd_req_addr);
2295         bp->hwrm_cmd_resp_addr = NULL;
2296         bp->hwrm_short_cmd_req_addr = NULL;
2297         bp->hwrm_cmd_resp_dma_addr = 0;
2298         bp->hwrm_short_cmd_req_dma_addr = 0;
2299 }
2300
2301 int bnxt_alloc_hwrm_resources(struct bnxt *bp)
2302 {
2303         struct rte_pci_device *pdev = bp->pdev;
2304         char type[RTE_MEMZONE_NAMESIZE];
2305
2306         snprintf(type, sizeof(type), "bnxt_hwrm_%04x:%02x:%02x:%02x",
2307                  pdev->addr.domain, pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
2308         bp->max_resp_len = HWRM_MAX_RESP_LEN;
2309         bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
2310         if (bp->hwrm_cmd_resp_addr == NULL)
2311                 return -ENOMEM;
2312         rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
2313         bp->hwrm_cmd_resp_dma_addr =
2314                 rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
2315         if (bp->hwrm_cmd_resp_dma_addr == RTE_BAD_IOVA) {
2316                 PMD_DRV_LOG(ERR,
2317                         "unable to map response address to physical memory\n");
2318                 return -ENOMEM;
2319         }
2320         rte_spinlock_init(&bp->hwrm_lock);
2321
2322         return 0;
2323 }
2324
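/*
 * Sketch (assumption: called from the ethdev init/uninit paths): the
 * command channel buffers above live for the whole device lifetime and
 * are paired one-to-one with bnxt_free_hwrm_resources().
 */
static __rte_unused int example_hwrm_channel_lifetime(struct bnxt *bp)
{
        int rc = bnxt_alloc_hwrm_resources(bp);

        if (rc)
                return rc;
        /* ... issue HWRM commands via the bnxt_hwrm_*() helpers ... */
        bnxt_free_hwrm_resources(bp);
        return 0;
}
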
2325 int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2326 {
2327         struct bnxt_filter_info *filter;
2328         int rc = 0;
2329
2330         STAILQ_FOREACH(filter, &vnic->filter, next) {
2331                 if (filter->filter_type == HWRM_CFA_EM_FILTER)
2332                         rc = bnxt_hwrm_clear_em_filter(bp, filter);
2333                 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
2334                         rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
2335                 else
2336                         rc = bnxt_hwrm_clear_l2_filter(bp, filter);
2337                 STAILQ_REMOVE(&vnic->filter, filter, bnxt_filter_info, next);
2338                 bnxt_free_filter(bp, filter);
2339                 /* Keep going even if an HWRM call fails, so that every
2340                  * filter is removed from the VNIC and freed. */
2341         }
2342         return rc;
2343 }
2344
2345 static int
2346 bnxt_clear_hwrm_vnic_flows(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2347 {
2348         struct bnxt_filter_info *filter;
2349         struct rte_flow *flow;
2350         int rc = 0;
2351
2352         STAILQ_FOREACH(flow, &vnic->flow_list, next) {
2353                 filter = flow->filter;
2354                 PMD_DRV_LOG(DEBUG, "filter type %d\n", filter->filter_type);
2355                 if (filter->filter_type == HWRM_CFA_EM_FILTER)
2356                         rc = bnxt_hwrm_clear_em_filter(bp, filter);
2357                 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
2358                         rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
2359                 else
2360                         rc = bnxt_hwrm_clear_l2_filter(bp, filter);
2361
2362                 STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
2363                 rte_free(flow);
2364                 /* Keep going even if an HWRM call fails, so that every
2365                  * flow is removed from the VNIC and freed. */
2366         }
2367         return rc;
2368 }
2369
2370 int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2371 {
2372         struct bnxt_filter_info *filter;
2373         int rc = 0;
2374
2375         STAILQ_FOREACH(filter, &vnic->filter, next) {
2376                 if (filter->filter_type == HWRM_CFA_EM_FILTER)
2377                         rc = bnxt_hwrm_set_em_filter(bp, filter->dst_id,
2378                                                      filter);
2379                 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
2380                         rc = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id,
2381                                                          filter);
2382                 else
2383                         rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
2384                                                      filter);
2385                 if (rc)
2386                         break;
2387         }
2388         return rc;
2389 }
2390
2391 void bnxt_free_tunnel_ports(struct bnxt *bp)
2392 {
2393         if (bp->vxlan_port_cnt)
2394                 bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id,
2395                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN);
2396         bp->vxlan_port = 0;
2397         if (bp->geneve_port_cnt)
2398                 bnxt_hwrm_tunnel_dst_port_free(bp, bp->geneve_fw_dst_port_id,
2399                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE);
2400         bp->geneve_port = 0;
2401 }
2402
2403 void bnxt_free_all_hwrm_resources(struct bnxt *bp)
2404 {
2405         int i, j;
2406
2407         if (bp->vnic_info == NULL)
2408                 return;
2409
2410         /*
2411          * Cleanup VNICs in reverse order, to make sure the L2 filter
2412          * from vnic0 is last to be cleaned up.
2413          */
2414         for (i = bp->nr_vnics - 1; i >= 0; i--) {
2415                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
2416
2417                 if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
2418                         PMD_DRV_LOG(DEBUG, "Invalid vNIC ID\n");
2419                         return;
2420                 }
2421
2422                 bnxt_clear_hwrm_vnic_flows(bp, vnic);
2423
2424                 bnxt_clear_hwrm_vnic_filters(bp, vnic);
2425
2426                 if (BNXT_CHIP_THOR(bp)) {
2427                         for (j = 0; j < vnic->num_lb_ctxts; j++) {
2428                                 bnxt_hwrm_vnic_ctx_free(bp, vnic,
2429                                                         vnic->fw_grp_ids[j]);
2430                                 vnic->fw_grp_ids[j] = INVALID_HW_RING_ID;
2431                         }
2432                         vnic->num_lb_ctxts = 0;
2433                 } else {
2434                         bnxt_hwrm_vnic_ctx_free(bp, vnic, vnic->rss_rule);
2435                         vnic->rss_rule = INVALID_HW_RING_ID;
2436                 }
2437
2438                 bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);
2439
2440                 bnxt_hwrm_vnic_free(bp, vnic);
2441
2442                 rte_free(vnic->fw_grp_ids);
2443         }
2444         /* Ring resources */
2445         bnxt_free_all_hwrm_rings(bp);
2446         bnxt_free_all_hwrm_ring_grps(bp);
2447         bnxt_free_all_hwrm_stat_ctxs(bp);
2448         bnxt_free_tunnel_ports(bp);
2449 }
2450
2451 static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
2452 {
2453         uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
2454
2455         if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
2456                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
2457
2458         switch (conf_link_speed) {
2459         case ETH_LINK_SPEED_10M_HD:
2460         case ETH_LINK_SPEED_100M_HD:
2461                 /* FALLTHROUGH */
2462                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
2463         }
2464         return hw_link_duplex;
2465 }
2466
2467 static uint16_t bnxt_check_eth_link_autoneg(uint32_t conf_link)
2468 {
2469         return (conf_link & ETH_LINK_SPEED_FIXED) ? 0 : 1;
2470 }
2471
2472 static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
2473 {
2474         uint16_t eth_link_speed = 0;
2475
2476         if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
2477                 return ETH_LINK_SPEED_AUTONEG;
2478
2479         switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
2480         case ETH_LINK_SPEED_100M:
2481         case ETH_LINK_SPEED_100M_HD:
2482                 /* FALLTHROUGH */
2483                 eth_link_speed =
2484                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
2485                 break;
2486         case ETH_LINK_SPEED_1G:
2487                 eth_link_speed =
2488                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
2489                 break;
2490         case ETH_LINK_SPEED_2_5G:
2491                 eth_link_speed =
2492                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
2493                 break;
2494         case ETH_LINK_SPEED_10G:
2495                 eth_link_speed =
2496                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
2497                 break;
2498         case ETH_LINK_SPEED_20G:
2499                 eth_link_speed =
2500                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
2501                 break;
2502         case ETH_LINK_SPEED_25G:
2503                 eth_link_speed =
2504                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
2505                 break;
2506         case ETH_LINK_SPEED_40G:
2507                 eth_link_speed =
2508                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
2509                 break;
2510         case ETH_LINK_SPEED_50G:
2511                 eth_link_speed =
2512                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
2513                 break;
2514         case ETH_LINK_SPEED_100G:
2515                 eth_link_speed =
2516                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB;
2517                 break;
2518         default:
2519                 PMD_DRV_LOG(ERR,
2520                         "Unsupported link speed %u; defaulting to AUTO\n",
2521                         conf_link_speed);
2522                 break;
2523         }
2524         return eth_link_speed;
2525 }
2526
2527 #define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
2528                 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
2529                 ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
2530                 ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G)
2531
2532 static int bnxt_valid_link_speed(uint32_t link_speed, uint16_t port_id)
2533 {
2534         uint32_t one_speed;
2535
2536         if (link_speed == ETH_LINK_SPEED_AUTONEG)
2537                 return 0;
2538
2539         if (link_speed & ETH_LINK_SPEED_FIXED) {
2540                 one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
2541
2542                 if (one_speed & (one_speed - 1)) {
2543                         PMD_DRV_LOG(ERR,
2544                                 "Invalid advertised speeds (%u) for port %u\n",
2545                                 link_speed, port_id);
2546                         return -EINVAL;
2547                 }
2548                 if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
2549                         PMD_DRV_LOG(ERR,
2550                                 "Unsupported advertised speed (%u) for port %u\n",
2551                                 link_speed, port_id);
2552                         return -EINVAL;
2553                 }
2554         } else {
2555                 if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
2556                         PMD_DRV_LOG(ERR,
2557                                 "Unsupported advertised speeds (%u) for port %u\n",
2558                                 link_speed, port_id);
2559                         return -EINVAL;
2560                 }
2561         }
2562         return 0;
2563 }
2564
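/*
 * Examples of the validation above (hypothetical calls): one fixed speed
 * passes, while a fixed request advertising two speeds fails the
 * power-of-two check and is rejected as ambiguous.
 */
static __rte_unused void example_link_speed_checks(void)
{
        /* Returns 0: exactly one supported fixed speed. */
        (void)bnxt_valid_link_speed(ETH_LINK_SPEED_FIXED |
                                    ETH_LINK_SPEED_25G, 0);
        /* Returns -EINVAL: two speed bits set together with FIXED. */
        (void)bnxt_valid_link_speed(ETH_LINK_SPEED_FIXED |
                                    ETH_LINK_SPEED_10G |
                                    ETH_LINK_SPEED_25G, 0);
}
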
2565 static uint16_t
2566 bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed)
2567 {
2568         uint16_t ret = 0;
2569
2570         if (link_speed == ETH_LINK_SPEED_AUTONEG) {
2571                 if (bp->link_info.support_speeds)
2572                         return bp->link_info.support_speeds;
2573                 link_speed = BNXT_SUPPORTED_SPEEDS;
2574         }
2575
2576         if (link_speed & ETH_LINK_SPEED_100M)
2577                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
2578         if (link_speed & ETH_LINK_SPEED_100M_HD)
2579                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
2580         if (link_speed & ETH_LINK_SPEED_1G)
2581                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
2582         if (link_speed & ETH_LINK_SPEED_2_5G)
2583                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
2584         if (link_speed & ETH_LINK_SPEED_10G)
2585                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
2586         if (link_speed & ETH_LINK_SPEED_20G)
2587                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
2588         if (link_speed & ETH_LINK_SPEED_25G)
2589                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
2590         if (link_speed & ETH_LINK_SPEED_40G)
2591                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
2592         if (link_speed & ETH_LINK_SPEED_50G)
2593                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
2594         if (link_speed & ETH_LINK_SPEED_100G)
2595                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100GB;
2596         return ret;
2597 }
2598
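/*
 * Worked example (hypothetical): requesting ETH_LINK_SPEED_1G |
 * ETH_LINK_SPEED_10G yields a mask with only the 1GB and 10GB bits set,
 * which is then advertised during autonegotiation.
 */
static __rte_unused uint16_t example_speed_mask(struct bnxt *bp)
{
        return bnxt_parse_eth_link_speed_mask(bp, ETH_LINK_SPEED_1G |
                                                  ETH_LINK_SPEED_10G);
}
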
2599 static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
2600 {
2601         uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;
2602
2603         switch (hw_link_speed) {
2604         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
2605                 eth_link_speed = ETH_SPEED_NUM_100M;
2606                 break;
2607         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
2608                 eth_link_speed = ETH_SPEED_NUM_1G;
2609                 break;
2610         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
2611                 eth_link_speed = ETH_SPEED_NUM_2_5G;
2612                 break;
2613         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
2614                 eth_link_speed = ETH_SPEED_NUM_10G;
2615                 break;
2616         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
2617                 eth_link_speed = ETH_SPEED_NUM_20G;
2618                 break;
2619         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
2620                 eth_link_speed = ETH_SPEED_NUM_25G;
2621                 break;
2622         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
2623                 eth_link_speed = ETH_SPEED_NUM_40G;
2624                 break;
2625         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
2626                 eth_link_speed = ETH_SPEED_NUM_50G;
2627                 break;
2628         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB:
2629                 eth_link_speed = ETH_SPEED_NUM_100G;
2630                 break;
2631         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
2632         default:
2633                 PMD_DRV_LOG(ERR, "HWRM link speed %d not defined\n",
2634                         hw_link_speed);
2635                 break;
2636         }
2637         return eth_link_speed;
2638 }
2639
2640 static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
2641 {
2642         uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;
2643
2644         switch (hw_link_duplex) {
2645         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
2646         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
2647                 /* FALLTHROUGH */
2648                 eth_link_duplex = ETH_LINK_FULL_DUPLEX;
2649                 break;
2650         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
2651                 eth_link_duplex = ETH_LINK_HALF_DUPLEX;
2652                 break;
2653         default:
2654                 PMD_DRV_LOG(ERR, "HWRM link duplex %d not defined\n",
2655                         hw_link_duplex);
2656                 break;
2657         }
2658         return eth_link_duplex;
2659 }
2660
2661 int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
2662 {
2663         int rc = 0;
2664         struct bnxt_link_info *link_info = &bp->link_info;
2665
2666         rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
2667         if (rc) {
2668                 PMD_DRV_LOG(ERR,
2669                         "Get link config failed with rc %d\n", rc);
2670                 goto exit;
2671         }
2672         if (link_info->link_speed)
2673                 link->link_speed =
2674                         bnxt_parse_hw_link_speed(link_info->link_speed);
2675         else
2676                 link->link_speed = ETH_SPEED_NUM_NONE;
2677         link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
2678         link->link_status = link_info->link_up;
2679         link->link_autoneg = link_info->auto_mode ==
2680                 HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
2681                 ETH_LINK_FIXED : ETH_LINK_AUTONEG;
2682 exit:
2683         return rc;
2684 }
2685
2686 int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
2687 {
2688         int rc = 0;
2689         struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
2690         struct bnxt_link_info link_req;
2691         uint16_t speed, autoneg;
2692
2693         if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp))
2694                 return 0;
2695
2696         rc = bnxt_valid_link_speed(dev_conf->link_speeds,
2697                         bp->eth_dev->data->port_id);
2698         if (rc)
2699                 goto error;
2700
2701         memset(&link_req, 0, sizeof(link_req));
2702         link_req.link_up = link_up;
2703         if (!link_up)
2704                 goto port_phy_cfg;
2705
2706         autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
2707         if (BNXT_CHIP_THOR(bp) &&
2708             dev_conf->link_speeds == ETH_LINK_SPEED_40G) {
2709                 /* 40G is not supported as part of media auto detect.
2710                  * The speed should be forced and autoneg disabled
2711                  * to configure 40G speed.
2712                  */
2713                 PMD_DRV_LOG(INFO, "Disabling autoneg for 40G\n");
2714                 autoneg = 0;
2715         }
2716
2717         speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
2718         link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
2719         /* Autoneg can be done only when the FW allows.
2720          * When user configures fixed speed of 40G and later changes to
2721          * any other speed, auto_link_speed/force_link_speed is still set
2722          * to 40G until link comes up at new speed.
2723          */
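        /*
         * The test below therefore restarts autoneg only on Thor chips,
         * or on older chips when the firmware has not latched an
         * auto/forced link speed yet.
         */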
2724         if (autoneg == 1 &&
2725             !(!BNXT_CHIP_THOR(bp) &&
2726               (bp->link_info.auto_link_speed ||
2727                bp->link_info.force_link_speed))) {
2728                 link_req.phy_flags |=
2729                                 HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
2730                 link_req.auto_link_speed_mask =
2731                         bnxt_parse_eth_link_speed_mask(bp,
2732                                                        dev_conf->link_speeds);
2733         } else {
2734                 if (bp->link_info.phy_type ==
2735                     HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET ||
2736                     bp->link_info.phy_type ==
2737                     HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE ||
2738                     bp->link_info.media_type ==
2739                     HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP) {
2740                         PMD_DRV_LOG(ERR, "10GBase-T devices must autoneg\n");
2741                         return -EINVAL;
2742                 }
2743
2744                 link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
2745                 /* If user wants a particular speed try that first. */
2746                 if (speed)
2747                         link_req.link_speed = speed;
2748                 else if (bp->link_info.force_link_speed)
2749                         link_req.link_speed = bp->link_info.force_link_speed;
2750                 else
2751                         link_req.link_speed = bp->link_info.auto_link_speed;
2752         }
2753         link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
2754         link_req.auto_pause = bp->link_info.auto_pause;
2755         link_req.force_pause = bp->link_info.force_pause;
2756
2757 port_phy_cfg:
2758         rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
2759         if (rc) {
2760                 PMD_DRV_LOG(ERR,
2761                         "Set link config failed with rc %d\n", rc);
2762         }
2763
2764 error:
2765         return rc;
2766 }
2767
2768 /* JIRA 22088 */
2769 int bnxt_hwrm_func_qcfg(struct bnxt *bp, uint16_t *mtu)
2770 {
2771         struct hwrm_func_qcfg_input req = {0};
2772         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2773         uint16_t flags;
2774         int rc = 0;
2775
2776         HWRM_PREP(req, FUNC_QCFG, BNXT_USE_CHIMP_MB);
2777         req.fid = rte_cpu_to_le_16(0xffff);
2778
2779         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2780
2781         HWRM_CHECK_RESULT();
2782
2783         /* Hardcoded 0xfff VLAN ID mask */
2784         bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
2785         flags = rte_le_to_cpu_16(resp->flags);
2786         if (BNXT_PF(bp) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_HOST))
2787                 bp->flags |= BNXT_FLAG_MULTI_HOST;
2788
2789         if (BNXT_VF(bp) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_TRUSTED_VF)) {
2790                 bp->flags |= BNXT_FLAG_TRUSTED_VF_EN;
2791                 PMD_DRV_LOG(INFO, "Trusted VF cap enabled\n");
2792         } else if (BNXT_VF(bp) &&
2793                    !(flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_TRUSTED_VF)) {
2794                 bp->flags &= ~BNXT_FLAG_TRUSTED_VF_EN;
2795                 PMD_DRV_LOG(INFO, "Trusted VF cap disabled\n");
2796         }
2797
2798         if (mtu)
2799                 *mtu = rte_le_to_cpu_16(resp->mtu);
2800
2801         switch (resp->port_partition_type) {
2802         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
2803                 /* FALLTHROUGH */
2804         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
2805         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
2806                 bp->port_partition_type = resp->port_partition_type;
2807                 break;
2808         default:
2809                 bp->port_partition_type = 0;
2810                 break;
2811         }
2812
2813         HWRM_UNLOCK();
2814
2815         return rc;
2816 }
2817
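/*
 * Fallback used when HWRM_FUNC_QCAPS fails for a VF: approximate the VF's
 * capabilities from the resources that were just requested via func_cfg,
 * zeroing the fields the cfg request does not carry.
 */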
2818 static void copy_func_cfg_to_qcaps(struct hwrm_func_cfg_input *fcfg,
2819                                    struct hwrm_func_qcaps_output *qcaps)
2820 {
2821         qcaps->max_rsscos_ctx = fcfg->num_rsscos_ctxs;
2822         memcpy(qcaps->mac_address, fcfg->dflt_mac_addr,
2823                sizeof(qcaps->mac_address));
2824         qcaps->max_l2_ctxs = fcfg->num_l2_ctxs;
2825         qcaps->max_rx_rings = fcfg->num_rx_rings;
2826         qcaps->max_tx_rings = fcfg->num_tx_rings;
2827         qcaps->max_cmpl_rings = fcfg->num_cmpl_rings;
2828         qcaps->max_stat_ctx = fcfg->num_stat_ctxs;
2829         qcaps->max_vfs = 0;
2830         qcaps->first_vf_id = 0;
2831         qcaps->max_vnics = fcfg->num_vnics;
2832         qcaps->max_decap_records = 0;
2833         qcaps->max_encap_records = 0;
2834         qcaps->max_tx_wm_flows = 0;
2835         qcaps->max_tx_em_flows = 0;
2836         qcaps->max_rx_wm_flows = 0;
2837         qcaps->max_rx_em_flows = 0;
2838         qcaps->max_flow_id = 0;
2839         qcaps->max_mcast_filters = fcfg->num_mcast_filters;
2840         qcaps->max_sp_tx_rings = 0;
2841         qcaps->max_hw_ring_grps = fcfg->num_hw_ring_grps;
2842 }
2843
2844 static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
2845 {
2846         struct hwrm_func_cfg_input req = {0};
2847         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2848         uint32_t enables;
2849         int rc;
2850
2851         enables = HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
2852                   HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
2853                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
2854                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
2855                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
2856                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
2857                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
2858                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
2859                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS;
2860
2861         if (BNXT_HAS_RING_GRPS(bp)) {
2862                 enables |= HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS;
2863                 req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps);
2864         } else if (BNXT_HAS_NQ(bp)) {
2865                 enables |= HWRM_FUNC_CFG_INPUT_ENABLES_NUM_MSIX;
2866                 req.num_msix = rte_cpu_to_le_16(bp->max_nq_rings);
2867         }
2868
2869         req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
2870         req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);
2871         req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
2872                                    RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE *
2873                                    BNXT_NUM_VLANS);
2874         req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
2875         req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx);
2876         req.num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings);
2877         req.num_tx_rings = rte_cpu_to_le_16(tx_rings);
2878         req.num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings);
2879         req.num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx);
2880         req.num_vnics = rte_cpu_to_le_16(bp->max_vnics);
2881         req.fid = rte_cpu_to_le_16(0xffff);
2882         req.enables = rte_cpu_to_le_32(enables);
2883
2884         HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
2885
2886         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2887
2888         HWRM_CHECK_RESULT();
2889         HWRM_UNLOCK();
2890
2891         return rc;
2892 }
2893
2894 static void populate_vf_func_cfg_req(struct bnxt *bp,
2895                                      struct hwrm_func_cfg_input *req,
2896                                      int num_vfs)
2897 {
2898         req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
2899                         HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
2900                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
2901                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
2902                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
2903                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
2904                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
2905                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
2906                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
2907                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
2908
2909         req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
2910                                     RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE *
2911                                     BNXT_NUM_VLANS);
2912         req->mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
2913                                     RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE *
2914                                     BNXT_NUM_VLANS);
2915         req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx /
2916                                                 (num_vfs + 1));
2917         req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
2918         req->num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings /
2919                                                (num_vfs + 1));
2920         req->num_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
2921         req->num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
2922         req->num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
2923         /* TODO: For now, do not support VMDq/RFS on VFs. */
2924         req->num_vnics = rte_cpu_to_le_16(1);
2925         req->num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
2926                                                  (num_vfs + 1));
2927 }
2928
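/*
 * If the firmware-default MAC of the VF is all zeroes, assign a random
 * address through func_cfg and record that fact in vf_info; otherwise
 * keep the firmware-assigned address.
 */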
2929 static void add_random_mac_if_needed(struct bnxt *bp,
2930                                      struct hwrm_func_cfg_input *cfg_req,
2931                                      int vf)
2932 {
2933         struct rte_ether_addr mac;
2934
2935         if (bnxt_hwrm_func_qcfg_vf_default_mac(bp, vf, &mac))
2936                 return;
2937
2938         if (memcmp(mac.addr_bytes, "\x00\x00\x00\x00\x00\x00", 6) == 0) {
2939                 cfg_req->enables |=
2940                 rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2941                 rte_eth_random_addr(cfg_req->dflt_mac_addr);
2942                 bp->pf.vf_info[vf].random_mac = true;
2943         } else {
2944                 memcpy(cfg_req->dflt_mac_addr, mac.addr_bytes,
2945                         RTE_ETHER_ADDR_LEN);
2946         }
2947 }
2948
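/*
 * Query the resources actually granted to a VF and subtract them from the
 * PF's running maximums.  If the query fails, the values requested in
 * cfg_req are used as an estimate (see copy_func_cfg_to_qcaps above).
 */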
2949 static void reserve_resources_from_vf(struct bnxt *bp,
2950                                       struct hwrm_func_cfg_input *cfg_req,
2951                                       int vf)
2952 {
2953         struct hwrm_func_qcaps_input req = {0};
2954         struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
2955         int rc;
2956
2957         /* Get the actual allocated values now */
2958         HWRM_PREP(req, FUNC_QCAPS, BNXT_USE_CHIMP_MB);
2959         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2960         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2961
2962         if (rc) {
2963                 PMD_DRV_LOG(ERR, "hwrm_func_qcaps failed rc:%d\n", rc);
2964                 copy_func_cfg_to_qcaps(cfg_req, resp);
2965         } else if (resp->error_code) {
2966                 rc = rte_le_to_cpu_16(resp->error_code);
2967                 PMD_DRV_LOG(ERR, "hwrm_func_qcaps error %d\n", rc);
2968                 copy_func_cfg_to_qcaps(cfg_req, resp);
2969         }
2970
2971         bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->max_rsscos_ctx);
2972         bp->max_stat_ctx -= rte_le_to_cpu_16(resp->max_stat_ctx);
2973         bp->max_cp_rings -= rte_le_to_cpu_16(resp->max_cmpl_rings);
2974         bp->max_tx_rings -= rte_le_to_cpu_16(resp->max_tx_rings);
2975         bp->max_rx_rings -= rte_le_to_cpu_16(resp->max_rx_rings);
2976         bp->max_l2_ctx -= rte_le_to_cpu_16(resp->max_l2_ctxs);
2977         /*
2978          * TODO: While not supporting VMDq with VFs, max_vnics is always
2979          * forced to 1 in this case
2980          */
2981         /* bp->max_vnics -= rte_le_to_cpu_16(resp->max_vnics); */
2982         bp->max_ring_grps -= rte_le_to_cpu_16(resp->max_hw_ring_grps);
2983
2984         HWRM_UNLOCK();
2985 }
2986
2987 int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
2988 {
2989         struct hwrm_func_qcfg_input req = {0};
2990         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2991         int rc;
2992
2993         /* Query the current default VLAN of the VF */
2994         HWRM_PREP(req, FUNC_QCFG, BNXT_USE_CHIMP_MB);
2995         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2996         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2997         HWRM_CHECK_RESULT();
2998         rc = rte_le_to_cpu_16(resp->vlan);
2999
3000         HWRM_UNLOCK();
3001
3002         return rc;
3003 }
3004
3005 static int update_pf_resource_max(struct bnxt *bp)
3006 {
3007         struct hwrm_func_qcfg_input req = {0};
3008         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3009         int rc;
3010
3011         /* And copy the allocated numbers into the pf struct */
3012         HWRM_PREP(req, FUNC_QCFG, BNXT_USE_CHIMP_MB);
3013         req.fid = rte_cpu_to_le_16(0xffff);
3014         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3015         HWRM_CHECK_RESULT();
3016
3017         /* Only TX ring value reflects actual allocation? TODO */
3018         bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
3019         bp->pf.evb_mode = resp->evb_mode;
3020
3021         HWRM_UNLOCK();
3022
3023         return rc;
3024 }
3025
3026 int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
3027 {
3028         int rc;
3029
3030         if (!BNXT_PF(bp)) {
3031                 PMD_DRV_LOG(ERR, "Attempt to allocate VFs on a VF!\n");
3032                 return -EINVAL;
3033         }
3034
3035         rc = bnxt_hwrm_func_qcaps(bp);
3036         if (rc)
3037                 return rc;
3038
3039         bp->pf.func_cfg_flags &=
3040                 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
3041                   HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
3042         bp->pf.func_cfg_flags |=
3043                 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
3044         rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
3045         if (rc)
3046                 return rc;
3047         return __bnxt_hwrm_func_qcaps(bp);
3047 }
3048
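/*
 * VF allocation sequence, as implemented below:
 *   1. shrink the PF to a single TX ring so enough rings remain for VFs,
 *   2. register a buffer for HWRM requests forwarded from the VFs,
 *   3. issue FUNC_CFG per VF (adding a random MAC where needed) and
 *      subtract each VF's granted resources from the PF maximums,
 *   4. hand the remaining resources back to the PF.
 */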
3049 int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
3050 {
3051         struct hwrm_func_cfg_input req = {0};
3052         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3053         int i;
3054         size_t sz;
3055         int rc = 0;
3056         size_t req_buf_sz;
3057
3058         if (!BNXT_PF(bp)) {
3059                 PMD_DRV_LOG(ERR, "Attempt to allocate VFs on a VF!\n");
3060                 return -EINVAL;
3061         }
3062
3063         rc = bnxt_hwrm_func_qcaps(bp);
3064
3065         if (rc)
3066                 return rc;
3067
3068         bp->pf.active_vfs = num_vfs;
3069
3070         /*
3071          * First, configure the PF to only use one TX ring.  This ensures that
3072          * there are enough rings for all VFs.
3073          *
3074          * If we don't do this, when we call func_alloc() later, we will lock
3075          * extra rings to the PF that won't be available during func_cfg() of
3076          * the VFs.
3077          *
3078          * This has been fixed with firmware versions above 20.6.54
3079          */
3080         bp->pf.func_cfg_flags &=
3081                 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
3082                   HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
3083         bp->pf.func_cfg_flags |=
3084                 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
3085         rc = bnxt_hwrm_pf_func_cfg(bp, 1);
3086         if (rc)
3087                 return rc;
3088
3089         /*
3090          * Now, create and register a buffer to hold forwarded VF requests
3091          */
3092         req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;
3093         bp->pf.vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
3094                 page_roundup(req_buf_sz));
3095         if (bp->pf.vf_req_buf == NULL) {
3096                 rc = -ENOMEM;
3097                 goto error_free;
3098         }
3099         for (sz = 0; sz < req_buf_sz; sz += getpagesize())
3100                 rte_mem_lock_page(((char *)bp->pf.vf_req_buf) + sz);
3101         for (i = 0; i < num_vfs; i++)
3102                 bp->pf.vf_info[i].req_buf = ((char *)bp->pf.vf_req_buf) +
3103                                         (i * HWRM_MAX_REQ_LEN);
3104
3105         rc = bnxt_hwrm_func_buf_rgtr(bp);
3106         if (rc)
3107                 goto error_free;
3108
3109         populate_vf_func_cfg_req(bp, &req, num_vfs);
3110
3111         bp->pf.active_vfs = 0;
3112         for (i = 0; i < num_vfs; i++) {
3113                 add_random_mac_if_needed(bp, &req, i);
3114
3115                 HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
3116                 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[i].func_cfg_flags);
3117                 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[i].fid);
3118                 rc = bnxt_hwrm_send_message(bp,
3119                                             &req,
3120                                             sizeof(req),
3121                                             BNXT_USE_CHIMP_MB);
3122
3123                 /* Clear enable flag for next pass */
3124                 req.enables &= ~rte_cpu_to_le_32(
3125                                 HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
3126
3127                 if (rc || resp->error_code) {
3128                         PMD_DRV_LOG(ERR,
3129                                 "Failed to initialize VF %d\n", i);
3130                         PMD_DRV_LOG(ERR,
3131                                 "Not all VFs available. (%d, %d)\n",
3132                                 rc, resp->error_code);
3133                         HWRM_UNLOCK();
3134                         break;
3135                 }
3136
3137                 HWRM_UNLOCK();
3138
3139                 reserve_resources_from_vf(bp, &req, i);
3140                 bp->pf.active_vfs++;
3141                 bnxt_hwrm_func_clr_stats(bp, bp->pf.vf_info[i].fid);
3142         }
3143
3144         /*
3145          * Now configure the PF to use "the rest" of the resources.
3146          * We use STD_TX_RING_MODE here, which limits the number of TX
3147          * rings but allows QoS to function properly.  Without it, the PF
3148          * rings would not honor bandwidth settings.
3149          */
3150         rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
3151         if (rc)
3152                 goto error_free;
3153
3154         rc = update_pf_resource_max(bp);
3155         if (rc)
3156                 goto error_free;
3157
3158         return rc;
3159
3160 error_free:
3161         bnxt_hwrm_func_buf_unrgtr(bp);
3162         return rc;
3163 }
3164
3165 int bnxt_hwrm_pf_evb_mode(struct bnxt *bp)
3166 {
3167         struct hwrm_func_cfg_input req = {0};
3168         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3169         int rc;
3170
3171         HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
3172
3173         req.fid = rte_cpu_to_le_16(0xffff);
3174         req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE);
3175         req.evb_mode = bp->pf.evb_mode;
3176
3177         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3178         HWRM_CHECK_RESULT();
3179         HWRM_UNLOCK();
3180
3181         return rc;
3182 }
3183
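/*
 * Allocate a tunnel destination UDP port in the firmware and cache both
 * the firmware port id and the port value for VXLAN or Geneve, to be used
 * by the matching free path below.
 */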
3184 int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
3185                                 uint8_t tunnel_type)
3186 {
3187         struct hwrm_tunnel_dst_port_alloc_input req = {0};
3188         struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3189         int rc = 0;
3190
3191         HWRM_PREP(req, TUNNEL_DST_PORT_ALLOC, BNXT_USE_CHIMP_MB);
3192         req.tunnel_type = tunnel_type;
3193         req.tunnel_dst_port_val = port;
3194         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3195         HWRM_CHECK_RESULT();
3196
3197         switch (tunnel_type) {
3198         case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN:
3199                 bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
3200                 bp->vxlan_port = port;
3201                 break;
3202         case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE:
3203                 bp->geneve_fw_dst_port_id = resp->tunnel_dst_port_id;
3204                 bp->geneve_port = port;
3205                 break;
3206         default:
3207                 break;
3208         }
3209
3210         HWRM_UNLOCK();
3211
3212         return rc;
3213 }
3214
3215 int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
3216                                 uint8_t tunnel_type)
3217 {
3218         struct hwrm_tunnel_dst_port_free_input req = {0};
3219         struct hwrm_tunnel_dst_port_free_output *resp = bp->hwrm_cmd_resp_addr;
3220         int rc = 0;
3221
3222         HWRM_PREP(req, TUNNEL_DST_PORT_FREE, BNXT_USE_CHIMP_MB);
3223
3224         req.tunnel_type = tunnel_type;
3225         req.tunnel_dst_port_id = rte_cpu_to_be_16(port);
3226         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3227
3228         HWRM_CHECK_RESULT();
3229         HWRM_UNLOCK();
3230
3231         return rc;
3232 }
3233
3234 int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf,
3235                                         uint32_t flags)
3236 {
3237         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3238         struct hwrm_func_cfg_input req = {0};
3239         int rc;
3240
3241         HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
3242
3243         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3244         req.flags = rte_cpu_to_le_32(flags);
3245         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3246
3247         HWRM_CHECK_RESULT();
3248         HWRM_UNLOCK();
3249
3250         return rc;
3251 }
3252
3253 void vf_vnic_set_rxmask_cb(struct bnxt_vnic_info *vnic, void *flagp)
3254 {
3255         uint32_t *flag = flagp;
3256
3257         vnic->flags = *flag;
3258 }
3259
3260 int bnxt_set_rx_mask_no_vlan(struct bnxt *bp, struct bnxt_vnic_info *vnic)
3261 {
3262         return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
3263 }
3264
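/*
 * Register the host buffer through which the firmware forwards VF HWRM
 * requests to the PF driver: one HWRM_MAX_REQ_LEN slot per active VF,
 * allocated in bnxt_hwrm_allocate_vfs().
 */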
3265 int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
3266 {
3267         int rc = 0;
3268         struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
3269         struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
3270
3271         HWRM_PREP(req, FUNC_BUF_RGTR, BNXT_USE_CHIMP_MB);
3272
3273         req.req_buf_num_pages = rte_cpu_to_le_16(1);
3274         req.req_buf_page_size = rte_cpu_to_le_16(
3275                          page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));
3276         req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
3277         req.req_buf_page_addr0 =
3278                 rte_cpu_to_le_64(rte_mem_virt2iova(bp->pf.vf_req_buf));
3279         if (req.req_buf_page_addr0 == RTE_BAD_IOVA) {
3280                 PMD_DRV_LOG(ERR,
3281                         "unable to map buffer address to physical memory\n");
3282                 return -ENOMEM;
3283         }
3284
3285         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3286
3287         HWRM_CHECK_RESULT();
3288         HWRM_UNLOCK();
3289
3290         return rc;
3291 }
3292
3293 int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp)
3294 {
3295         int rc = 0;
3296         struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 };
3297         struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
3298
3299         if (!(BNXT_PF(bp) && bp->pdev->max_vfs))
3300                 return 0;
3301
3302         HWRM_PREP(req, FUNC_BUF_UNRGTR, BNXT_USE_CHIMP_MB);
3303
3304         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3305
3306         HWRM_CHECK_RESULT();
3307         HWRM_UNLOCK();
3308
3309         return rc;
3310 }
3311
3312 int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
3313 {
3314         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3315         struct hwrm_func_cfg_input req = {0};
3316         int rc;
3317
3318         HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
3319
3320         req.fid = rte_cpu_to_le_16(0xffff);
3321         req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
3322         req.enables = rte_cpu_to_le_32(
3323                         HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
3324         req.async_event_cr = rte_cpu_to_le_16(
3325                         bp->async_cp_ring->cp_ring_struct->fw_ring_id);
3326         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3327
3328         HWRM_CHECK_RESULT();
3329         HWRM_UNLOCK();
3330
3331         return rc;
3332 }
3333
3334 int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
3335 {
3336         struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3337         struct hwrm_func_vf_cfg_input req = {0};
3338         int rc;
3339
3340         HWRM_PREP(req, FUNC_VF_CFG, BNXT_USE_CHIMP_MB);
3341
3342         req.enables = rte_cpu_to_le_32(
3343                         HWRM_FUNC_VF_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
3344         req.async_event_cr = rte_cpu_to_le_16(
3345                         bp->async_cp_ring->cp_ring_struct->fw_ring_id);
3346         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3347
3348         HWRM_CHECK_RESULT();
3349         HWRM_UNLOCK();
3350
3351         return rc;
3352 }
3353
3354 int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf)
3355 {
3356         struct hwrm_func_cfg_input req = {0};
3357         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3358         uint16_t dflt_vlan, fid;
3359         uint32_t func_cfg_flags;
3360         int rc = 0;
3361
3362         HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
3363
3364         if (is_vf) {
3365                 dflt_vlan = bp->pf.vf_info[vf].dflt_vlan;
3366                 fid = bp->pf.vf_info[vf].fid;
3367                 func_cfg_flags = bp->pf.vf_info[vf].func_cfg_flags;
3368         } else {
3369                 fid = rte_cpu_to_le_16(0xffff);
3370                 func_cfg_flags = bp->pf.func_cfg_flags;
3371                 dflt_vlan = bp->vlan;
3372         }
3373
3374         req.flags = rte_cpu_to_le_32(func_cfg_flags);
3375         req.fid = rte_cpu_to_le_16(fid);
3376         req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
3377         req.dflt_vlan = rte_cpu_to_le_16(dflt_vlan);
3378
3379         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3380
3381         HWRM_CHECK_RESULT();
3382         HWRM_UNLOCK();
3383
3384         return rc;
3385 }
3386
3387 int bnxt_hwrm_func_bw_cfg(struct bnxt *bp, uint16_t vf,
3388                         uint16_t max_bw, uint16_t enables)
3389 {
3390         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3391         struct hwrm_func_cfg_input req = {0};
3392         int rc;
3393
3394         HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
3395
3396         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3397         req.enables |= rte_cpu_to_le_32(enables);
3398         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
3399         req.max_bw = rte_cpu_to_le_32(max_bw);
3400         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3401
3402         HWRM_CHECK_RESULT();
3403         HWRM_UNLOCK();
3404
3405         return rc;
3406 }
3407
3408 int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf)
3409 {
3410         struct hwrm_func_cfg_input req = {0};
3411         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3412         int rc = 0;
3413
3414         HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
3415
3416         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
3417         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3418         req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
3419         req.dflt_vlan = rte_cpu_to_le_16(bp->pf.vf_info[vf].dflt_vlan);
3420
3421         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3422
3423         HWRM_CHECK_RESULT();
3424         HWRM_UNLOCK();
3425
3426         return rc;
3427 }
3428
3429 int bnxt_hwrm_set_async_event_cr(struct bnxt *bp)
3430 {
3431         int rc;
3432
3433         if (BNXT_PF(bp))
3434                 rc = bnxt_hwrm_func_cfg_def_cp(bp);
3435         else
3436                 rc = bnxt_hwrm_vf_func_cfg_def_cp(bp);
3437
3438         return rc;
3439 }
3440
3441 int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
3442                               void *encaped, size_t ec_size)
3443 {
3444         int rc = 0;
3445         struct hwrm_reject_fwd_resp_input req = {.req_type = 0};
3446         struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
3447
3448         if (ec_size > sizeof(req.encap_request))
3449                 return -EINVAL;
3450
3451         HWRM_PREP(req, REJECT_FWD_RESP, BNXT_USE_CHIMP_MB);
3452
3453         req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
3454         memcpy(req.encap_request, encaped, ec_size);
3455
3456         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3457
3458         HWRM_CHECK_RESULT();
3459         HWRM_UNLOCK();
3460
3461         return rc;
3462 }
3463
3464 int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
3465                                        struct rte_ether_addr *mac)
3466 {
3467         struct hwrm_func_qcfg_input req = {0};
3468         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3469         int rc;
3470
3471         HWRM_PREP(req, FUNC_QCFG, BNXT_USE_CHIMP_MB);
3472
3473         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3474         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3475
3476         HWRM_CHECK_RESULT();
3477
3478         memcpy(mac->addr_bytes, resp->mac_address, RTE_ETHER_ADDR_LEN);
3479
3480         HWRM_UNLOCK();
3481
3482         return rc;
3483 }
3484
3485 int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
3486                             void *encaped, size_t ec_size)
3487 {
3488         int rc = 0;
3489         struct hwrm_exec_fwd_resp_input req = {.req_type = 0};
3490         struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
3491
3492         if (ec_size > sizeof(req.encap_request))
3493                 return -EINVAL;
3494
3495         HWRM_PREP(req, EXEC_FWD_RESP, BNXT_USE_CHIMP_MB);
3496
3497         req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
3498         memcpy(req.encap_request, encaped, ec_size);
3499
3500         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3501
3502         HWRM_CHECK_RESULT();
3503         HWRM_UNLOCK();
3504
3505         return rc;
3506 }
3507
3508 int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
3509                          struct rte_eth_stats *stats, uint8_t rx)
3510 {
3511         int rc = 0;
3512         struct hwrm_stat_ctx_query_input req = {.req_type = 0};
3513         struct hwrm_stat_ctx_query_output *resp = bp->hwrm_cmd_resp_addr;
3514
3515         HWRM_PREP(req, STAT_CTX_QUERY, BNXT_USE_CHIMP_MB);
3516
3517         req.stat_ctx_id = rte_cpu_to_le_32(cid);
3518
3519         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3520
3521         HWRM_CHECK_RESULT();
3522
3523         if (rx) {
3524                 stats->q_ipackets[idx] = rte_le_to_cpu_64(resp->rx_ucast_pkts);
3525                 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_mcast_pkts);
3526                 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_bcast_pkts);
3527                 stats->q_ibytes[idx] = rte_le_to_cpu_64(resp->rx_ucast_bytes);
3528                 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_mcast_bytes);
3529                 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_bcast_bytes);
3530                 stats->q_errors[idx] = rte_le_to_cpu_64(resp->rx_err_pkts);
3531                 stats->q_errors[idx] += rte_le_to_cpu_64(resp->rx_drop_pkts);
3532         } else {
3533                 stats->q_opackets[idx] = rte_le_to_cpu_64(resp->tx_ucast_pkts);
3534                 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_mcast_pkts);
3535                 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_bcast_pkts);
3536                 stats->q_obytes[idx] = rte_le_to_cpu_64(resp->tx_ucast_bytes);
3537                 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_mcast_bytes);
3538                 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_bcast_bytes);
3539         }
3540
3542         HWRM_UNLOCK();
3543
3544         return rc;
3545 }
3546
3547 int bnxt_hwrm_port_qstats(struct bnxt *bp)
3548 {
3549         struct hwrm_port_qstats_input req = {0};
3550         struct hwrm_port_qstats_output *resp = bp->hwrm_cmd_resp_addr;
3551         struct bnxt_pf_info *pf = &bp->pf;
3552         int rc;
3553
3554         HWRM_PREP(req, PORT_QSTATS, BNXT_USE_CHIMP_MB);
3555
3556         req.port_id = rte_cpu_to_le_16(pf->port_id);
3557         req.tx_stat_host_addr = rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
3558         req.rx_stat_host_addr = rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
3559         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3560
3561         HWRM_CHECK_RESULT();
3562         HWRM_UNLOCK();
3563
3564         return rc;
3565 }
3566
3567 int bnxt_hwrm_port_clr_stats(struct bnxt *bp)
3568 {
3569         struct hwrm_port_clr_stats_input req = {0};
3570         struct hwrm_port_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
3571         struct bnxt_pf_info *pf = &bp->pf;
3572         int rc;
3573
3574         /* Not allowed on NS2 device, NPAR, MultiHost, VF */
3575         if (!(bp->flags & BNXT_FLAG_PORT_STATS) || BNXT_VF(bp) ||
3576             BNXT_NPAR(bp) || BNXT_MH(bp) || BNXT_TOTAL_VFS(bp))
3577                 return 0;
3578
3579         HWRM_PREP(req, PORT_CLR_STATS, BNXT_USE_CHIMP_MB);
3580
3581         req.port_id = rte_cpu_to_le_16(pf->port_id);
3582         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3583
3584         HWRM_CHECK_RESULT();
3585         HWRM_UNLOCK();
3586
3587         return rc;
3588 }
3589
3590 int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
3591 {
3592         struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
3593         struct hwrm_port_led_qcaps_input req = {0};
3594         int rc;
3595
3596         if (BNXT_VF(bp))
3597                 return 0;
3598
3599         HWRM_PREP(req, PORT_LED_QCAPS, BNXT_USE_CHIMP_MB);
3600         req.port_id = bp->pf.port_id;
3601         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3602
3603         HWRM_CHECK_RESULT();
3604
3605         if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
3606                 unsigned int i;
3607
3608                 bp->num_leds = resp->num_leds;
3609                 memcpy(bp->leds, &resp->led0_id,
3610                         sizeof(bp->leds[0]) * bp->num_leds);
3611                 for (i = 0; i < bp->num_leds; i++) {
3612                         struct bnxt_led_info *led = &bp->leds[i];
3613
3614                         uint16_t caps = led->led_state_caps;
3615
3616                         if (!led->led_group_id ||
3617                                 !BNXT_LED_ALT_BLINK_CAP(caps)) {
3618                                 bp->num_leds = 0;
3619                                 break;
3620                         }
3621                 }
3622         }
3623
3624         HWRM_UNLOCK();
3625
3626         return rc;
3627 }
3628
3629 int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on)
3630 {
3631         struct hwrm_port_led_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3632         struct hwrm_port_led_cfg_input req = {0};
3633         struct bnxt_led_cfg *led_cfg;
3634         uint8_t led_state = HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_DEFAULT;
3635         uint16_t duration = 0;
3636         int rc, i;
3637
3638         if (!bp->num_leds || BNXT_VF(bp))
3639                 return -EOPNOTSUPP;
3640
3641         HWRM_PREP(req, PORT_LED_CFG, BNXT_USE_CHIMP_MB);
3642
3643         if (led_on) {
3644                 led_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT;
3645                 duration = rte_cpu_to_le_16(500);
3646         }
3647         req.port_id = bp->pf.port_id;
3648         req.num_leds = bp->num_leds;
3649         led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
3650         for (i = 0; i < bp->num_leds; i++, led_cfg++) {
3651                 req.enables |= BNXT_LED_DFLT_ENABLES(i);
3652                 led_cfg->led_id = bp->leds[i].led_id;
3653                 led_cfg->led_state = led_state;
3654                 led_cfg->led_blink_on = duration;
3655                 led_cfg->led_blink_off = duration;
3656                 led_cfg->led_group_id = bp->leds[i].led_group_id;
3657         }
3658
3659         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3660
3661         HWRM_CHECK_RESULT();
3662         HWRM_UNLOCK();
3663
3664         return rc;
3665 }
3666
3667 int bnxt_hwrm_nvm_get_dir_info(struct bnxt *bp, uint32_t *entries,
3668                                uint32_t *length)
3669 {
3670         int rc;
3671         struct hwrm_nvm_get_dir_info_input req = {0};
3672         struct hwrm_nvm_get_dir_info_output *resp = bp->hwrm_cmd_resp_addr;
3673
3674         HWRM_PREP(req, NVM_GET_DIR_INFO, BNXT_USE_CHIMP_MB);
3675
3676         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3677
3678         HWRM_CHECK_RESULT();
3679
3680         *entries = rte_le_to_cpu_32(resp->entries);
3681         *length = rte_le_to_cpu_32(resp->entry_length);
3682
3683         HWRM_UNLOCK();
3684         return rc;
3685 }
3686
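/*
 * Copy the NVM directory into 'data'.  Layout produced by the code below:
 * byte 0 holds the entry count and byte 1 the entry length (both values
 * are truncated to 8 bits by the uint8_t stores), followed by the raw
 * directory entries, 0xff-padded out to 'len'.
 */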
3687 int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data)
3688 {
3689         int rc;
3690         uint32_t dir_entries;
3691         uint32_t entry_length;
3692         uint8_t *buf;
3693         size_t buflen;
3694         rte_iova_t dma_handle;
3695         struct hwrm_nvm_get_dir_entries_input req = {0};
3696         struct hwrm_nvm_get_dir_entries_output *resp = bp->hwrm_cmd_resp_addr;
3697
3698         rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length);
3699         if (rc != 0)
3700                 return rc;
3701
3702         *data++ = dir_entries;
3703         *data++ = entry_length;
3704         len -= 2;
3705         memset(data, 0xff, len);
3706
3707         buflen = dir_entries * entry_length;
3708         buf = rte_malloc("nvm_dir", buflen, 0);
3709         if (buf == NULL)
3710                 return -ENOMEM;
3711         rte_mem_lock_page(buf);
3712         dma_handle = rte_mem_virt2iova(buf);
3713         if (dma_handle == RTE_BAD_IOVA) {
3714                 PMD_DRV_LOG(ERR, "unable to map response address to physical memory\n");
3715                 rte_free(buf);
3716                 return -ENOMEM;
3717         }
3718         HWRM_PREP(req, NVM_GET_DIR_ENTRIES, BNXT_USE_CHIMP_MB);
3719         req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
3720         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3721
3722         if (rc == 0)
3723                 memcpy(data, buf, len > buflen ? buflen : len);
3724
3725         rte_free(buf);
3726         HWRM_CHECK_RESULT();
3727         HWRM_UNLOCK();
3728
3729         return rc;
3730 }
3731
3732 int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index,
3733                              uint32_t offset, uint32_t length,
3734                              uint8_t *data)
3735 {
3736         int rc;
3737         uint8_t *buf;
3738         rte_iova_t dma_handle;
3739         struct hwrm_nvm_read_input req = {0};
3740         struct hwrm_nvm_read_output *resp = bp->hwrm_cmd_resp_addr;
3741
3742         buf = rte_malloc("nvm_item", length, 0);
3743         if (!buf)
3744                 return -ENOMEM;
3745         rte_mem_lock_page(buf);
3746
3747         dma_handle = rte_mem_virt2iova(buf);
3748         if (dma_handle == RTE_BAD_IOVA) {
3749                 PMD_DRV_LOG(ERR, "unable to map response address to physical memory\n");
3750                 rte_free(buf);
3751                 return -ENOMEM;
3752         }
3753         HWRM_PREP(req, NVM_READ, BNXT_USE_CHIMP_MB);
3754         req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
3755         req.dir_idx = rte_cpu_to_le_16(index);
3756         req.offset = rte_cpu_to_le_32(offset);
3757         req.len = rte_cpu_to_le_32(length);
3758         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3759         if (rc == 0)
3760                 memcpy(data, buf, length);
3761
3762         rte_free(buf);
3763         HWRM_CHECK_RESULT();
3764         HWRM_UNLOCK();
3765
3766         return rc;
3767 }
3768
3769 int bnxt_hwrm_erase_nvram_directory(struct bnxt *bp, uint8_t index)
3770 {
3771         int rc;
3772         struct hwrm_nvm_erase_dir_entry_input req = {0};
3773         struct hwrm_nvm_erase_dir_entry_output *resp = bp->hwrm_cmd_resp_addr;
3774
3775         HWRM_PREP(req, NVM_ERASE_DIR_ENTRY, BNXT_USE_CHIMP_MB);
3776         req.dir_idx = rte_cpu_to_le_16(index);
3777         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3778         HWRM_CHECK_RESULT();
3779         HWRM_UNLOCK();
3780
3781         return rc;
3782 }
3783
3784
3785 int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
3786                           uint16_t dir_ordinal, uint16_t dir_ext,
3787                           uint16_t dir_attr, const uint8_t *data,
3788                           size_t data_len)
3789 {
3790         int rc;
3791         struct hwrm_nvm_write_input req = {0};
3792         struct hwrm_nvm_write_output *resp = bp->hwrm_cmd_resp_addr;
3793         rte_iova_t dma_handle;
3794         uint8_t *buf;
3795
3796         buf = rte_malloc("nvm_write", data_len, 0);
3797         if (!buf)
3798                 return -ENOMEM;
3799         rte_mem_lock_page(buf);
3800
3801         dma_handle = rte_mem_virt2iova(buf);
3802         if (dma_handle == RTE_BAD_IOVA) {
3803                 PMD_DRV_LOG(ERR, "unable to map response address to physical memory\n");
3804                 rte_free(buf);
3805                 return -ENOMEM;
3806         }
3807         memcpy(buf, data, data_len);
3808
3809         HWRM_PREP(req, NVM_WRITE, BNXT_USE_CHIMP_MB);
3810
3811         req.dir_type = rte_cpu_to_le_16(dir_type);
3812         req.dir_ordinal = rte_cpu_to_le_16(dir_ordinal);
3813         req.dir_ext = rte_cpu_to_le_16(dir_ext);
3814         req.dir_attr = rte_cpu_to_le_16(dir_attr);
3815         req.dir_data_length = rte_cpu_to_le_32(data_len);
3816         req.host_src_addr = rte_cpu_to_le_64(dma_handle);
3817
3818         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3819
3820         rte_free(buf);
3821         HWRM_CHECK_RESULT();
3822         HWRM_UNLOCK();
3823
3824         return rc;
3825 }
3826
3827 static void
3828 bnxt_vnic_count(struct bnxt_vnic_info *vnic __rte_unused, void *cbdata)
3829 {
3830         uint32_t *count = cbdata;
3831
3832         *count = *count + 1;
3833 }
3834
3835 static int bnxt_vnic_count_hwrm_stub(struct bnxt *bp __rte_unused,
3836                                      struct bnxt_vnic_info *vnic __rte_unused)
3837 {
3838         return 0;
3839 }
3840
3841 int bnxt_vf_vnic_count(struct bnxt *bp, uint16_t vf)
3842 {
3843         uint32_t count = 0;
3844
3845         bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf, bnxt_vnic_count,
3846             &count, bnxt_vnic_count_hwrm_stub);
3847
3848         return count;
3849 }
3850
3851 static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
3852                                         uint16_t *vnic_ids)
3853 {
3854         struct hwrm_func_vf_vnic_ids_query_input req = {0};
3855         struct hwrm_func_vf_vnic_ids_query_output *resp =
3856                                                 bp->hwrm_cmd_resp_addr;
3857         int rc;
3858
3859         /* First query all VNIC ids */
3860         HWRM_PREP(req, FUNC_VF_VNIC_IDS_QUERY, BNXT_USE_CHIMP_MB);
3861
3862         req.vf_id = rte_cpu_to_le_16(bp->pf.first_vf_id + vf);
3863         req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics);
3864         req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2iova(vnic_ids));
3865
3866         if (req.vnic_id_tbl_addr == RTE_BAD_IOVA) {
3867                 HWRM_UNLOCK();
3868                 PMD_DRV_LOG(ERR,
3869                 "unable to map VNIC ID table address to physical memory\n");
3870                 return -ENOMEM;
3871         }
3872         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3873         HWRM_CHECK_RESULT();
3874         rc = rte_le_to_cpu_32(resp->vnic_id_cnt);
3875
3876         HWRM_UNLOCK();
3877
3878         return rc;
3879 }
3880
3881 /*
3882  * This function queries the VNIC IDs for a specified VF. It then calls
3883  * vnic_cb to update the necessary fields in vnic_info with cbdata, and
3884  * finally calls hwrm_cb to program the new VNIC configuration.
3885  */
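/*
 * For an in-tree usage example see bnxt_vf_vnic_count() above, which
 * passes bnxt_vnic_count() as vnic_cb and a no-op hwrm_cb to simply
 * tally the VNICs of a VF.
 */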
3886 int bnxt_hwrm_func_vf_vnic_query_and_config(struct bnxt *bp, uint16_t vf,
3887         void (*vnic_cb)(struct bnxt_vnic_info *, void *), void *cbdata,
3888         int (*hwrm_cb)(struct bnxt *bp, struct bnxt_vnic_info *vnic))
3889 {
3890         struct bnxt_vnic_info vnic;
3891         int rc = 0;
3892         int i, num_vnic_ids;
3893         uint16_t *vnic_ids;
3894         size_t vnic_id_sz;
3895         size_t sz;
3896
3897         /* First query all VNIC ids */
3898         vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
3899         vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
3900                         RTE_CACHE_LINE_SIZE);
3901         if (vnic_ids == NULL)
3902                 return -ENOMEM;
3903
3904         for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
3905                 rte_mem_lock_page(((char *)vnic_ids) + sz);
3906
3907         num_vnic_ids = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
3908         if (num_vnic_ids < 0) {
3909                 rte_free(vnic_ids);
3910                 return num_vnic_ids;
3911         }
3912         /* Retrieve each VNIC, apply vnic_cb, then reprogram it via hwrm_cb */
3913
3914         for (i = 0; i < num_vnic_ids; i++) {
3915                 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
3916                 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
3917                 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, bp->pf.first_vf_id + vf);
3918                 if (rc)
3919                         break;
3920                 if (vnic.mru <= 4)      /* Indicates unallocated */
3921                         continue;
3922
3923                 vnic_cb(&vnic, cbdata);
3924
3925                 rc = hwrm_cb(bp, &vnic);
3926                 if (rc)
3927                         break;
3928         }
3929
3930         rte_free(vnic_ids);
3931
3932         return rc;
3933 }
3934
3935 int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf,
3936                                               bool on)
3937 {
3938         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3939         struct hwrm_func_cfg_input req = {0};
3940         int rc;
3941
3942         HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
3943
3944         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3945         req.enables |= rte_cpu_to_le_32(
3946                         HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE);
3947         req.vlan_antispoof_mode = on ?
3948                 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN :
3949                 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK;
3950         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3951
3952         HWRM_CHECK_RESULT();
3953         HWRM_UNLOCK();
3954
3955         return rc;
3956 }
3957
3958 int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf)
3959 {
3960         struct bnxt_vnic_info vnic;
3961         uint16_t *vnic_ids;
3962         size_t vnic_id_sz;
3963         int num_vnic_ids, i;
3964         size_t sz;
3965         int rc;
3966
3967         vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
3968         vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
3969                         RTE_CACHE_LINE_SIZE);
3970         if (vnic_ids == NULL)
3971                 return -ENOMEM;
3972
3973         for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
3974                 rte_mem_lock_page(((char *)vnic_ids) + sz);
3975
3976         rc = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
3977         if (rc <= 0)
3978                 goto exit;
3979         num_vnic_ids = rc;
3980
3981         /*
3982          * Loop through to find the default VNIC ID.
3983          * TODO: The easier way would be to obtain the resp->dflt_vnic_id
3984          * by sending the hwrm_func_qcfg command to the firmware.
3985          */
3986         for (i = 0; i < num_vnic_ids; i++) {
3987                 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
3988                 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
3989                 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic,
3990                                         bp->pf.first_vf_id + vf);
3991                 if (rc)
3992                         goto exit;
3993                 if (vnic.func_default) {
3994                         rte_free(vnic_ids);
3995                         return vnic.fw_vnic_id;
3996                 }
3997         }
3998         /* Could not find a default VNIC. */
3999         PMD_DRV_LOG(ERR, "No default VNIC\n");
4000 exit:
4001         rte_free(vnic_ids);
4002         return rc;
4003 }
4004
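/*
 * Program an exact-match (EM) CFA flow for 'filter' on destination
 * 'dst_id'.  Any EM filter previously programmed for this filter is freed
 * first; each match field is copied into the request only when its
 * enables bit is set.  The command goes over the KONG channel when one is
 * available.
 */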
4005 int bnxt_hwrm_set_em_filter(struct bnxt *bp,
4006                          uint16_t dst_id,
4007                          struct bnxt_filter_info *filter)
4008 {
4009         int rc = 0;
4010         struct hwrm_cfa_em_flow_alloc_input req = {.req_type = 0 };
4011         struct hwrm_cfa_em_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4012         uint32_t enables = 0;
4013
4014         if (filter->fw_em_filter_id != UINT64_MAX)
4015                 bnxt_hwrm_clear_em_filter(bp, filter);
4016
4017         HWRM_PREP(req, CFA_EM_FLOW_ALLOC, BNXT_USE_KONG(bp));
4018
4019         req.flags = rte_cpu_to_le_32(filter->flags);
4020
4021         enables = filter->enables |
4022               HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_ID;
4023         req.dst_id = rte_cpu_to_le_16(dst_id);
4024
4025         if (filter->ip_addr_type) {
4026                 req.ip_addr_type = filter->ip_addr_type;
4027                 enables |= HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
4028         }
4029         if (enables &
4030             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
4031                 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
4032         if (enables &
4033             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_MACADDR)
4034                 memcpy(req.src_macaddr, filter->src_macaddr,
4035                        RTE_ETHER_ADDR_LEN);
4036         if (enables &
4037             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_MACADDR)
4038                 memcpy(req.dst_macaddr, filter->dst_macaddr,
4039                        RTE_ETHER_ADDR_LEN);
4040         if (enables &
4041             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_OVLAN_VID)
4042                 req.ovlan_vid = filter->l2_ovlan;
4043         if (enables &
4044             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IVLAN_VID)
4045                 req.ivlan_vid = filter->l2_ivlan;
4046         if (enables &
4047             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ETHERTYPE)
4048                 req.ethertype = rte_cpu_to_be_16(filter->ethertype);
4049         if (enables &
4050             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
4051                 req.ip_protocol = filter->ip_protocol;
4052         if (enables &
4053             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_IPADDR)
4054                 req.src_ipaddr[0] = rte_cpu_to_be_32(filter->src_ipaddr[0]);
4055         if (enables &
4056             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_IPADDR)
4057                 req.dst_ipaddr[0] = rte_cpu_to_be_32(filter->dst_ipaddr[0]);
4058         if (enables &
4059             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_PORT)
4060                 req.src_port = rte_cpu_to_be_16(filter->src_port);
4061         if (enables &
4062             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_PORT)
4063                 req.dst_port = rte_cpu_to_be_16(filter->dst_port);
4064         if (enables &
4065             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
4066                 req.mirror_vnic_id = filter->mirror_vnic_id;
4067
4068         req.enables = rte_cpu_to_le_32(enables);
4069
4070         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
4071
4072         HWRM_CHECK_RESULT();
4073
4074         filter->fw_em_filter_id = rte_le_to_cpu_64(resp->em_filter_id);
4075         HWRM_UNLOCK();
4076
4077         return rc;
4078 }
4079
4080 int bnxt_hwrm_clear_em_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
4081 {
4082         int rc = 0;
4083         struct hwrm_cfa_em_flow_free_input req = {.req_type = 0 };
4084         struct hwrm_cfa_em_flow_free_output *resp = bp->hwrm_cmd_resp_addr;
4085
4086         if (filter->fw_em_filter_id == UINT64_MAX)
4087                 return 0;
4088
4089         PMD_DRV_LOG(DEBUG, "Clear EM filter\n");
4090         HWRM_PREP(req, CFA_EM_FLOW_FREE, BNXT_USE_KONG(bp));
4091
4092         req.em_filter_id = rte_cpu_to_le_64(filter->fw_em_filter_id);
4093
4094         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
4095
4096         HWRM_CHECK_RESULT();
4097         HWRM_UNLOCK();
4098
4099         filter->fw_em_filter_id = UINT64_MAX;
4100         filter->fw_l2_filter_id = UINT64_MAX;
4101
4102         return 0;
4103 }
4104
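/*
 * Program an n-tuple CFA flow.  Unlike the EM variant above, this one
 * supports per-field masks (IP address and L4 port masks) and is sent on
 * the ChiMP channel; fields are again gated by the filter's enables bits.
 */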
4105 int bnxt_hwrm_set_ntuple_filter(struct bnxt *bp,
4106                          uint16_t dst_id,
4107                          struct bnxt_filter_info *filter)
4108 {
4109         int rc = 0;
4110         struct hwrm_cfa_ntuple_filter_alloc_input req = {.req_type = 0 };
4111         struct hwrm_cfa_ntuple_filter_alloc_output *resp =
4112                                                 bp->hwrm_cmd_resp_addr;
4113         uint32_t enables = 0;
4114
4115         if (filter->fw_ntuple_filter_id != UINT64_MAX)
4116                 bnxt_hwrm_clear_ntuple_filter(bp, filter);
4117
4118         HWRM_PREP(req, CFA_NTUPLE_FILTER_ALLOC, BNXT_USE_CHIMP_MB);
4119
4120         req.flags = rte_cpu_to_le_32(filter->flags);
4121
4122         enables = filter->enables |
4123               HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
4124         req.dst_id = rte_cpu_to_le_16(dst_id);
4125
4127         if (filter->ip_addr_type) {
4128                 req.ip_addr_type = filter->ip_addr_type;
4129                 enables |=
4130                         HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
4131         }
4132         if (enables &
4133             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
4134                 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
4135         if (enables &
4136             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR)
4137                 memcpy(req.src_macaddr, filter->src_macaddr,
4138                        RTE_ETHER_ADDR_LEN);
4139         /* Note: DST_MACADDR is deliberately not programmed for ntuple
4140          * filters; the destination MAC is matched through the L2 filter
4141          * referenced by ENABLES_L2_FILTER_ID above.
4142          */
4143         if (enables &
4144             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE)
4145                 req.ethertype = rte_cpu_to_be_16(filter->ethertype);
4146         if (enables &
4147             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
4148                 req.ip_protocol = filter->ip_protocol;
4149         if (enables &
4150             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR)
4151                 req.src_ipaddr[0] = rte_cpu_to_le_32(filter->src_ipaddr[0]);
4152         if (enables &
4153             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR_MASK)
4154                 req.src_ipaddr_mask[0] =
4155                         rte_cpu_to_le_32(filter->src_ipaddr_mask[0]);
4156         if (enables &
4157             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR)
4158                 req.dst_ipaddr[0] = rte_cpu_to_le_32(filter->dst_ipaddr[0]);
4159         if (enables &
4160             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR_MASK)
4161                 req.dst_ipaddr_mask[0] =
4162                         rte_cpu_to_le_32(filter->dst_ipaddr_mask[0]);
4163         if (enables &
4164             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT)
4165                 req.src_port = rte_cpu_to_le_16(filter->src_port);
4166         if (enables &
4167             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT_MASK)
4168                 req.src_port_mask = rte_cpu_to_le_16(filter->src_port_mask);
4169         if (enables &
4170             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT)
4171                 req.dst_port = rte_cpu_to_le_16(filter->dst_port);
4172         if (enables &
4173             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT_MASK)
4174                 req.dst_port_mask = rte_cpu_to_le_16(filter->dst_port_mask);
4175         if (enables &
4176             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
4177                 req.mirror_vnic_id = rte_cpu_to_le_16(filter->mirror_vnic_id);
4178
4179         req.enables = rte_cpu_to_le_32(enables);
4180
4181         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4182
4183         HWRM_CHECK_RESULT();
4184
4185         filter->fw_ntuple_filter_id = rte_le_to_cpu_64(resp->ntuple_filter_id);
4186         HWRM_UNLOCK();
4187
4188         return rc;
4189 }
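
/*
 * Illustrative sketch: steering a TCP 5-tuple to a VNIC.  The caller
 * supplies host-order values; the endianness conversions happen above.
 * The L2 filter referenced via ENABLES_L2_FILTER_ID must already exist
 * whenever that bit is set.
 *
 *	filter->ip_protocol = IPPROTO_TCP;
 *	filter->src_ipaddr[0] = src_ip;
 *	filter->src_ipaddr_mask[0] = UINT32_MAX;
 *	filter->dst_port = dst_port;
 *	filter->dst_port_mask = UINT16_MAX;
 *	filter->enables =
 *	     HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL |
 *	     HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR |
 *	     HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR_MASK |
 *	     HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT |
 *	     HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT_MASK;
 *	rc = bnxt_hwrm_set_ntuple_filter(bp, vnic->fw_vnic_id, filter);
 */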
4190
4191 int bnxt_hwrm_clear_ntuple_filter(struct bnxt *bp,
4192                                 struct bnxt_filter_info *filter)
4193 {
4194         int rc = 0;
4195         struct hwrm_cfa_ntuple_filter_free_input req = {.req_type = 0 };
4196         struct hwrm_cfa_ntuple_filter_free_output *resp =
4197                                                 bp->hwrm_cmd_resp_addr;
4198
4199         if (filter->fw_ntuple_filter_id == UINT64_MAX)
4200                 return 0;
4201
4202         HWRM_PREP(req, CFA_NTUPLE_FILTER_FREE, BNXT_USE_CHIMP_MB);
4203
4204         req.ntuple_filter_id = rte_cpu_to_le_64(filter->fw_ntuple_filter_id);
4205
4206         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4207
4208         HWRM_CHECK_RESULT();
4209         HWRM_UNLOCK();
4210
4211         filter->fw_ntuple_filter_id = UINT64_MAX;
4212
4213         return 0;
4214 }
4215
4216 static int
4217 bnxt_vnic_rss_configure_thor(struct bnxt *bp, struct bnxt_vnic_info *vnic)
4218 {
4219         struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
4220         uint8_t *rx_queue_state = bp->eth_dev->data->rx_queue_state;
4221         struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
4222         struct bnxt_rx_queue **rxqs = bp->rx_queues;
4223         uint16_t *ring_tbl = vnic->rss_table;
4224         int nr_ctxs = vnic->num_lb_ctxts;
4225         int max_rings = bp->rx_nr_rings;
4226         int i, j, k, cnt;
4227         int rc = 0;
4228
4229         for (i = 0, k = 0; i < nr_ctxs; i++) {
4230                 struct bnxt_rx_ring_info *rxr;
4231                 struct bnxt_cp_ring_info *cpr;
4232
4233                 HWRM_PREP(req, VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
4234
4235                 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
4236                 req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
4237                 req.hash_mode_flags = vnic->hash_mode;
4238
4239                 req.ring_grp_tbl_addr =
4240                     rte_cpu_to_le_64(vnic->rss_table_dma_addr +
4241                                      i * BNXT_RSS_ENTRIES_PER_CTX_THOR *
4242                                      2 * sizeof(*ring_tbl));
4243                 req.hash_key_tbl_addr =
4244                     rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
4245
4246                 req.ring_table_pair_index = i;
4247                 req.rss_ctx_idx = rte_cpu_to_le_16(vnic->fw_grp_ids[i]);
4248
4249                 for (j = 0; j < BNXT_RSS_ENTRIES_PER_CTX_THOR; j++) {
4250                         uint16_t ring_id;
4251
4252                         /* Find next active ring. */
4253                         for (cnt = 0; cnt < max_rings; cnt++) {
4254                                 if (rx_queue_state[k] !=
4255                                                 RTE_ETH_QUEUE_STATE_STOPPED)
4256                                         break;
4257                                 if (++k == max_rings)
4258                                         k = 0;
4259                         }
4260
4261                         /* Return if no rings are active. */
4262                         if (cnt == max_rings)
4263                                 return 0;
4264
4265                         /* Add rx/cp ring pair to RSS table. */
4266                         rxr = rxqs[k]->rx_ring;
4267                         cpr = rxqs[k]->cp_ring;
4268
4269                         ring_id = rxr->rx_ring_struct->fw_ring_id;
4270                         *ring_tbl++ = rte_cpu_to_le_16(ring_id);
4271                         ring_id = cpr->cp_ring_struct->fw_ring_id;
4272                         *ring_tbl++ = rte_cpu_to_le_16(ring_id);
4273
4274                         if (++k == max_rings)
4275                                 k = 0;
4276                 }
4277                 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req),
4278                                             BNXT_USE_CHIMP_MB);
4279
4280                 HWRM_CHECK_RESULT();
4281                 HWRM_UNLOCK();
4282         }
4283
4284         return rc;
4285 }
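
/*
 * Layout note: on Thor the RSS indirection table is an array of
 * (rx ring id, completion ring id) pairs rather than ring group ids,
 * with BNXT_RSS_ENTRIES_PER_CTX_THOR pairs per RSS context.  That is
 * why the DMA address above advances by
 * i * BNXT_RSS_ENTRIES_PER_CTX_THOR * 2 * sizeof(*ring_tbl) per context
 * and the inner loop stores two 16-bit entries per slot, skipping rings
 * that are currently stopped.
 */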
4286
4287 int bnxt_vnic_rss_configure(struct bnxt *bp, struct bnxt_vnic_info *vnic)
4288 {
4289         unsigned int rss_idx, fw_idx, i;
4290
4291         if (!(vnic->rss_table && vnic->hash_type))
4292                 return 0;
4293
4294         if (BNXT_CHIP_THOR(bp))
4295                 return bnxt_vnic_rss_configure_thor(bp, vnic);
4296
4297         /*
4298          * Fill the RSS hash & redirection table with
4299          * ring group ids for this VNIC
4300          */
4301         for (rss_idx = 0, fw_idx = 0; rss_idx < HW_HASH_INDEX_SIZE;
4302                 rss_idx++, fw_idx++) {
4303                 for (i = 0; i < bp->rx_cp_nr_rings; i++) {
4304                         fw_idx %= bp->rx_cp_nr_rings;
4305                         if (vnic->fw_grp_ids[fw_idx] != INVALID_HW_RING_ID)
4306                                 break;
4307                         fw_idx++;
4308                 }
4309                 if (i == bp->rx_cp_nr_rings)
4310                         return 0;
4311                 vnic->rss_table[rss_idx] = vnic->fw_grp_ids[fw_idx];
4312         }
4313         return bnxt_hwrm_vnic_rss_cfg(bp, vnic);
4314 }
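
/*
 * Worked example (illustrative): with three ring groups where group 1 is
 * INVALID_HW_RING_ID, the loop above fills the table round-robin with the
 * remaining groups, i.e. 0, 2, 0, 2, ... for all HW_HASH_INDEX_SIZE
 * entries, so hash buckets only ever point at active rings.
 */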
4315
4316 static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal,
4317         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
4318 {
4319         uint16_t flags;
4320
4321         req->num_cmpl_aggr_int = rte_cpu_to_le_16(hw_coal->num_cmpl_aggr_int);
4322
4323         /* This is a 6-bit value; it must not be 0 or the IRQ never stops */
4324         req->num_cmpl_dma_aggr = rte_cpu_to_le_16(hw_coal->num_cmpl_dma_aggr);
4325
4326         /* This is a 6-bit value; it must not be 0 or the IRQ never stops */
4327         req->num_cmpl_dma_aggr_during_int =
4328                 rte_cpu_to_le_16(hw_coal->num_cmpl_dma_aggr_during_int);
4329
4330         req->int_lat_tmr_max = rte_cpu_to_le_16(hw_coal->int_lat_tmr_max);
4331
4332         /* min timer set to 1/2 of interrupt timer */
4333         req->int_lat_tmr_min = rte_cpu_to_le_16(hw_coal->int_lat_tmr_min);
4334
4335         /* buf timer set to 1/4 of interrupt timer */
4336         req->cmpl_aggr_dma_tmr = rte_cpu_to_le_16(hw_coal->cmpl_aggr_dma_tmr);
4337
4338         req->cmpl_aggr_dma_tmr_during_int =
4339                 rte_cpu_to_le_16(hw_coal->cmpl_aggr_dma_tmr_during_int);
4340
4341         flags = HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET |
4342                 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_RING_IDLE;
4343         req->flags = rte_cpu_to_le_16(flags);
4344 }
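
/*
 * Worked example (illustrative values): with int_lat_tmr_max = 64 ticks,
 * the ratios noted above give int_lat_tmr_min = 32 (1/2) and
 * cmpl_aggr_dma_tmr = 16 (1/4).  TIMER_RESET and RING_IDLE are always
 * set so the aggregation timers restart cleanly on each completion
 * burst.
 */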
4345
4346 static int bnxt_hwrm_set_coal_params_thor(struct bnxt *bp,
4347                 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *agg_req)
4348 {
4349         struct hwrm_ring_aggint_qcaps_input req = {0};
4350         struct hwrm_ring_aggint_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
4351         uint32_t enables;
4352         uint16_t flags;
4353         int rc;
4354
4355         HWRM_PREP(req, RING_AGGINT_QCAPS, BNXT_USE_CHIMP_MB);
4356         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4357         HWRM_CHECK_RESULT();
4358
4359         agg_req->num_cmpl_dma_aggr = resp->num_cmpl_dma_aggr_max;
4360         agg_req->cmpl_aggr_dma_tmr = resp->cmpl_aggr_dma_tmr_min;
4361
4362         flags = HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET |
4363                 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_RING_IDLE;
4364         agg_req->flags = rte_cpu_to_le_16(flags);
4365         enables =
4366          HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_CMPL_AGGR_DMA_TMR |
4367          HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_NUM_CMPL_DMA_AGGR;
4368         agg_req->enables = rte_cpu_to_le_32(enables);
4369
4370         HWRM_UNLOCK();
4371         return rc;
4372 }
4373
4374 int bnxt_hwrm_set_ring_coal(struct bnxt *bp,
4375                         struct bnxt_coal *coal, uint16_t ring_id)
4376 {
4377         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
4378         struct hwrm_ring_cmpl_ring_cfg_aggint_params_output *resp =
4379                                                 bp->hwrm_cmd_resp_addr;
4380         int rc;
4381
4382         /* Only Thor-based and Stratus (100G) NICs support ring coalesce */
4383         if (BNXT_CHIP_THOR(bp)) {
4384                 if (bnxt_hwrm_set_coal_params_thor(bp, &req))
4385                         return -1;
4386         } else if (bnxt_stratus_device(bp)) {
4387                 bnxt_hwrm_set_coal_params(coal, &req);
4388         } else {
4389                 return 0;
4390         }
4391
4392         HWRM_PREP(req, RING_CMPL_RING_CFG_AGGINT_PARAMS, BNXT_USE_CHIMP_MB);
4393         req.ring_id = rte_cpu_to_le_16(ring_id);
4394         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4395         HWRM_CHECK_RESULT();
4396         HWRM_UNLOCK();
4397         return 0;
4398 }
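
/*
 * Illustrative sketch: programming coalescing for one completion ring.
 * The values are examples only, not tuned defaults; the 6-bit
 * aggregation counts must be non-zero as noted in
 * bnxt_hwrm_set_coal_params().
 *
 *	struct bnxt_coal coal = {
 *		.num_cmpl_aggr_int = 4,
 *		.num_cmpl_dma_aggr = 4,
 *		.num_cmpl_dma_aggr_during_int = 4,
 *		.int_lat_tmr_max = 64,
 *		.int_lat_tmr_min = 32,
 *		.cmpl_aggr_dma_tmr = 16,
 *		.cmpl_aggr_dma_tmr_during_int = 16,
 *	};
 *
 *	rc = bnxt_hwrm_set_ring_coal(bp, &coal,
 *				     cpr->cp_ring_struct->fw_ring_id);
 */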
4399
4400 #define BNXT_RTE_MEMZONE_FLAG  (RTE_MEMZONE_1GB | RTE_MEMZONE_IOVA_CONTIG)
4401 int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
4402 {
4403         struct hwrm_func_backing_store_qcaps_input req = {0};
4404         struct hwrm_func_backing_store_qcaps_output *resp =
4405                 bp->hwrm_cmd_resp_addr;
4406         struct bnxt_ctx_pg_info *ctx_pg;
4407         struct bnxt_ctx_mem_info *ctx;
4408         int total_alloc_len;
4409         int rc, i;
4410
4411         if (!BNXT_CHIP_THOR(bp) ||
4412             bp->hwrm_spec_code < HWRM_VERSION_1_9_2 ||
4413             BNXT_VF(bp) ||
4414             bp->ctx)
4415                 return 0;
4416
4417         HWRM_PREP(req, FUNC_BACKING_STORE_QCAPS, BNXT_USE_CHIMP_MB);
4418         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4419         HWRM_CHECK_RESULT_SILENT();
4420
4421         total_alloc_len = sizeof(*ctx);
4422         ctx = rte_zmalloc("bnxt_ctx_mem", total_alloc_len,
4423                           RTE_CACHE_LINE_SIZE);
4424         if (!ctx) {
4425                 rc = -ENOMEM;
4426                 goto ctx_err;
4427         }
4428
4429         ctx_pg = rte_malloc("bnxt_ctx_pg_mem",
4430                             sizeof(*ctx_pg) * BNXT_MAX_Q, RTE_CACHE_LINE_SIZE);
4431         if (!ctx_pg) {
4432                 rte_free(ctx);  /* don't leak ctx; bp->ctx is not yet set */
4433                 rc = -ENOMEM;
4434                 goto ctx_err;
4435         }
4436         for (i = 0; i < BNXT_MAX_Q; i++, ctx_pg++)
4437                 ctx->tqm_mem[i] = ctx_pg;
4438
4439         bp->ctx = ctx;
4440         ctx->qp_max_entries = rte_le_to_cpu_32(resp->qp_max_entries);
4441         ctx->qp_min_qp1_entries =
4442                 rte_le_to_cpu_16(resp->qp_min_qp1_entries);
4443         ctx->qp_max_l2_entries =
4444                 rte_le_to_cpu_16(resp->qp_max_l2_entries);
4445         ctx->qp_entry_size = rte_le_to_cpu_16(resp->qp_entry_size);
4446         ctx->srq_max_l2_entries =
4447                 rte_le_to_cpu_16(resp->srq_max_l2_entries);
4448         ctx->srq_max_entries = rte_le_to_cpu_32(resp->srq_max_entries);
4449         ctx->srq_entry_size = rte_le_to_cpu_16(resp->srq_entry_size);
4450         ctx->cq_max_l2_entries =
4451                 rte_le_to_cpu_16(resp->cq_max_l2_entries);
4452         ctx->cq_max_entries = rte_le_to_cpu_32(resp->cq_max_entries);
4453         ctx->cq_entry_size = rte_le_to_cpu_16(resp->cq_entry_size);
4454         ctx->vnic_max_vnic_entries =
4455                 rte_le_to_cpu_16(resp->vnic_max_vnic_entries);
4456         ctx->vnic_max_ring_table_entries =
4457                 rte_le_to_cpu_16(resp->vnic_max_ring_table_entries);
4458         ctx->vnic_entry_size = rte_le_to_cpu_16(resp->vnic_entry_size);
4459         ctx->stat_max_entries =
4460                 rte_le_to_cpu_32(resp->stat_max_entries);
4461         ctx->stat_entry_size = rte_le_to_cpu_16(resp->stat_entry_size);
4462         ctx->tqm_entry_size = rte_le_to_cpu_16(resp->tqm_entry_size);
4463         ctx->tqm_min_entries_per_ring =
4464                 rte_le_to_cpu_32(resp->tqm_min_entries_per_ring);
4465         ctx->tqm_max_entries_per_ring =
4466                 rte_le_to_cpu_32(resp->tqm_max_entries_per_ring);
4467         ctx->tqm_entries_multiple = resp->tqm_entries_multiple;
4468         if (!ctx->tqm_entries_multiple)
4469                 ctx->tqm_entries_multiple = 1;
4470         ctx->mrav_max_entries =
4471                 rte_le_to_cpu_32(resp->mrav_max_entries);
4472         ctx->mrav_entry_size = rte_le_to_cpu_16(resp->mrav_entry_size);
4473         ctx->tim_entry_size = rte_le_to_cpu_16(resp->tim_entry_size);
4474         ctx->tim_max_entries = rte_le_to_cpu_32(resp->tim_max_entries);
4475 ctx_err:
4476         HWRM_UNLOCK();
4477         return rc;
4478 }
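
/*
 * Usage note: the per-type limits captured in bp->ctx here are consumed
 * later (see bnxt_alloc_ctx_mem() in bnxt_ethdev.c) to size the
 * backing-store pages that bnxt_hwrm_func_backing_store_cfg() below
 * hands to the firmware.  HWRM_CHECK_RESULT_SILENT() is used because
 * firmware without backing-store support rejects this query and that is
 * not an error.
 */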
4479
4480 int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, uint32_t enables)
4481 {
4482         struct hwrm_func_backing_store_cfg_input req = {0};
4483         struct hwrm_func_backing_store_cfg_output *resp =
4484                 bp->hwrm_cmd_resp_addr;
4485         struct bnxt_ctx_mem_info *ctx = bp->ctx;
4486         struct bnxt_ctx_pg_info *ctx_pg;
4487         uint32_t *num_entries;
4488         uint64_t *pg_dir;
4489         uint8_t *pg_attr;
4490         uint32_t ena;
4491         int i, rc;
4492
4493         if (!ctx)
4494                 return 0;
4495
4496         HWRM_PREP(req, FUNC_BACKING_STORE_CFG, BNXT_USE_CHIMP_MB);
4497         req.enables = rte_cpu_to_le_32(enables);
4498
4499         if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_QP) {
4500                 ctx_pg = &ctx->qp_mem;
4501                 req.qp_num_entries = rte_cpu_to_le_32(ctx_pg->entries);
4502                 req.qp_num_qp1_entries =
4503                         rte_cpu_to_le_16(ctx->qp_min_qp1_entries);
4504                 req.qp_num_l2_entries =
4505                         rte_cpu_to_le_16(ctx->qp_max_l2_entries);
4506                 req.qp_entry_size = rte_cpu_to_le_16(ctx->qp_entry_size);
4507                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
4508                                       &req.qpc_pg_size_qpc_lvl,
4509                                       &req.qpc_page_dir);
4510         }
4511
4512         if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_SRQ) {
4513                 ctx_pg = &ctx->srq_mem;
4514                 req.srq_num_entries = rte_cpu_to_le_32(ctx_pg->entries);
4515                 req.srq_num_l2_entries =
4516                                  rte_cpu_to_le_16(ctx->srq_max_l2_entries);
4517                 req.srq_entry_size = rte_cpu_to_le_16(ctx->srq_entry_size);
4518                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
4519                                       &req.srq_pg_size_srq_lvl,
4520                                       &req.srq_page_dir);
4521         }
4522
4523         if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_CQ) {
4524                 ctx_pg = &ctx->cq_mem;
4525                 req.cq_num_entries = rte_cpu_to_le_32(ctx_pg->entries);
4526                 req.cq_num_l2_entries =
4527                                 rte_cpu_to_le_16(ctx->cq_max_l2_entries);
4528                 req.cq_entry_size = rte_cpu_to_le_16(ctx->cq_entry_size);
4529                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
4530                                       &req.cq_pg_size_cq_lvl,
4531                                       &req.cq_page_dir);
4532         }
4533
4534         if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_VNIC) {
4535                 ctx_pg = &ctx->vnic_mem;
4536                 req.vnic_num_vnic_entries =
4537                         rte_cpu_to_le_16(ctx->vnic_max_vnic_entries);
4538                 req.vnic_num_ring_table_entries =
4539                         rte_cpu_to_le_16(ctx->vnic_max_ring_table_entries);
4540                 req.vnic_entry_size = rte_cpu_to_le_16(ctx->vnic_entry_size);
4541                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
4542                                       &req.vnic_pg_size_vnic_lvl,
4543                                       &req.vnic_page_dir);
4544         }
4545
4546         if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_STAT) {
4547                 ctx_pg = &ctx->stat_mem;
4548                 req.stat_num_entries = rte_cpu_to_le_32(ctx->stat_max_entries);
4549                 req.stat_entry_size = rte_cpu_to_le_16(ctx->stat_entry_size);
4550                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
4551                                       &req.stat_pg_size_stat_lvl,
4552                                       &req.stat_page_dir);
4553         }
4554
4555         req.tqm_entry_size = rte_cpu_to_le_16(ctx->tqm_entry_size);
4556         num_entries = &req.tqm_sp_num_entries;
4557         pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl;
4558         pg_dir = &req.tqm_sp_page_dir;
4559         ena = HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP;
4560         for (i = 0; i < 9; i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
4561                 if (!(enables & ena))
4562                         continue;
4563
4566                 ctx_pg = ctx->tqm_mem[i];
4567                 *num_entries = rte_cpu_to_le_32(ctx_pg->entries);
4568                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
4569         }
4570
4571         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4572         HWRM_CHECK_RESULT();
4573         HWRM_UNLOCK();
4574
4575         return rc;
4576 }
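
/*
 * Illustrative sketch: configuring the QP and SRQ contexts plus the
 * slow-path TQM ring in one call.  The TQM loop above walks nine
 * regions, the slow-path ring plus eight TQM rings, by shifting the
 * enable bit up from ...ENABLES_TQM_SP.
 *
 *	uint32_t ena = HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_QP |
 *		       HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_SRQ |
 *		       HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP;
 *
 *	rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
 */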
4577
4578 int bnxt_hwrm_ext_port_qstats(struct bnxt *bp)
4579 {
4580         struct hwrm_port_qstats_ext_input req = {0};
4581         struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
4582         struct bnxt_pf_info *pf = &bp->pf;
4583         int rc;
4584
4585         if (!(bp->flags & BNXT_FLAG_EXT_RX_PORT_STATS ||
4586               bp->flags & BNXT_FLAG_EXT_TX_PORT_STATS))
4587                 return 0;
4588
4589         HWRM_PREP(req, PORT_QSTATS_EXT, BNXT_USE_CHIMP_MB);
4590
4591         req.port_id = rte_cpu_to_le_16(pf->port_id);
4592         if (bp->flags & BNXT_FLAG_EXT_TX_PORT_STATS) {
4593                 req.tx_stat_host_addr =
4594                         rte_cpu_to_le_64(bp->hw_tx_port_stats_ext_map);
4595                 req.tx_stat_size =
4596                         rte_cpu_to_le_16(sizeof(struct tx_port_stats_ext));
4597         }
4598         if (bp->flags & BNXT_FLAG_EXT_RX_PORT_STATS) {
4599                 req.rx_stat_host_addr =
4600                         rte_cpu_to_le_64(bp->hw_rx_port_stats_ext_map);
4601                 req.rx_stat_size =
4602                         rte_cpu_to_le_16(sizeof(struct rx_port_stats_ext));
4603         }
4604         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4605
4606         if (rc) {
4607                 bp->fw_rx_port_stats_ext_size = 0;
4608                 bp->fw_tx_port_stats_ext_size = 0;
4609         } else {
4610                 bp->fw_rx_port_stats_ext_size =
4611                         rte_le_to_cpu_16(resp->rx_stat_size);
4612                 bp->fw_tx_port_stats_ext_size =
4613                         rte_le_to_cpu_16(resp->tx_stat_size);
4614         }
4615
4616         HWRM_CHECK_RESULT();
4617         HWRM_UNLOCK();
4618
4619         return rc;
4620 }
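
/*
 * Usage note: FW reports back how many bytes of each extended stats
 * structure it actually filled in; caching those sizes in
 * bp->fw_rx_port_stats_ext_size and bp->fw_tx_port_stats_ext_size lets
 * the stats path avoid reading fields this firmware never wrote.
 */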
4621
4622 int
4623 bnxt_hwrm_tunnel_redirect(struct bnxt *bp, uint8_t type)
4624 {
4625         struct hwrm_cfa_redirect_tunnel_type_alloc_input req = {0};
4626         struct hwrm_cfa_redirect_tunnel_type_alloc_output *resp =
4627                 bp->hwrm_cmd_resp_addr;
4628         int rc = 0;
4629
4630         HWRM_PREP(req, CFA_REDIRECT_TUNNEL_TYPE_ALLOC, BNXT_USE_CHIMP_MB);
4631         req.tunnel_type = type;
4632         req.dest_fid = bp->fw_fid;
4633         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4634         HWRM_CHECK_RESULT();
4635
4636         HWRM_UNLOCK();
4637
4638         return rc;
4639 }
4640
4641 int
4642 bnxt_hwrm_tunnel_redirect_free(struct bnxt *bp, uint8_t type)
4643 {
4644         struct hwrm_cfa_redirect_tunnel_type_free_input req = {0};
4645         struct hwrm_cfa_redirect_tunnel_type_free_output *resp =
4646                 bp->hwrm_cmd_resp_addr;
4647         int rc = 0;
4648
4649         HWRM_PREP(req, CFA_REDIRECT_TUNNEL_TYPE_FREE, BNXT_USE_CHIMP_MB);
4650         req.tunnel_type = type;
4651         req.dest_fid = bp->fw_fid;
4652         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4653         HWRM_CHECK_RESULT();
4654
4655         HWRM_UNLOCK();
4656
4657         return rc;
4658 }
4659
4660 int bnxt_hwrm_tunnel_redirect_query(struct bnxt *bp, uint32_t *type)
4661 {
4662         struct hwrm_cfa_redirect_query_tunnel_type_input req = {0};
4663         struct hwrm_cfa_redirect_query_tunnel_type_output *resp =
4664                 bp->hwrm_cmd_resp_addr;
4665         int rc = 0;
4666
4667         HWRM_PREP(req, CFA_REDIRECT_QUERY_TUNNEL_TYPE, BNXT_USE_CHIMP_MB);
4668         req.src_fid = bp->fw_fid;
4669         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4670         HWRM_CHECK_RESULT();
4671
4672         if (type)
4673                 *type = rte_le_to_cpu_32(resp->tunnel_mask);
4674
4675         HWRM_UNLOCK();
4676
4677         return rc;
4678 }
4679
4680 int bnxt_hwrm_tunnel_redirect_info(struct bnxt *bp, uint8_t tun_type,
4681                                    uint16_t *dst_fid)
4682 {
4683         struct hwrm_cfa_redirect_tunnel_type_info_input req = {0};
4684         struct hwrm_cfa_redirect_tunnel_type_info_output *resp =
4685                 bp->hwrm_cmd_resp_addr;
4686         int rc = 0;
4687
4688         HWRM_PREP(req, CFA_REDIRECT_TUNNEL_TYPE_INFO, BNXT_USE_CHIMP_MB);
4689         req.src_fid = bp->fw_fid;
4690         req.tunnel_type = tun_type;
4691         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4692         HWRM_CHECK_RESULT();
4693
4694         if (dst_fid)
4695                 *dst_fid = rte_le_to_cpu_16(resp->dest_fid);
4696
4697         PMD_DRV_LOG(DEBUG, "dst_fid: %x\n", rte_le_to_cpu_16(resp->dest_fid));
4698
4699         HWRM_UNLOCK();
4700
4701         return rc;
4702 }
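
/*
 * Illustrative sketch: redirecting VXLAN tunnel traffic to this function
 * and reading back the destination FID.  The tunnel type constants
 * follow the usual HSI naming pattern in hsi_struct_def_dpdk.h.
 *
 *	uint16_t dst_fid;
 *
 *	rc = bnxt_hwrm_tunnel_redirect(bp,
 *		HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_TUNNEL_TYPE_VXLAN);
 *	if (!rc)
 *		rc = bnxt_hwrm_tunnel_redirect_info(bp,
 *			HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_VXLAN,
 *			&dst_fid);
 */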
4703
4704 int bnxt_hwrm_set_mac(struct bnxt *bp)
4705 {
4706         struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
4707         struct hwrm_func_vf_cfg_input req = {0};
4708         int rc = 0;
4709
4710         if (!BNXT_VF(bp))
4711                 return 0;
4712
4713         HWRM_PREP(req, FUNC_VF_CFG, BNXT_USE_CHIMP_MB);
4714
4715         req.enables =
4716                 rte_cpu_to_le_32(HWRM_FUNC_VF_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
4717         memcpy(req.dflt_mac_addr, bp->mac_addr, RTE_ETHER_ADDR_LEN);
4718
4719         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4720
4721         HWRM_CHECK_RESULT();
4722
4723         memcpy(bp->dflt_mac_addr, bp->mac_addr, RTE_ETHER_ADDR_LEN);
4724         HWRM_UNLOCK();
4725
4726         return rc;
4727 }
4728
4729 int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
4730 {
4731         struct hwrm_func_drv_if_change_output *resp = bp->hwrm_cmd_resp_addr;
4732         struct hwrm_func_drv_if_change_input req = {0};
4733         uint32_t flags;
4734         int rc;
4735
4736         if (!(bp->flags & BNXT_FLAG_FW_CAP_IF_CHANGE))
4737                 return 0;
4738
4739         /* Do not issue FUNC_DRV_IF_CHANGE during reset recovery.
4740          * If FUNC_DRV_IF_CHANGE is sent with the "down" flag before
4741          * FUNC_DRV_UNRGTR, the FW resets before FUNC_DRV_UNRGTR completes.
4742          */
4743         if (!up && (bp->flags & BNXT_FLAG_FW_RESET))
4744                 return 0;
4745
4746         HWRM_PREP(req, FUNC_DRV_IF_CHANGE, BNXT_USE_CHIMP_MB);
4747
4748         if (up)
4749                 req.flags =
4750                 rte_cpu_to_le_32(HWRM_FUNC_DRV_IF_CHANGE_INPUT_FLAGS_UP);
4751
4752         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4753
4754         HWRM_CHECK_RESULT();
4755         flags = rte_le_to_cpu_32(resp->flags);
4756         HWRM_UNLOCK();
4757
4758         if (flags & HWRM_FUNC_DRV_IF_CHANGE_OUTPUT_FLAGS_HOT_FW_RESET_DONE) {
4759                 PMD_DRV_LOG(INFO, "FW reset happened while port was down\n");
4760                 bp->flags |= BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE;
4761         }
4762
4763         return 0;
4764 }
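
/*
 * Usage note: dev_start issues bnxt_hwrm_if_change(bp, true) so firmware
 * that reset while the port was down can report it via
 * HOT_FW_RESET_DONE; the BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE flag set
 * above then triggers re-initialization of HW resources before traffic
 * starts.
 */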
4765
4766 int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
4767 {
4768         struct hwrm_error_recovery_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
4769         struct bnxt_error_recovery_info *info = bp->recovery_info;
4770         struct hwrm_error_recovery_qcfg_input req = {0};
4771         uint32_t flags = 0;
4772         unsigned int i;
4773         int rc;
4774
4775         /* Older FW does not have error recovery support */
4776         if (!(bp->flags & BNXT_FLAG_FW_CAP_ERROR_RECOVERY))
4777                 return 0;
4778
4779         if (!info) {
4780                 info = rte_zmalloc("bnxt_hwrm_error_recovery_qcfg",
4781                                    sizeof(*info), 0);
4782                 if (info == NULL)
4783                         return -ENOMEM;
4784                 bp->recovery_info = info;
4785         } else {
4786                 memset(info, 0, sizeof(*info));
4787         }
4788
4789         HWRM_PREP(req, ERROR_RECOVERY_QCFG, BNXT_USE_CHIMP_MB);
4790
4791         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4792
4793         HWRM_CHECK_RESULT();
4794
4795         flags = rte_le_to_cpu_32(resp->flags);
4796         if (flags & HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FLAGS_HOST)
4797                 info->flags |= BNXT_FLAG_ERROR_RECOVERY_HOST;
4798         else if (flags & HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FLAGS_CO_CPU)
4799                 info->flags |= BNXT_FLAG_ERROR_RECOVERY_CO_CPU;
4800
4801         if ((info->flags & BNXT_FLAG_ERROR_RECOVERY_CO_CPU) &&
4802             !(bp->flags & BNXT_FLAG_KONG_MB_EN)) {
4803                 rc = -EINVAL;
4804                 goto err;
4805         }
4806
4807         /* FW returned values are in units of 100msec */
4808         info->driver_polling_freq =
4809                 rte_le_to_cpu_32(resp->driver_polling_freq) * 100;
4810         info->master_func_wait_period =
4811                 rte_le_to_cpu_32(resp->master_func_wait_period) * 100;
4812         info->normal_func_wait_period =
4813                 rte_le_to_cpu_32(resp->normal_func_wait_period) * 100;
4814         info->master_func_wait_period_after_reset =
4815                 rte_le_to_cpu_32(resp->master_func_wait_period_after_reset) * 100;
4816         info->max_bailout_time_after_reset =
4817                 rte_le_to_cpu_32(resp->max_bailout_time_after_reset) * 100;
4818         info->status_regs[BNXT_FW_STATUS_REG] =
4819                 rte_le_to_cpu_32(resp->fw_health_status_reg);
4820         info->status_regs[BNXT_FW_HEARTBEAT_CNT_REG] =
4821                 rte_le_to_cpu_32(resp->fw_heartbeat_reg);
4822         info->status_regs[BNXT_FW_RECOVERY_CNT_REG] =
4823                 rte_le_to_cpu_32(resp->fw_reset_cnt_reg);
4824         info->status_regs[BNXT_FW_RESET_INPROG_REG] =
4825                 rte_le_to_cpu_32(resp->reset_inprogress_reg);
4826         info->reg_array_cnt =
4827                 rte_le_to_cpu_32(resp->reg_array_cnt);
4828
4829         if (info->reg_array_cnt >= BNXT_NUM_RESET_REG) {
4830                 rc = -EINVAL;
4831                 goto err;
4832         }
4833
4834         for (i = 0; i < info->reg_array_cnt; i++) {
4835                 info->reset_reg[i] =
4836                         rte_le_to_cpu_32(resp->reset_reg[i]);
4837                 info->reset_reg_val[i] =
4838                         rte_le_to_cpu_32(resp->reset_reg_val[i]);
4839                 info->delay_after_reset[i] =
4840                         resp->delay_after_reset[i];
4841         }
4842 err:
4843         HWRM_UNLOCK();
4844
4845         /* Map the FW status registers */
4846         if (!rc)
4847                 rc = bnxt_map_fw_health_status_regs(bp);
4848
4849         if (rc) {
4850                 rte_free(bp->recovery_info);
4851                 bp->recovery_info = NULL;
4852         }
4853         return rc;
4854 }
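
/*
 * Illustrative sketch: consuming the recovery info.  The health monitor
 * polls every info->driver_polling_freq milliseconds; a stuck
 * BNXT_FW_STATUS_REG heartbeat counter means firmware stopped
 * responding, and an incremented BNXT_FW_RECOVERY_CNT_REG means a
 * recovery cycle completed.  bnxt_read_fw_status_reg() is assumed to be
 * the accessor paired with bnxt_map_fw_health_status_regs().
 *
 *	uint32_t hb = bnxt_read_fw_status_reg(bp, BNXT_FW_HEARTBEAT_CNT_REG);
 */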
4855
4856 int bnxt_hwrm_fw_reset(struct bnxt *bp)
4857 {
4858         struct hwrm_fw_reset_output *resp = bp->hwrm_cmd_resp_addr;
4859         struct hwrm_fw_reset_input req = {0};
4860         int rc;
4861
4862         if (!BNXT_PF(bp))
4863                 return -EOPNOTSUPP;
4864
4865         HWRM_PREP(req, FW_RESET, BNXT_USE_KONG(bp));
4866
4867         req.embedded_proc_type =
4868                 HWRM_FW_RESET_INPUT_EMBEDDED_PROC_TYPE_CHIP;
4869         req.selfrst_status =
4870                 HWRM_FW_RESET_INPUT_SELFRST_STATUS_SELFRSTASAP;
4871         req.flags = HWRM_FW_RESET_INPUT_FLAGS_RESET_GRACEFUL;
4872
4873         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req),
4874                                     BNXT_USE_KONG(bp));
4875
4876         HWRM_CHECK_RESULT();
4877         HWRM_UNLOCK();
4878
4879         return rc;
4880 }
4881
4882 int bnxt_hwrm_port_ts_query(struct bnxt *bp, uint8_t path, uint64_t *timestamp)
4883 {
4884         struct hwrm_port_ts_query_output *resp = bp->hwrm_cmd_resp_addr;
4885         struct hwrm_port_ts_query_input req = {0};
4886         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
4887         uint32_t flags = 0;
4888         int rc;
4889
4890         if (!ptp)
4891                 return 0;
4892
4893         HWRM_PREP(req, PORT_TS_QUERY, BNXT_USE_CHIMP_MB);
4894
4895         switch (path) {
4896         case BNXT_PTP_FLAGS_PATH_TX:
4897                 flags |= HWRM_PORT_TS_QUERY_INPUT_FLAGS_PATH_TX;
4898                 break;
4899         case BNXT_PTP_FLAGS_PATH_RX:
4900                 flags |= HWRM_PORT_TS_QUERY_INPUT_FLAGS_PATH_RX;
4901                 break;
4902         case BNXT_PTP_FLAGS_CURRENT_TIME:
4903                 flags |= HWRM_PORT_TS_QUERY_INPUT_FLAGS_CURRENT_TIME;
4904                 break;
4905         }
4906
4907         req.flags = rte_cpu_to_le_32(flags);
4908         req.port_id = rte_cpu_to_le_16(bp->pf.port_id);
4909
4910         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4911
4912         HWRM_CHECK_RESULT();
4913
4914         if (timestamp) {
4915                 *timestamp = rte_le_to_cpu_32(resp->ptp_msg_ts[0]);
4916                 *timestamp |=
4917                         (uint64_t)(rte_le_to_cpu_32(resp->ptp_msg_ts[1])) << 32;
4918         }
4919         HWRM_UNLOCK();
4920
4921         return rc;
4922 }
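
/*
 * Illustrative sketch: reading the current PHC time.  The function above
 * combines the two 32-bit halves of ptp_msg_ts into one 64-bit value.
 *
 *	uint64_t ns = 0;
 *
 *	rc = bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_CURRENT_TIME, &ns);
 */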
4923
4924 int bnxt_hwrm_cfa_adv_flow_mgmt_qcaps(struct bnxt *bp)
4925 {
4926         struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp =
4927                                         bp->hwrm_cmd_resp_addr;
4928         struct hwrm_cfa_adv_flow_mgnt_qcaps_input req = {0};
4929         uint32_t flags = 0;
4930         int rc = 0;
4931
4932         if (!(bp->flags & BNXT_FLAG_ADV_FLOW_MGMT))
4933                 return rc;
4934
4935         if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
4936                 PMD_DRV_LOG(DEBUG,
4937                             "Not a PF or trusted VF. Command not supported\n");
4938                 return 0;
4939         }
4940
4941         HWRM_PREP(req, CFA_ADV_FLOW_MGNT_QCAPS, BNXT_USE_KONG(bp));
4942         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
4943
4944         HWRM_CHECK_RESULT();
4945         flags = rte_le_to_cpu_32(resp->flags);
4946         HWRM_UNLOCK();
4947
4948         if (flags & HWRM_CFA_ADV_FLOW_MGNT_QCAPS_L2_HDR_SRC_FILTER_EN) {
4949                 bp->flow_flags |= BNXT_FLOW_FLAG_L2_HDR_SRC_FILTER_EN;
4950                 PMD_DRV_LOG(INFO, "Source L2 header filtering enabled\n");
4951         }
4952
4953         return rc;
4954 }
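
/*
 * Usage note: BNXT_FLOW_FLAG_L2_HDR_SRC_FILTER_EN recorded here is what
 * later lets the rte_flow code accept rules that match on the source MAC
 * address (the SMAC filters this patch adds); without the capability bit
 * from firmware, such rules are rejected at validation time.
 */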