/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2021 Broadcom
 * All rights reserved.
 */

#include <unistd.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_version.h>
#include <rte_io.h>

#include "bnxt.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_ring.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"

#define HWRM_SPEC_CODE_1_8_3            0x10803
#define HWRM_VERSION_1_9_1              0x10901
#define HWRM_VERSION_1_9_2              0x10903

struct bnxt_plcmodes_cfg {
        uint32_t        flags;
        uint16_t        jumbo_thresh;
        uint16_t        hds_offset;
        uint16_t        hds_threshold;
};

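/*
 * Return the log2 of the smallest supported page size that can hold
 * "size" bytes, e.g. page_getenum(6000) == 13 (an 8KB page). Sizes
 * above 1GB are out of range and get the largest exponent, 31.
 */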
static int page_getenum(size_t size)
{
        if (size <= 1 << 4)
                return 4;
        if (size <= 1 << 12)
                return 12;
        if (size <= 1 << 13)
                return 13;
        if (size <= 1 << 16)
                return 16;
        if (size <= 1 << 21)
                return 21;
        if (size <= 1 << 22)
                return 22;
        if (size <= 1 << 30)
                return 30;
        PMD_DRV_LOG(ERR, "Page size %zu out of range\n", size);
        return sizeof(int) * 8 - 1;
}

static int page_roundup(size_t size)
{
        return 1 << page_getenum(size);
}

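/*
 * Fill the HWRM page attribute and page directory fields for a ring
 * memory area: a multi-page area references its page table indirectly
 * (*pg_attr = 1), while a single page is referenced directly.
 */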
static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem,
                                  uint8_t *pg_attr,
                                  uint64_t *pg_dir)
{
        if (rmem->nr_pages == 0)
                return;

        if (rmem->nr_pages > 1) {
                *pg_attr = 1;
                *pg_dir = rte_cpu_to_le_64(rmem->pg_tbl_map);
        } else {
                *pg_dir = rte_cpu_to_le_64(rmem->dma_arr[0]);
        }
}

/*
 * HWRM Functions (sent to HWRM)
 * These are named bnxt_hwrm_*() and return 0 on success, -ETIMEDOUT if the
 * HWRM command times out, or a negative error code if the HWRM command is
 * rejected by the FW.
 */

static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
                                  uint32_t msg_len, bool use_kong_mb)
{
        unsigned int i;
        struct input *req = msg;
        struct output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t *data = msg;
        uint8_t *bar;
        uint8_t *valid;
        uint16_t max_req_len = bp->max_req_len;
        struct hwrm_short_input short_input = { 0 };
        uint16_t bar_offset = use_kong_mb ?
                GRCPF_REG_KONG_CHANNEL_OFFSET : GRCPF_REG_CHIMP_CHANNEL_OFFSET;
        uint16_t mb_trigger_offset = use_kong_mb ?
                GRCPF_REG_KONG_COMM_TRIGGER : GRCPF_REG_CHIMP_COMM_TRIGGER;
        uint32_t timeout;

        /* Do not send HWRM commands to firmware in error state */
        if (bp->flags & BNXT_FLAG_FATAL_ERROR)
                return 0;

        timeout = bp->hwrm_cmd_timeout;

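        /*
         * Use the short command format when the FW mandates it or when the
         * request does not fit in the mailbox: the full request is staged
         * in a pre-mapped DMA buffer and only a hwrm_short_input descriptor
         * pointing at it is written to the channel.
         */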
        if (bp->flags & BNXT_FLAG_SHORT_CMD ||
            msg_len > bp->max_req_len) {
                void *short_cmd_req = bp->hwrm_short_cmd_req_addr;

                memset(short_cmd_req, 0, bp->hwrm_max_ext_req_len);
                memcpy(short_cmd_req, req, msg_len);

                short_input.req_type = rte_cpu_to_le_16(req->req_type);
                short_input.signature = rte_cpu_to_le_16(
                                        HWRM_SHORT_INPUT_SIGNATURE_SHORT_CMD);
                short_input.size = rte_cpu_to_le_16(msg_len);
                short_input.req_addr =
                        rte_cpu_to_le_64(bp->hwrm_short_cmd_req_dma_addr);

                data = (uint32_t *)&short_input;
                msg_len = sizeof(short_input);

                max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
        }

        /* Write request msg to hwrm channel */
        for (i = 0; i < msg_len; i += 4) {
                bar = (uint8_t *)bp->bar0 + bar_offset + i;
                rte_write32(*data, bar);
                data++;
        }

        /* Zero the rest of the request space */
        for (; i < max_req_len; i += 4) {
                bar = (uint8_t *)bp->bar0 + bar_offset + i;
                rte_write32(0, bar);
        }

        /* Ring channel doorbell */
        bar = (uint8_t *)bp->bar0 + mb_trigger_offset;
        rte_write32(1, bar);
        /*
         * Make sure the channel doorbell write completes before reading
         * the response, to avoid getting stale or invalid responses.
         */
        rte_io_mb();

        /* Poll for the valid bit */
        for (i = 0; i < timeout; i++) {
                /* Sanity check on the resp->resp_len */
                rte_io_rmb();
                if (resp->resp_len && resp->resp_len <= bp->max_resp_len) {
                        /* Last byte of resp contains the valid key */
                        valid = (uint8_t *)resp + resp->resp_len - 1;
                        if (*valid == HWRM_RESP_VALID_KEY)
                                break;
                }
                rte_delay_us(1);
        }

        if (i >= timeout) {
                /* Suppress VER_GET timeout messages during reset recovery */
                if (bp->flags & BNXT_FLAG_FW_RESET &&
                    rte_cpu_to_le_16(req->req_type) == HWRM_VER_GET)
                        return -ETIMEDOUT;

                PMD_DRV_LOG(ERR,
                            "Error(timeout) sending msg 0x%04x, seq_id %d\n",
                            req->req_type, req->seq_id);
                return -ETIMEDOUT;
        }
        return 0;
}

/*
 * HWRM_PREP() should be used to prepare *ALL* HWRM commands. It grabs the
 * spinlock and does initial processing.
 *
 * HWRM_CHECK_RESULT() checks the command status and, on failure, releases
 * the spinlock and returns from the calling function with a negative errno.
 * If the calling function does not use the regular int return codes,
 * HWRM_CHECK_RESULT() should not be used directly; rather it should be
 * copied and modified to suit the function.
 *
 * HWRM_UNLOCK() must be called after all response processing is completed.
 */
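/*
 * Typical usage, as a sketch only (HWRM_XYZ and the hwrm_xyz_* types stand
 * in for a real command):
 *
 *	struct hwrm_xyz_input req = {.req_type = 0 };
 *	struct hwrm_xyz_output *resp = bp->hwrm_cmd_resp_addr;
 *
 *	HWRM_PREP(&req, HWRM_XYZ, BNXT_USE_CHIMP_MB);
 *	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 *	HWRM_CHECK_RESULT();
 *	(read any needed fields out of *resp while the lock is still held)
 *	HWRM_UNLOCK();
 */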
#define HWRM_PREP(req, type, kong) do { \
        rte_spinlock_lock(&bp->hwrm_lock); \
        if (bp->hwrm_cmd_resp_addr == NULL) { \
                rte_spinlock_unlock(&bp->hwrm_lock); \
                return -EACCES; \
        } \
        memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
        (req)->req_type = rte_cpu_to_le_16(type); \
        (req)->cmpl_ring = rte_cpu_to_le_16(-1); \
        (req)->seq_id = kong ? rte_cpu_to_le_16(bp->kong_cmd_seq++) :\
                rte_cpu_to_le_16(bp->chimp_cmd_seq++); \
        (req)->target_id = rte_cpu_to_le_16(0xffff); \
        (req)->resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr); \
} while (0)

#define HWRM_CHECK_RESULT_SILENT() do {\
        if (rc) { \
                rte_spinlock_unlock(&bp->hwrm_lock); \
                return rc; \
        } \
        if (resp->error_code) { \
                rc = rte_le_to_cpu_16(resp->error_code); \
                rte_spinlock_unlock(&bp->hwrm_lock); \
                return rc; \
        } \
} while (0)

#define HWRM_CHECK_RESULT() do {\
        if (rc) { \
                PMD_DRV_LOG(ERR, "failed rc:%d\n", rc); \
                rte_spinlock_unlock(&bp->hwrm_lock); \
                if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
                        rc = -EACCES; \
                else if (rc == HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR) \
                        rc = -ENOSPC; \
                else if (rc == HWRM_ERR_CODE_INVALID_PARAMS) \
                        rc = -EINVAL; \
                else if (rc == HWRM_ERR_CODE_CMD_NOT_SUPPORTED) \
                        rc = -ENOTSUP; \
                else if (rc == HWRM_ERR_CODE_HOT_RESET_PROGRESS) \
                        rc = -EAGAIN; \
                else if (rc > 0) \
                        rc = -EIO; \
                return rc; \
        } \
        if (resp->error_code) { \
                rc = rte_le_to_cpu_16(resp->error_code); \
                if (resp->resp_len >= 16) { \
                        struct hwrm_err_output *tmp_hwrm_err_op = \
                                                (void *)resp; \
                        PMD_DRV_LOG(ERR, \
                                "error %d:%d:%08x:%04x\n", \
                                rc, tmp_hwrm_err_op->cmd_err, \
                                rte_le_to_cpu_32(\
                                        tmp_hwrm_err_op->opaque_0), \
                                rte_le_to_cpu_16(\
                                        tmp_hwrm_err_op->opaque_1)); \
                } else { \
                        PMD_DRV_LOG(ERR, "error %d\n", rc); \
                } \
                rte_spinlock_unlock(&bp->hwrm_lock); \
                if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
                        rc = -EACCES; \
                else if (rc == HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR) \
                        rc = -ENOSPC; \
                else if (rc == HWRM_ERR_CODE_INVALID_PARAMS) \
                        rc = -EINVAL; \
                else if (rc == HWRM_ERR_CODE_CMD_NOT_SUPPORTED) \
                        rc = -ENOTSUP; \
                else if (rc == HWRM_ERR_CODE_HOT_RESET_PROGRESS) \
                        rc = -EAGAIN; \
                else if (rc > 0) \
                        rc = -EIO; \
                return rc; \
        } \
} while (0)

#define HWRM_UNLOCK()           rte_spinlock_unlock(&bp->hwrm_lock)

int bnxt_hwrm_tf_message_direct(struct bnxt *bp,
                                bool use_kong_mb,
                                uint16_t msg_type,
                                void *msg,
                                uint32_t msg_len,
                                void *resp_msg,
                                uint32_t resp_len)
{
        int rc = 0;
        bool mailbox = BNXT_USE_CHIMP_MB;
        struct input *req = msg;
        struct output *resp = bp->hwrm_cmd_resp_addr;

        if (use_kong_mb)
                mailbox = BNXT_USE_KONG(bp);

        HWRM_PREP(req, msg_type, mailbox);

        rc = bnxt_hwrm_send_message(bp, req, msg_len, mailbox);

        HWRM_CHECK_RESULT();

        if (resp_msg)
                memcpy(resp_msg, resp, resp_len);

        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_tf_message_tunneled(struct bnxt *bp,
                                  bool use_kong_mb,
                                  uint16_t tf_type,
                                  uint16_t tf_subtype,
                                  uint32_t *tf_response_code,
                                  void *msg,
                                  uint32_t msg_len,
                                  void *response,
                                  uint32_t response_len)
{
        int rc = 0;
        struct hwrm_cfa_tflib_input req = { .req_type = 0 };
        struct hwrm_cfa_tflib_output *resp = bp->hwrm_cmd_resp_addr;
        bool mailbox = BNXT_USE_CHIMP_MB;

        if (msg_len > sizeof(req.tf_req))
                return -ENOMEM;

        if (use_kong_mb)
                mailbox = BNXT_USE_KONG(bp);

        HWRM_PREP(&req, HWRM_TF, mailbox);
        /* Build request using the user supplied request payload.
         * TLV request size is checked at build time against HWRM
         * request max size, thus no checking required.
         */
        req.tf_type = tf_type;
        req.tf_subtype = tf_subtype;
        memcpy(req.tf_req, msg, msg_len);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), mailbox);
        HWRM_CHECK_RESULT();

        /* Copy the resp to user provided response buffer */
        if (response != NULL)
                /* Post process response data. We need to copy only
                 * the 'payload' as the HWRM data structure really is
                 * HWRM header + msg header + payload and the TFLIB
                 * only provided a payload place holder.
                 */
                if (response_len != 0) {
                        memcpy(response,
                               resp->tf_resp,
                               response_len);
                }

        /* Extract the internal tflib response code */
        *tf_response_code = resp->tf_resp_code;
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(&req, HWRM_CFA_L2_SET_RX_MASK, BNXT_USE_CHIMP_MB);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
        req.mask = 0;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
                                 struct bnxt_vnic_info *vnic,
                                 uint16_t vlan_count,
                                 struct bnxt_vlan_table_entry *vlan_table)
{
        int rc = 0;
        struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t mask = 0;

        if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
                return rc;

        HWRM_PREP(&req, HWRM_CFA_L2_SET_RX_MASK, BNXT_USE_CHIMP_MB);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

        if (vnic->flags & BNXT_VNIC_INFO_BCAST)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST;
        if (vnic->flags & BNXT_VNIC_INFO_UNTAGGED)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN;

        if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;

        if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI) {
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
        } else if (vnic->flags & BNXT_VNIC_INFO_MCAST) {
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
                req.num_mc_entries = rte_cpu_to_le_32(vnic->mc_addr_cnt);
                req.mc_tbl_addr = rte_cpu_to_le_64(vnic->mc_list_dma_addr);
        }
        if (vlan_table) {
                if (!(mask & HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN))
                        mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
                req.vlan_tag_tbl_addr =
                        rte_cpu_to_le_64(rte_malloc_virt2iova(vlan_table));
                req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count);
        }
        req.mask = rte_cpu_to_le_32(mask);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
                        uint16_t vlan_count,
                        struct bnxt_vlan_antispoof_table_entry *vlan_table)
{
        int rc = 0;
        struct hwrm_cfa_vlan_antispoof_cfg_input req = {.req_type = 0 };
        struct hwrm_cfa_vlan_antispoof_cfg_output *resp =
                                                bp->hwrm_cmd_resp_addr;

        /*
         * Older HWRM versions did not support this command, and the
         * set_rx_mask list was used for anti-spoof. In 1.8.0, the TX path
         * configuration was removed from the set_rx_mask call and this
         * command was added.
         *
         * The command is also present in 1.7.8.0, and in 1.7.8.11 and
         * higher.
         */
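        /*
         * bnxt_hwrm_ver_get() packs the FW version into bp->fw_ver as
         * (maj << 24) | (min << 16) | (bld << 8) | rsvd, which is what
         * the literals below encode.
         */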
        if (bp->fw_ver < ((1 << 24) | (8 << 16))) {
                if (bp->fw_ver != ((1 << 24) | (7 << 16) | (8 << 8))) {
                        if (bp->fw_ver < ((1 << 24) | (7 << 16) | (8 << 8) |
                                        (11)))
                                return 0;
                }
        }
        HWRM_PREP(&req, HWRM_CFA_VLAN_ANTISPOOF_CFG, BNXT_USE_CHIMP_MB);
        req.fid = rte_cpu_to_le_16(fid);

        req.vlan_tag_mask_tbl_addr =
                rte_cpu_to_le_64(rte_malloc_virt2iova(vlan_table));
        req.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_clear_l2_filter(struct bnxt *bp,
                             struct bnxt_filter_info *filter)
{
        int rc = 0;
        struct bnxt_filter_info *l2_filter = filter;
        struct bnxt_vnic_info *vnic = NULL;
        struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;

        if (filter->fw_l2_filter_id == UINT64_MAX)
                return 0;

        if (filter->matching_l2_fltr_ptr)
                l2_filter = filter->matching_l2_fltr_ptr;

        PMD_DRV_LOG(DEBUG, "filter: %p l2_filter: %p ref_cnt: %d\n",
                    filter, l2_filter, l2_filter->l2_ref_cnt);

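        /* An L2 filter can be shared by multiple flows; free the HW filter
         * only when the last reference is dropped.
         */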
        if (l2_filter->l2_ref_cnt == 0)
                return 0;

        if (l2_filter->l2_ref_cnt > 0)
                l2_filter->l2_ref_cnt--;

        if (l2_filter->l2_ref_cnt > 0)
                return 0;

        HWRM_PREP(&req, HWRM_CFA_L2_FILTER_FREE, BNXT_USE_CHIMP_MB);

        req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        filter->fw_l2_filter_id = UINT64_MAX;
        if (l2_filter->l2_ref_cnt == 0) {
                vnic = l2_filter->vnic;
                if (vnic) {
                        STAILQ_REMOVE(&vnic->filter, l2_filter,
                                      bnxt_filter_info, next);
                        bnxt_free_filter(bp, l2_filter);
                }
        }

        return 0;
}

int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
                         uint16_t dst_id,
                         struct bnxt_filter_info *filter)
{
        int rc = 0;
        struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
        struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
        const struct rte_eth_vmdq_rx_conf *conf =
                    &dev_conf->rx_adv_conf.vmdq_rx_conf;
        uint32_t enables = 0;
        uint16_t j = dst_id - 1;

        //TODO: Is there a better way to add VLANs to each VNIC in case of VMDQ
        if ((dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) &&
            conf->pool_map[j].pools & (1UL << j)) {
                PMD_DRV_LOG(DEBUG,
                        "Add vlan %u to vmdq pool %u\n",
                        conf->pool_map[j].vlan_id, j);

                filter->l2_ivlan = conf->pool_map[j].vlan_id;
                filter->enables |=
                        HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN |
                        HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK;
        }

        if (filter->fw_l2_filter_id != UINT64_MAX)
                bnxt_hwrm_clear_l2_filter(bp, filter);

        HWRM_PREP(&req, HWRM_CFA_L2_FILTER_ALLOC, BNXT_USE_CHIMP_MB);

        /* PMD does not support XDP and RoCE */
        filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_XDP_DISABLE |
                        HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_TRAFFIC_L2;
        req.flags = rte_cpu_to_le_32(filter->flags);

        enables = filter->enables |
              HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
        req.dst_id = rte_cpu_to_le_16(dst_id);

        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
                memcpy(req.l2_addr, filter->l2_addr,
                       RTE_ETHER_ADDR_LEN);
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
                memcpy(req.l2_addr_mask, filter->l2_addr_mask,
                       RTE_ETHER_ADDR_LEN);
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
                req.l2_ovlan = filter->l2_ovlan;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN)
                req.l2_ivlan = filter->l2_ivlan;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
                req.l2_ovlan_mask = filter->l2_ovlan_mask;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK)
                req.l2_ivlan_mask = filter->l2_ivlan_mask;
        if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID)
                req.src_id = rte_cpu_to_le_32(filter->src_id);
        if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE)
                req.src_type = filter->src_type;
        if (filter->pri_hint) {
                req.pri_hint = filter->pri_hint;
                req.l2_filter_id_hint =
                        rte_cpu_to_le_64(filter->l2_filter_id_hint);
        }

        req.enables = rte_cpu_to_le_32(enables);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

        filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);
        filter->flow_id = rte_le_to_cpu_32(resp->flow_id);
        HWRM_UNLOCK();

        filter->l2_ref_cnt++;

        return rc;
}

int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
{
        struct hwrm_port_mac_cfg_input req = {.req_type = 0};
        struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
        uint32_t flags = 0;
        int rc;

        if (!ptp)
                return 0;

        HWRM_PREP(&req, HWRM_PORT_MAC_CFG, BNXT_USE_CHIMP_MB);

        if (ptp->rx_filter)
                flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_ENABLE;
        else
                flags |=
                        HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_DISABLE;
        if (ptp->tx_tstamp_en)
                flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_ENABLE;
        else
                flags |=
                        HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_DISABLE;
        req.flags = rte_cpu_to_le_32(flags);
        req.enables = rte_cpu_to_le_32
                (HWRM_PORT_MAC_CFG_INPUT_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE);
        req.rx_ts_capture_ptp_msg_type = rte_cpu_to_le_16(ptp->rxctl);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
        HWRM_UNLOCK();

        return rc;
}

static int bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_port_mac_ptp_qcfg_input req = {.req_type = 0};
        struct hwrm_port_mac_ptp_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
        struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;

        if (ptp)
                return 0;

        HWRM_PREP(&req, HWRM_PORT_MAC_PTP_QCFG, BNXT_USE_CHIMP_MB);

        req.port_id = rte_cpu_to_le_16(bp->pf->port_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

        /* HWRM_PREP() took the HWRM lock; release it on every exit path
         * below, once we are done reading the response.
         */
        if (!BNXT_CHIP_P5(bp) &&
            !(resp->flags & HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_DIRECT_ACCESS)) {
                HWRM_UNLOCK();
                return 0;
        }

        if (resp->flags & HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_ONE_STEP_TX_TS)
                bp->flags |= BNXT_FLAG_FW_CAP_ONE_STEP_TX_TS;

        ptp = rte_zmalloc("ptp_cfg", sizeof(*ptp), 0);
        if (!ptp) {
                HWRM_UNLOCK();
                return -ENOMEM;
        }

        if (!BNXT_CHIP_P5(bp)) {
                ptp->rx_regs[BNXT_PTP_RX_TS_L] =
                        rte_le_to_cpu_32(resp->rx_ts_reg_off_lower);
                ptp->rx_regs[BNXT_PTP_RX_TS_H] =
                        rte_le_to_cpu_32(resp->rx_ts_reg_off_upper);
                ptp->rx_regs[BNXT_PTP_RX_SEQ] =
                        rte_le_to_cpu_32(resp->rx_ts_reg_off_seq_id);
                ptp->rx_regs[BNXT_PTP_RX_FIFO] =
                        rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo);
                ptp->rx_regs[BNXT_PTP_RX_FIFO_ADV] =
                        rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo_adv);
                ptp->tx_regs[BNXT_PTP_TX_TS_L] =
                        rte_le_to_cpu_32(resp->tx_ts_reg_off_lower);
                ptp->tx_regs[BNXT_PTP_TX_TS_H] =
                        rte_le_to_cpu_32(resp->tx_ts_reg_off_upper);
                ptp->tx_regs[BNXT_PTP_TX_SEQ] =
                        rte_le_to_cpu_32(resp->tx_ts_reg_off_seq_id);
                ptp->tx_regs[BNXT_PTP_TX_FIFO] =
                        rte_le_to_cpu_32(resp->tx_ts_reg_off_fifo);
        }

        HWRM_UNLOCK();

        ptp->bp = bp;
        bp->ptp_cfg = ptp;

        return 0;
}

void bnxt_hwrm_free_vf_info(struct bnxt *bp)
{
        int i;

        for (i = 0; i < bp->pf->max_vfs; i++) {
                rte_free(bp->pf->vf_info[i].vlan_table);
                bp->pf->vf_info[i].vlan_table = NULL;
                rte_free(bp->pf->vf_info[i].vlan_as_table);
                bp->pf->vf_info[i].vlan_as_table = NULL;
        }
        rte_free(bp->pf->vf_info);
        bp->pf->vf_info = NULL;
}

static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_func_qcaps_input req = {.req_type = 0 };
        struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
        uint16_t new_max_vfs;
        uint32_t flags;
        int i;

        HWRM_PREP(&req, HWRM_FUNC_QCAPS, BNXT_USE_CHIMP_MB);

        req.fid = rte_cpu_to_le_16(0xffff);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

        bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
        flags = rte_le_to_cpu_32(resp->flags);
        if (BNXT_PF(bp)) {
                bp->pf->port_id = resp->port_id;
                bp->pf->first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
                bp->pf->total_vfs = rte_le_to_cpu_16(resp->max_vfs);
                new_max_vfs = bp->pdev->max_vfs;
                if (new_max_vfs != bp->pf->max_vfs) {
                        if (bp->pf->vf_info)
                                bnxt_hwrm_free_vf_info(bp);
                        bp->pf->vf_info = rte_zmalloc("bnxt_vf_info",
                            sizeof(bp->pf->vf_info[0]) * new_max_vfs, 0);
                        if (bp->pf->vf_info == NULL) {
                                PMD_DRV_LOG(ERR, "Alloc vf info fail\n");
                                HWRM_UNLOCK();
                                return -ENOMEM;
                        }
                        bp->pf->max_vfs = new_max_vfs;
                        for (i = 0; i < new_max_vfs; i++) {
                                bp->pf->vf_info[i].fid =
                                        bp->pf->first_vf_id + i;
                                bp->pf->vf_info[i].vlan_table =
                                        rte_zmalloc("VF VLAN table",
                                                    getpagesize(),
                                                    getpagesize());
                                if (bp->pf->vf_info[i].vlan_table == NULL)
                                        PMD_DRV_LOG(ERR,
                                        "Fail to alloc VLAN table for VF %d\n",
                                        i);
                                else
                                        rte_mem_lock_page(
                                                bp->pf->vf_info[i].vlan_table);
                                bp->pf->vf_info[i].vlan_as_table =
                                        rte_zmalloc("VF VLAN AS table",
                                                    getpagesize(),
                                                    getpagesize());
                                if (bp->pf->vf_info[i].vlan_as_table == NULL)
                                        PMD_DRV_LOG(ERR,
                                        "Alloc VLAN AS table for VF %d fail\n",
                                        i);
                                else
                                        rte_mem_lock_page(
                                              bp->pf->vf_info[i].vlan_as_table);
                                STAILQ_INIT(&bp->pf->vf_info[i].filter);
                        }
                }
        }

        bp->fw_fid = rte_le_to_cpu_32(resp->fid);
        if (!bnxt_check_zero_bytes(resp->mac_address, RTE_ETHER_ADDR_LEN)) {
                bp->flags |= BNXT_FLAG_DFLT_MAC_SET;
                memcpy(bp->mac_addr, &resp->mac_address, RTE_ETHER_ADDR_LEN);
        } else {
                bp->flags &= ~BNXT_FLAG_DFLT_MAC_SET;
        }
        bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
        bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
        bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
        bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
        bp->first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
        bp->max_rx_em_flows = rte_le_to_cpu_16(resp->max_rx_em_flows);
        bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
        if (!BNXT_CHIP_P5(bp) && !bp->pdev->max_vfs)
                bp->max_l2_ctx += bp->max_rx_em_flows;
        /* TODO: For now, do not support VMDq/RFS on VFs. */
        if (BNXT_PF(bp)) {
                if (bp->pf->max_vfs)
                        bp->max_vnics = 1;
                else
                        bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
        } else {
                bp->max_vnics = 1;
        }
        PMD_DRV_LOG(DEBUG, "Max l2_cntxts is %d vnics is %d\n",
                    bp->max_l2_ctx, bp->max_vnics);
        bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
        if (BNXT_PF(bp)) {
                bp->pf->total_vnics = rte_le_to_cpu_16(resp->max_vnics);
                if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED) {
                        bp->flags |= BNXT_FLAG_PTP_SUPPORTED;
                        PMD_DRV_LOG(DEBUG, "PTP SUPPORTED\n");
                }
        }

        if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT_STATS_SUPPORTED)
                bp->flags |= BNXT_FLAG_EXT_STATS_SUPPORTED;

        if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ERROR_RECOVERY_CAPABLE) {
                bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY;
                PMD_DRV_LOG(DEBUG, "Adapter Error recovery SUPPORTED\n");
        }

        if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ERR_RECOVER_RELOAD)
                bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;

        if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_HOT_RESET_CAPABLE)
                bp->fw_cap |= BNXT_FW_CAP_HOT_RESET;

        if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_LINK_ADMIN_STATUS_SUPPORTED)
                bp->fw_cap |= BNXT_FW_CAP_LINK_ADMIN;

        HWRM_UNLOCK();

        /* bnxt_hwrm_ptp_qcfg() sends its own HWRM command and takes the
         * HWRM lock itself, so only call it once the lock is released.
         */
        if (BNXT_PF(bp) && (bp->flags & BNXT_FLAG_PTP_SUPPORTED))
                bnxt_hwrm_ptp_qcfg(bp);

        return rc;
}

int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
        int rc;

        rc = __bnxt_hwrm_func_qcaps(bp);
        if (rc)
                return rc;

        if (bp->hwrm_spec_code >= HWRM_SPEC_CODE_1_8_3) {
                rc = bnxt_alloc_ctx_mem(bp);
                if (rc)
                        return rc;

                /* On older FW, bnxt_hwrm_func_resc_qcaps can fail and cause
                 * init failure, but the error can safely be ignored.
                 * Return success.
                 */
                rc = bnxt_hwrm_func_resc_qcaps(bp);
                if (!rc)
                        bp->flags |= BNXT_FLAG_NEW_RM;
        }

        return 0;
}

/* VNIC caps apply to all VNICs, so there is no need to pass a vnic_id */
int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
{
        int rc = 0;
        uint32_t flags;
        struct hwrm_vnic_qcaps_input req = {.req_type = 0 };
        struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(&req, HWRM_VNIC_QCAPS, BNXT_USE_CHIMP_MB);

        req.target_id = rte_cpu_to_le_16(0xffff);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

        flags = rte_le_to_cpu_32(resp->flags);

        if (flags & HWRM_VNIC_QCAPS_OUTPUT_FLAGS_COS_ASSIGNMENT_CAP) {
                bp->vnic_cap_flags |= BNXT_VNIC_CAP_COS_CLASSIFY;
                PMD_DRV_LOG(INFO, "CoS assignment capability enabled\n");
        }

        if (flags & HWRM_VNIC_QCAPS_OUTPUT_FLAGS_OUTERMOST_RSS_CAP)
                bp->vnic_cap_flags |= BNXT_VNIC_CAP_OUTER_RSS;

        if (flags & HWRM_VNIC_QCAPS_OUTPUT_FLAGS_RX_CMPL_V2_CAP)
                bp->vnic_cap_flags |= BNXT_VNIC_CAP_RX_CMPL_V2;

        bp->max_tpa_v2 = rte_le_to_cpu_16(resp->max_aggs_supported);

        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_func_reset(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_func_reset_input req = {.req_type = 0 };
        struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(&req, HWRM_FUNC_RESET, BNXT_USE_CHIMP_MB);

        req.enables = rte_cpu_to_le_32(0);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_func_driver_register(struct bnxt *bp)
{
        int rc;
        uint32_t flags = 0;
        struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
        struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;

        if (bp->flags & BNXT_FLAG_REGISTERED)
                return 0;

        if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
                flags = HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_HOT_RESET_SUPPORT;
        if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
                flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_ERROR_RECOVERY_SUPPORT;

        /* PFs and trusted VFs should indicate support for the master
         * capability on non-Stingray platforms.
         */
        if ((BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) && !BNXT_STINGRAY(bp))
                flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_MASTER_SUPPORT;

        HWRM_PREP(&req, HWRM_FUNC_DRV_RGTR, BNXT_USE_CHIMP_MB);
        req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
                        HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
        req.ver_maj = RTE_VER_YEAR;
        req.ver_min = RTE_VER_MONTH;
        req.ver_upd = RTE_VER_MINOR;

        if (BNXT_PF(bp)) {
                req.enables |= rte_cpu_to_le_32(
                        HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_REQ_FWD);
                memcpy(req.vf_req_fwd, bp->pf->vf_req_fwd,
                       RTE_MIN(sizeof(req.vf_req_fwd),
                               sizeof(bp->pf->vf_req_fwd)));
        }

        req.flags = rte_cpu_to_le_32(flags);

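        /* Tell the FW which async completion events this driver wants
         * forwarded to it.
         */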
        req.async_event_fwd[0] |=
                rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_LINK_STATUS_CHANGE |
                                 ASYNC_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED |
                                 ASYNC_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE |
                                 ASYNC_CMPL_EVENT_ID_LINK_SPEED_CHANGE |
                                 ASYNC_CMPL_EVENT_ID_RESET_NOTIFY);
        if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
                req.async_event_fwd[0] |=
                        rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_ERROR_RECOVERY);
        req.async_event_fwd[1] |=
                rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_PF_DRVR_UNLOAD |
                                 ASYNC_CMPL_EVENT_ID_VF_CFG_CHANGE);
        if (BNXT_PF(bp))
                req.async_event_fwd[1] |=
                        rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_DBG_NOTIFICATION);

        if (BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))
                req.async_event_fwd[1] |=
                rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_DEFAULT_VNIC_CHANGE);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

        flags = rte_le_to_cpu_32(resp->flags);
        if (flags & HWRM_FUNC_DRV_RGTR_OUTPUT_FLAGS_IF_CHANGE_SUPPORTED)
                bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;

        HWRM_UNLOCK();

        bp->flags |= BNXT_FLAG_REGISTERED;

        return rc;
}

int bnxt_hwrm_check_vf_rings(struct bnxt *bp)
{
        if (!(BNXT_VF(bp) && (bp->flags & BNXT_FLAG_NEW_RM)))
                return 0;

        return bnxt_hwrm_func_reserve_vf_resc(bp, true);
}

int bnxt_hwrm_func_reserve_vf_resc(struct bnxt *bp, bool test)
{
        int rc;
        uint32_t flags = 0;
        uint32_t enables;
        struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        struct hwrm_func_vf_cfg_input req = {0};

        HWRM_PREP(&req, HWRM_FUNC_VF_CFG, BNXT_USE_CHIMP_MB);

        enables = HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RX_RINGS  |
                  HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_TX_RINGS   |
                  HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_STAT_CTXS  |
                  HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
                  HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS;

        if (BNXT_HAS_RING_GRPS(bp)) {
                enables |= HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS;
                req.num_hw_ring_grps = rte_cpu_to_le_16(bp->rx_nr_rings);
        }

        req.num_tx_rings = rte_cpu_to_le_16(bp->tx_nr_rings);
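        /* Each RX ring needs a companion aggregation ring, hence the
         * AGG_RING_MULTIPLIER on the RX ring count.
         */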
        req.num_rx_rings = rte_cpu_to_le_16(bp->rx_nr_rings *
                                            AGG_RING_MULTIPLIER);
        req.num_stat_ctxs = rte_cpu_to_le_16(bp->rx_nr_rings + bp->tx_nr_rings);
        req.num_cmpl_rings = rte_cpu_to_le_16(bp->rx_nr_rings +
                                              bp->tx_nr_rings +
                                              BNXT_NUM_ASYNC_CPR(bp));
        req.num_vnics = rte_cpu_to_le_16(bp->rx_nr_rings);
        if (bp->vf_resv_strategy ==
            HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
                enables |= HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS |
                           HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_L2_CTXS |
                           HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS;
                req.num_rsscos_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_RSS_CTX);
                req.num_l2_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_L2_CTX);
                req.num_vnics = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_VNIC);
        } else if (bp->vf_resv_strategy ==
                   HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MAXIMAL) {
                enables |= HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS;
                req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
        }

        if (test)
                flags = HWRM_FUNC_VF_CFG_INPUT_FLAGS_TX_ASSETS_TEST |
                        HWRM_FUNC_VF_CFG_INPUT_FLAGS_RX_ASSETS_TEST |
                        HWRM_FUNC_VF_CFG_INPUT_FLAGS_CMPL_ASSETS_TEST |
                        HWRM_FUNC_VF_CFG_INPUT_FLAGS_RING_GRP_ASSETS_TEST |
                        HWRM_FUNC_VF_CFG_INPUT_FLAGS_STAT_CTX_ASSETS_TEST |
                        HWRM_FUNC_VF_CFG_INPUT_FLAGS_VNIC_ASSETS_TEST;

        if (test && BNXT_HAS_RING_GRPS(bp))
                flags |= HWRM_FUNC_VF_CFG_INPUT_FLAGS_RING_GRP_ASSETS_TEST;

        req.flags = rte_cpu_to_le_32(flags);
        req.enables |= rte_cpu_to_le_32(enables);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

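        /* In test mode a failure just means the resources are unavailable,
         * so don't log it as an error.
         */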
        if (test)
                HWRM_CHECK_RESULT_SILENT();
        else
                HWRM_CHECK_RESULT();

        HWRM_UNLOCK();
        return rc;
}

int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp)
{
        int rc;
        struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
        struct hwrm_func_resource_qcaps_input req = {0};

        HWRM_PREP(&req, HWRM_FUNC_RESOURCE_QCAPS, BNXT_USE_CHIMP_MB);
        req.fid = rte_cpu_to_le_16(0xffff);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT_SILENT();

        bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
        bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
        bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
        bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
        bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
        /* func_resource_qcaps does not return max_rx_em_flows.
         * So use the value provided by func_qcaps.
         */
        bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
        if (!BNXT_CHIP_P5(bp) && !bp->pdev->max_vfs)
                bp->max_l2_ctx += bp->max_rx_em_flows;
        bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
        bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
        bp->max_nq_rings = rte_le_to_cpu_16(resp->max_msix);
        bp->vf_resv_strategy = rte_le_to_cpu_16(resp->vf_reservation_strategy);
        if (bp->vf_resv_strategy >
            HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC)
                bp->vf_resv_strategy =
                HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_MAXIMAL;

        HWRM_UNLOCK();
        return rc;
}

int bnxt_hwrm_ver_get(struct bnxt *bp, uint32_t timeout)
{
        int rc = 0;
        struct hwrm_ver_get_input req = {.req_type = 0 };
        struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t fw_version;
        uint16_t max_resp_len;
        char type[RTE_MEMZONE_NAMESIZE];
        uint32_t dev_caps_cfg;

        bp->max_req_len = HWRM_MAX_REQ_LEN;
        bp->hwrm_cmd_timeout = timeout;
        HWRM_PREP(&req, HWRM_VER_GET, BNXT_USE_CHIMP_MB);

        req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
        req.hwrm_intf_min = HWRM_VERSION_MINOR;
        req.hwrm_intf_upd = HWRM_VERSION_UPDATE;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        if (bp->flags & BNXT_FLAG_FW_RESET)
                HWRM_CHECK_RESULT_SILENT();
        else
                HWRM_CHECK_RESULT();

        PMD_DRV_LOG(INFO, "%d.%d.%d:%d.%d.%d.%d\n",
                resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
                resp->hwrm_intf_upd_8b, resp->hwrm_fw_maj_8b,
                resp->hwrm_fw_min_8b, resp->hwrm_fw_bld_8b,
                resp->hwrm_fw_rsvd_8b);
        bp->fw_ver = (resp->hwrm_fw_maj_8b << 24) |
                     (resp->hwrm_fw_min_8b << 16) |
                     (resp->hwrm_fw_bld_8b << 8) |
                     resp->hwrm_fw_rsvd_8b;
        PMD_DRV_LOG(INFO, "Driver HWRM version: %d.%d.%d\n",
                HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);

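        /* The HWRM interface spec version is packed as
         * (maj << 16) | (min << 8) | upd and cached in bp->hwrm_spec_code.
         */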
        fw_version = resp->hwrm_intf_maj_8b << 16;
        fw_version |= resp->hwrm_intf_min_8b << 8;
        fw_version |= resp->hwrm_intf_upd_8b;
        bp->hwrm_spec_code = fw_version;

        /* def_req_timeout value is in milliseconds */
        bp->hwrm_cmd_timeout = rte_le_to_cpu_16(resp->def_req_timeout);
        /* convert timeout to usec */
        bp->hwrm_cmd_timeout *= 1000;
        if (!bp->hwrm_cmd_timeout)
                bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;

        if (resp->hwrm_intf_maj_8b != HWRM_VERSION_MAJOR) {
                PMD_DRV_LOG(ERR, "Unsupported firmware API version\n");
                rc = -EINVAL;
                goto error;
        }

        if (bp->max_req_len > resp->max_req_win_len) {
                PMD_DRV_LOG(ERR, "Unsupported request length\n");
                rc = -EINVAL;
        }

        bp->chip_num = rte_le_to_cpu_16(resp->chip_num);

        bp->max_req_len = rte_le_to_cpu_16(resp->max_req_win_len);
        bp->hwrm_max_ext_req_len = rte_le_to_cpu_16(resp->max_ext_req_len);
        if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
                bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;

        max_resp_len = rte_le_to_cpu_16(resp->max_resp_len);
        dev_caps_cfg = rte_le_to_cpu_32(resp->dev_caps_cfg);

        if (bp->max_resp_len != max_resp_len) {
                sprintf(type, "bnxt_hwrm_" PCI_PRI_FMT,
                        bp->pdev->addr.domain, bp->pdev->addr.bus,
                        bp->pdev->addr.devid, bp->pdev->addr.function);

                rte_free(bp->hwrm_cmd_resp_addr);

                bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
                if (bp->hwrm_cmd_resp_addr == NULL) {
                        rc = -ENOMEM;
                        goto error;
                }
                bp->hwrm_cmd_resp_dma_addr =
                        rte_malloc_virt2iova(bp->hwrm_cmd_resp_addr);
                if (bp->hwrm_cmd_resp_dma_addr == RTE_BAD_IOVA) {
                        PMD_DRV_LOG(ERR,
                        "Unable to map response buffer to physical memory.\n");
                        rc = -ENOMEM;
                        goto error;
                }
                bp->max_resp_len = max_resp_len;
        }

        if ((dev_caps_cfg &
                HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
            (dev_caps_cfg &
             HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) {
                PMD_DRV_LOG(DEBUG, "Short command supported\n");
                bp->flags |= BNXT_FLAG_SHORT_CMD;
        }

        if (((dev_caps_cfg &
              HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
             (dev_caps_cfg &
              HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) ||
            bp->hwrm_max_ext_req_len > HWRM_MAX_REQ_LEN) {
                sprintf(type, "bnxt_hwrm_short_" PCI_PRI_FMT,
                        bp->pdev->addr.domain, bp->pdev->addr.bus,
                        bp->pdev->addr.devid, bp->pdev->addr.function);

                rte_free(bp->hwrm_short_cmd_req_addr);

                bp->hwrm_short_cmd_req_addr =
                                rte_malloc(type, bp->hwrm_max_ext_req_len, 0);
                if (bp->hwrm_short_cmd_req_addr == NULL) {
                        rc = -ENOMEM;
                        goto error;
                }
                bp->hwrm_short_cmd_req_dma_addr =
                        rte_malloc_virt2iova(bp->hwrm_short_cmd_req_addr);
                if (bp->hwrm_short_cmd_req_dma_addr == RTE_BAD_IOVA) {
                        rte_free(bp->hwrm_short_cmd_req_addr);
                        PMD_DRV_LOG(ERR,
                                "Unable to map buffer to physical memory.\n");
                        rc = -ENOMEM;
                        goto error;
                }
        }
        if (dev_caps_cfg &
            HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED) {
                bp->flags |= BNXT_FLAG_KONG_MB_EN;
                PMD_DRV_LOG(DEBUG, "Kong mailbox channel enabled\n");
        }
        if (dev_caps_cfg &
            HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
                PMD_DRV_LOG(DEBUG, "FW supports Trusted VFs\n");
        if (dev_caps_cfg &
            HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED) {
                bp->fw_cap |= BNXT_FW_CAP_ADV_FLOW_MGMT;
                PMD_DRV_LOG(DEBUG, "FW supports advanced flow management\n");
        }

        if (dev_caps_cfg &
            HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_ADV_FLOW_COUNTERS_SUPPORTED) {
                PMD_DRV_LOG(DEBUG, "FW supports advanced flow counters\n");
                bp->fw_cap |= BNXT_FW_CAP_ADV_FLOW_COUNTERS;
        }

error:
        HWRM_UNLOCK();
        return rc;
}

int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
{
        int rc;
        struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
        struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;

        if (!(bp->flags & BNXT_FLAG_REGISTERED))
                return 0;

        HWRM_PREP(&req, HWRM_FUNC_DRV_UNRGTR, BNXT_USE_CHIMP_MB);
        req.flags = flags;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
{
        int rc = 0;
        struct hwrm_port_phy_cfg_input req = {0};
        struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t enables = 0;

        HWRM_PREP(&req, HWRM_PORT_PHY_CFG, BNXT_USE_CHIMP_MB);

        if (conf->link_up) {
                /* Setting a fixed speed while autoneg is on, so disable it */
                if (bp->link_info->auto_mode && conf->link_speed) {
                        req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE;
                        PMD_DRV_LOG(DEBUG, "Disabling AutoNeg\n");
                }

                req.flags = rte_cpu_to_le_32(conf->phy_flags);
                /*
                 * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
                 * any auto mode, even "none".
                 */
                if (!conf->link_speed) {
                        /* No speeds specified. Enable AutoNeg - all speeds */
                        enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
                        req.auto_mode =
                                HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS;
                } else {
                        if (bp->link_info->link_signal_mode) {
                                enables |=
                                HWRM_PORT_PHY_CFG_IN_EN_FORCE_PAM4_LINK_SPEED;
                                req.force_pam4_link_speed =
                                        rte_cpu_to_le_16(conf->link_speed);
                        } else {
                                req.force_link_speed =
                                        rte_cpu_to_le_16(conf->link_speed);
                        }
                }
                /* AutoNeg - Advertise speeds specified. */
                if (conf->auto_link_speed_mask &&
                    !(conf->phy_flags & HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE)) {
                        req.auto_mode =
                                HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
                        req.auto_link_speed_mask =
                                conf->auto_link_speed_mask;
                        if (conf->auto_pam4_link_speeds) {
                                enables |=
                                HWRM_PORT_PHY_CFG_IN_EN_AUTO_PAM4_LINK_SPD_MASK;
                                req.auto_link_pam4_speed_mask =
                                        conf->auto_pam4_link_speeds;
                        } else {
                                enables |=
                                HWRM_PORT_PHY_CFG_IN_EN_AUTO_LINK_SPEED_MASK;
                        }
                }
                if (conf->auto_link_speed &&
                !(conf->phy_flags & HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE))
                        enables |=
                                HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED;

                req.auto_duplex = conf->duplex;
                enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
                req.auto_pause = conf->auto_pause;
                req.force_pause = conf->force_pause;
                /* Honor auto_pause only when it is set and no pause is being
                 * forced; otherwise tell the FW to use force_pause.
                 */
                if (req.auto_pause && !req.force_pause)
                        enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
                else
                        enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;
1322
1323                 req.enables = rte_cpu_to_le_32(enables);
1324         } else {
1325                 req.flags =
1326                 rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN);
1327                 PMD_DRV_LOG(INFO, "Force Link Down\n");
1328         }
1329
1330         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1331
1332         HWRM_CHECK_RESULT();
1333         HWRM_UNLOCK();
1334
1335         return rc;
1336 }
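
/*
 * Usage sketch (illustrative only, not called by the driver): one common
 * way to populate the conf argument of the helper above, assuming the
 * enclosing function is bnxt_hwrm_port_phy_cfg(bp, conf) as in the
 * upstream driver. Leaving link_speed at zero requests autonegotiation;
 * a non-zero speed plus the FORCE phy flag pins the PHY instead.
 */
static __rte_unused int
bnxt_phy_autoneg_sketch(struct bnxt *bp)
{
	struct bnxt_link_info conf = { 0 };

	conf.link_up = 1;
	conf.link_speed = 0; /* zero -> autonegotiate all advertised speeds */
	conf.auto_link_speed_mask = bp->link_info->support_speeds;
	return bnxt_hwrm_port_phy_cfg(bp, &conf);
}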
1337
1338 static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
1339                                    struct bnxt_link_info *link_info)
1340 {
1341         int rc = 0;
1342         struct hwrm_port_phy_qcfg_input req = {0};
1343         struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1344
1345         HWRM_PREP(&req, HWRM_PORT_PHY_QCFG, BNXT_USE_CHIMP_MB);
1346
1347         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1348
1349         HWRM_CHECK_RESULT();
1350
1351         link_info->phy_link_status = resp->link;
1352         link_info->link_up =
1353                 (link_info->phy_link_status ==
1354                  HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) ? 1 : 0;
1355         link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
1356         link_info->duplex = resp->duplex_cfg;
1357         link_info->pause = resp->pause;
1358         link_info->auto_pause = resp->auto_pause;
1359         link_info->force_pause = resp->force_pause;
1360         link_info->auto_mode = resp->auto_mode;
1361         link_info->phy_type = resp->phy_type;
1362         link_info->media_type = resp->media_type;
1363
1364         link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
1365         link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
1366         link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
1367         link_info->force_link_speed = rte_le_to_cpu_16(resp->force_link_speed);
1368         link_info->phy_ver[0] = resp->phy_maj;
1369         link_info->phy_ver[1] = resp->phy_min;
1370         link_info->phy_ver[2] = resp->phy_bld;
1371         link_info->link_signal_mode =
1372                 rte_le_to_cpu_16(resp->active_fec_signal_mode);
1373         link_info->force_pam4_link_speed =
1374                         rte_le_to_cpu_16(resp->force_pam4_link_speed);
1375         link_info->support_pam4_speeds =
1376                         rte_le_to_cpu_16(resp->support_pam4_speeds);
1377         link_info->auto_pam4_link_speeds =
1378                         rte_le_to_cpu_16(resp->auto_pam4_link_speed_mask);
1379         HWRM_UNLOCK();
1380
1381         PMD_DRV_LOG(DEBUG, "Link Speed:%d,Auto:%d:%x:%x,Support:%x,Force:%x\n",
1382                     link_info->link_speed, link_info->auto_mode,
1383                     link_info->auto_link_speed, link_info->auto_link_speed_mask,
1384                     link_info->support_speeds, link_info->force_link_speed);
1385         PMD_DRV_LOG(DEBUG, "Link Signal:%d,PAM::Auto:%x,Support:%x,Force:%x\n",
1386                     link_info->link_signal_mode,
1387                     link_info->auto_pam4_link_speeds,
1388                     link_info->support_pam4_speeds,
1389                     link_info->force_pam4_link_speed);
1390         return rc;
1391 }
1392
1393 int bnxt_hwrm_port_phy_qcaps(struct bnxt *bp)
1394 {
1395         int rc = 0;
1396         struct hwrm_port_phy_qcaps_input req = {0};
1397         struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
1398         struct bnxt_link_info *link_info = bp->link_info;
1399
1400         if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp))
1401                 return 0;
1402
1403         HWRM_PREP(&req, HWRM_PORT_PHY_QCAPS, BNXT_USE_CHIMP_MB);
1404
1405         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1406
1407         HWRM_CHECK_RESULT();
1408
1409         bp->port_cnt = resp->port_cnt;
1410         if (resp->supported_speeds_auto_mode)
1411                 link_info->support_auto_speeds =
1412                         rte_le_to_cpu_16(resp->supported_speeds_auto_mode);
1413         if (resp->supported_pam4_speeds_auto_mode)
1414                 link_info->support_pam4_auto_speeds =
1415                         rte_le_to_cpu_16(resp->supported_pam4_speeds_auto_mode);
1416
1417         HWRM_UNLOCK();
1418
1419         return 0;
1420 }
1421
1422 static bool bnxt_find_lossy_profile(struct bnxt *bp)
1423 {
1424         int i = 0;
1425
1426         for (i = BNXT_COS_QUEUE_COUNT - 1; i >= 0; i--) {
1427                 if (bp->tx_cos_queue[i].profile ==
1428                     HWRM_QUEUE_SERVICE_PROFILE_LOSSY) {
1429                         bp->tx_cosq_id[0] = bp->tx_cos_queue[i].id;
1430                         return true;
1431                 }
1432         }
1433         return false;
1434 }
1435
1436 static void bnxt_find_first_valid_profile(struct bnxt *bp)
1437 {
1438         int i = 0;
1439
1440         for (i = BNXT_COS_QUEUE_COUNT - 1; i >= 0; i--) {
1441                 if (bp->tx_cos_queue[i].profile !=
1442                     HWRM_QUEUE_SERVICE_PROFILE_UNKNOWN &&
1443                     bp->tx_cos_queue[i].id !=
1444                     HWRM_QUEUE_SERVICE_PROFILE_UNKNOWN) {
1445                         bp->tx_cosq_id[0] = bp->tx_cos_queue[i].id;
1446                         break;
1447                 }
1448         }
1449 }
1450
1451 int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
1452 {
1453         int rc = 0;
1454         struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
1455         struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
1456         uint32_t dir = HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX;
1457         int i;
1458
1459 get_rx_info:
1460         HWRM_PREP(&req, HWRM_QUEUE_QPORTCFG, BNXT_USE_CHIMP_MB);
1461
1462         req.flags = rte_cpu_to_le_32(dir);
1463         /* drv_qmap_cap requires HWRM >= 1.9.1 and no COS classification. */
1464         if (bp->hwrm_spec_code >= HWRM_VERSION_1_9_1 &&
1465             !(bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY))
1466                 req.drv_qmap_cap =
1467                         HWRM_QUEUE_QPORTCFG_INPUT_DRV_QMAP_CAP_ENABLED;
1468         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1469
1470         HWRM_CHECK_RESULT();
1471
1472         if (dir == HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX) {
1473                 GET_TX_QUEUE_INFO(0);
1474                 GET_TX_QUEUE_INFO(1);
1475                 GET_TX_QUEUE_INFO(2);
1476                 GET_TX_QUEUE_INFO(3);
1477                 GET_TX_QUEUE_INFO(4);
1478                 GET_TX_QUEUE_INFO(5);
1479                 GET_TX_QUEUE_INFO(6);
1480                 GET_TX_QUEUE_INFO(7);
1481         } else {
1482                 GET_RX_QUEUE_INFO(0);
1483                 GET_RX_QUEUE_INFO(1);
1484                 GET_RX_QUEUE_INFO(2);
1485                 GET_RX_QUEUE_INFO(3);
1486                 GET_RX_QUEUE_INFO(4);
1487                 GET_RX_QUEUE_INFO(5);
1488                 GET_RX_QUEUE_INFO(6);
1489                 GET_RX_QUEUE_INFO(7);
1490         }
1491
1492         HWRM_UNLOCK();
1493
1494         if (dir == HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX)
1495                 goto done;
1496
1497         if (bp->hwrm_spec_code < HWRM_VERSION_1_9_1) {
1498                 bp->tx_cosq_id[0] = bp->tx_cos_queue[0].id;
1499         } else {
1500                 int j;
1501
1502                 /* iterate and find the COSq profile to use for Tx */
1503                 if (bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY) {
1504                         for (j = 0, i = 0; i < BNXT_COS_QUEUE_COUNT; i++) {
1505                                 if (bp->tx_cos_queue[i].id != 0xff)
1506                                         bp->tx_cosq_id[j++] =
1507                                                 bp->tx_cos_queue[i].id;
1508                         }
1509                 } else {
1510                         /* When CoS classification is disabled, normal NIC
1511                          * operation should prefer a LOSSY profile. If none
1512                          * is found, fall back to the first valid profile.
1513                          */
1514                         if (!bnxt_find_lossy_profile(bp))
1515                                 bnxt_find_first_valid_profile(bp);
1516
1517                 }
1518         }
1519
1520         bp->max_tc = resp->max_configurable_queues;
1521         bp->max_lltc = resp->max_configurable_lossless_queues;
1522         if (bp->max_tc > BNXT_MAX_QUEUE)
1523                 bp->max_tc = BNXT_MAX_QUEUE;
1524         bp->max_q = bp->max_tc;
1525
1526         if (dir == HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX) {
1527                 dir = HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX;
1528                 goto get_rx_info;
1529         }
1530
1531 done:
1532         return rc;
1533 }
1534
1535 int bnxt_hwrm_ring_alloc(struct bnxt *bp,
1536                          struct bnxt_ring *ring,
1537                          uint32_t ring_type, uint32_t map_index,
1538                          uint32_t stats_ctx_id, uint32_t cmpl_ring_id,
1539                          uint16_t tx_cosq_id)
1540 {
1541         int rc = 0;
1542         uint32_t enables = 0;
1543         struct hwrm_ring_alloc_input req = {.req_type = 0 };
1544         struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1545         struct rte_mempool *mb_pool;
1546         uint16_t rx_buf_size;
1547
1548         HWRM_PREP(&req, HWRM_RING_ALLOC, BNXT_USE_CHIMP_MB);
1549
1550         req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
1551         req.fbo = rte_cpu_to_le_32(0);
1552         /* Association of ring index with doorbell index */
1553         req.logical_id = rte_cpu_to_le_16(map_index);
1554         req.length = rte_cpu_to_le_32(ring->ring_size);
1555
1556         switch (ring_type) {
1557         case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
1558                 req.ring_type = ring_type;
1559                 req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
1560                 req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id);
1561                 req.queue_id = rte_cpu_to_le_16(tx_cosq_id);
1562                 if (stats_ctx_id != INVALID_STATS_CTX_ID)
1563                         enables |=
1564                         HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
1565                 break;
1566         case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
1567                 req.ring_type = ring_type;
1568                 req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
1569                 req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id);
1570                 if (BNXT_CHIP_P5(bp)) {
1571                         mb_pool = bp->rx_queues[0]->mb_pool;
1572                         rx_buf_size = rte_pktmbuf_data_room_size(mb_pool) -
1573                                       RTE_PKTMBUF_HEADROOM;
1574                         rx_buf_size = RTE_MIN(BNXT_MAX_PKT_LEN, rx_buf_size);
1575                         req.rx_buf_size = rte_cpu_to_le_16(rx_buf_size);
1576                         enables |=
1577                                 HWRM_RING_ALLOC_INPUT_ENABLES_RX_BUF_SIZE_VALID;
1578                 }
1579                 if (stats_ctx_id != INVALID_STATS_CTX_ID)
1580                         enables |=
1581                                 HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
1582                 break;
1583         case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
1584                 req.ring_type = ring_type;
1585                 if (BNXT_HAS_NQ(bp)) {
1586                         /* Association of cp ring with nq */
1587                         req.nq_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
1588                         enables |=
1589                                 HWRM_RING_ALLOC_INPUT_ENABLES_NQ_RING_ID_VALID;
1590                 }
1591                 req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
1592                 break;
1593         case HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ:
1594                 req.ring_type = ring_type;
1595                 req.page_size = BNXT_PAGE_SHFT;
1596                 req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
1597                 break;
1598         case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG:
1599                 req.ring_type = ring_type;
1600                 req.rx_ring_id = rte_cpu_to_le_16(ring->fw_rx_ring_id);
1601
1602                 mb_pool = bp->rx_queues[0]->mb_pool;
1603                 rx_buf_size = rte_pktmbuf_data_room_size(mb_pool) -
1604                               RTE_PKTMBUF_HEADROOM;
1605                 rx_buf_size = RTE_MIN(BNXT_MAX_PKT_LEN, rx_buf_size);
1606                 req.rx_buf_size = rte_cpu_to_le_16(rx_buf_size);
1607
1608                 req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id);
1609                 enables |= HWRM_RING_ALLOC_INPUT_ENABLES_RX_RING_ID_VALID |
1610                            HWRM_RING_ALLOC_INPUT_ENABLES_RX_BUF_SIZE_VALID |
1611                            HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
1612                 break;
1613         default:
1614                 PMD_DRV_LOG(ERR, "hwrm alloc invalid ring type %d\n",
1615                         ring_type);
1616                 HWRM_UNLOCK();
1617                 return -EINVAL;
1618         }
1619         req.enables = rte_cpu_to_le_32(enables);
1620
1621         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1622
1623         if (rc || resp->error_code) {
1624                 if (rc == 0 && resp->error_code)
1625                         rc = rte_le_to_cpu_16(resp->error_code);
1626                 switch (ring_type) {
1627                 case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
1628                         PMD_DRV_LOG(ERR,
1629                                 "hwrm_ring_alloc cp failed. rc:%d\n", rc);
1630                         HWRM_UNLOCK();
1631                         return rc;
1632                 case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
1633                         PMD_DRV_LOG(ERR,
1634                                     "hwrm_ring_alloc rx failed. rc:%d\n", rc);
1635                         HWRM_UNLOCK();
1636                         return rc;
1637                 case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG:
1638                         PMD_DRV_LOG(ERR,
1639                                     "hwrm_ring_alloc rx agg failed. rc:%d\n",
1640                                     rc);
1641                         HWRM_UNLOCK();
1642                         return rc;
1643                 case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
1644                         PMD_DRV_LOG(ERR,
1645                                     "hwrm_ring_alloc tx failed. rc:%d\n", rc);
1646                         HWRM_UNLOCK();
1647                         return rc;
1648                 case HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ:
1649                         PMD_DRV_LOG(ERR,
1650                                     "hwrm_ring_alloc nq failed. rc:%d\n", rc);
1651                         HWRM_UNLOCK();
1652                         return rc;
1653                 default:
1654                         PMD_DRV_LOG(ERR, "Invalid ring. rc:%d\n", rc);
1655                         HWRM_UNLOCK();
1656                         return rc;
1657                 }
1658         }
1659
1660         ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
1661         HWRM_UNLOCK();
1662         return rc;
1663 }
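
/*
 * Usage sketch (illustrative only): allocating a TX ring with the helper
 * above. The ring must reference a live completion ring, and the stats
 * context enable is only set when stats_ctx_id is valid. The queue and
 * ring field names follow this driver's own structures.
 */
static __rte_unused int
bnxt_tx_ring_alloc_sketch(struct bnxt *bp, struct bnxt_tx_queue *txq,
			  unsigned int idx)
{
	struct bnxt_cp_ring_info *cpr = txq->cp_ring;

	return bnxt_hwrm_ring_alloc(bp, txq->tx_ring->tx_ring_struct,
				    HWRM_RING_ALLOC_INPUT_RING_TYPE_TX,
				    idx, cpr->hw_stats_ctx_id,
				    cpr->cp_ring_struct->fw_ring_id,
				    bp->tx_cosq_id[0]);
}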
1664
1665 int bnxt_hwrm_ring_free(struct bnxt *bp,
1666                         struct bnxt_ring *ring, uint32_t ring_type)
1667 {
1668         int rc;
1669         struct hwrm_ring_free_input req = {.req_type = 0 };
1670         struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
1671
1672         HWRM_PREP(&req, HWRM_RING_FREE, BNXT_USE_CHIMP_MB);
1673
1674         req.ring_type = ring_type;
1675         req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);
1676
1677         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1678
1679         if (rc || resp->error_code) {
1680                 if (rc == 0 && resp->error_code)
1681                         rc = rte_le_to_cpu_16(resp->error_code);
1682                 HWRM_UNLOCK();
1683
1684                 switch (ring_type) {
1685                 case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
1686                         PMD_DRV_LOG(ERR, "hwrm_ring_free cp failed. rc:%d\n",
1687                                 rc);
1688                         return rc;
1689                 case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
1690                         PMD_DRV_LOG(ERR, "hwrm_ring_free rx failed. rc:%d\n",
1691                                 rc);
1692                         return rc;
1693                 case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
1694                         PMD_DRV_LOG(ERR, "hwrm_ring_free tx failed. rc:%d\n",
1695                                 rc);
1696                         return rc;
1697                 case HWRM_RING_FREE_INPUT_RING_TYPE_NQ:
1698                         PMD_DRV_LOG(ERR,
1699                                     "hwrm_ring_free nq failed. rc:%d\n", rc);
1700                         return rc;
1701                 case HWRM_RING_FREE_INPUT_RING_TYPE_RX_AGG:
1702                         PMD_DRV_LOG(ERR,
1703                                     "hwrm_ring_free agg failed. rc:%d\n", rc);
1704                         return rc;
1705                 default:
1706                         PMD_DRV_LOG(ERR, "Invalid ring, rc:%d\n", rc);
1707                         return rc;
1708                 }
1709         }
1710         HWRM_UNLOCK();
1711         return 0;
1712 }
1713
1714 int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
1715 {
1716         int rc = 0;
1717         struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
1718         struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1719
1720         HWRM_PREP(&req, HWRM_RING_GRP_ALLOC, BNXT_USE_CHIMP_MB);
1721
1722         req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
1723         req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
1724         req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
1725         req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);
1726
1727         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1728
1729         HWRM_CHECK_RESULT();
1730
1731         bp->grp_info[idx].fw_grp_id = rte_le_to_cpu_16(resp->ring_group_id);
1732
1733         HWRM_UNLOCK();
1734
1735         return rc;
1736 }
1737
1738 int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
1739 {
1740         int rc;
1741         struct hwrm_ring_grp_free_input req = {.req_type = 0 };
1742         struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;
1743
1744         HWRM_PREP(&req, HWRM_RING_GRP_FREE, BNXT_USE_CHIMP_MB);
1745
1746         req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);
1747
1748         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1749
1750         HWRM_CHECK_RESULT();
1751         HWRM_UNLOCK();
1752
1753         bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
1754         return rc;
1755 }
1756
1757 int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
1758 {
1759         int rc = 0;
1760         struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
1761         struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
1762
1763         if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
1764                 return rc;
1765
1766         HWRM_PREP(&req, HWRM_STAT_CTX_CLR_STATS, BNXT_USE_CHIMP_MB);
1767
1768         req.stat_ctx_id = rte_cpu_to_le_32(cpr->hw_stats_ctx_id);
1769
1770         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1771
1772         HWRM_CHECK_RESULT();
1773         HWRM_UNLOCK();
1774
1775         return rc;
1776 }
1777
1778 int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1779                                 unsigned int idx __rte_unused)
1780 {
1781         int rc;
1782         struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
1783         struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1784
1785         HWRM_PREP(&req, HWRM_STAT_CTX_ALLOC, BNXT_USE_CHIMP_MB);
1786
1787         req.update_period_ms = rte_cpu_to_le_32(0);
1788
1789         req.stats_dma_addr = rte_cpu_to_le_64(cpr->hw_stats_map);
1790
1791         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1792
1793         HWRM_CHECK_RESULT();
1794
1795         cpr->hw_stats_ctx_id = rte_le_to_cpu_32(resp->stat_ctx_id);
1796
1797         HWRM_UNLOCK();
1798
1799         return rc;
1800 }
1801
1802 int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1803                                 unsigned int idx __rte_unused)
1804 {
1805         int rc;
1806         struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
1807         struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;
1808
1809         HWRM_PREP(&req, HWRM_STAT_CTX_FREE, BNXT_USE_CHIMP_MB);
1810
1811         req.stat_ctx_id = rte_cpu_to_le_32(cpr->hw_stats_ctx_id);
1812
1813         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1814
1815         HWRM_CHECK_RESULT();
1816         HWRM_UNLOCK();
1817
1818         return rc;
1819 }
1820
1821 int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1822 {
1823         int rc = 0, i, j;
1824         struct hwrm_vnic_alloc_input req = { 0 };
1825         struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1826
1827         if (!BNXT_HAS_RING_GRPS(bp))
1828                 goto skip_ring_grps;
1829
1830         /* map ring groups to this vnic */
1831         PMD_DRV_LOG(DEBUG, "Alloc VNIC. Start %x, End %x\n",
1832                 vnic->start_grp_id, vnic->end_grp_id);
1833         for (i = vnic->start_grp_id, j = 0; i < vnic->end_grp_id; i++, j++)
1834                 vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
1835
1836         vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
1837         vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
1838         vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
1839         vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
1840
1841 skip_ring_grps:
1842         vnic->mru = BNXT_VNIC_MRU(bp->eth_dev->data->mtu);
1843         HWRM_PREP(&req, HWRM_VNIC_ALLOC, BNXT_USE_CHIMP_MB);
1844
1845         if (vnic->func_default)
1846                 req.flags =
1847                         rte_cpu_to_le_32(HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT);
1848         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1849
1850         HWRM_CHECK_RESULT();
1851
1852         vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
1853         HWRM_UNLOCK();
1854         PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
1855         return rc;
1856 }
1857
1858 static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
1859                                         struct bnxt_vnic_info *vnic,
1860                                         struct bnxt_plcmodes_cfg *pmode)
1861 {
1862         int rc = 0;
1863         struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 };
1864         struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1865
1866         HWRM_PREP(&req, HWRM_VNIC_PLCMODES_QCFG, BNXT_USE_CHIMP_MB);
1867
1868         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1869
1870         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1871
1872         HWRM_CHECK_RESULT();
1873
1874         pmode->flags = rte_le_to_cpu_32(resp->flags);
1875         /* dflt_vnic bit doesn't exist in the _cfg command */
1876         pmode->flags &= ~(HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC);
1877         pmode->jumbo_thresh = rte_le_to_cpu_16(resp->jumbo_thresh);
1878         pmode->hds_offset = rte_le_to_cpu_16(resp->hds_offset);
1879         pmode->hds_threshold = rte_le_to_cpu_16(resp->hds_threshold);
1880
1881         HWRM_UNLOCK();
1882
1883         return rc;
1884 }
1885
1886 static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
1887                                        struct bnxt_vnic_info *vnic,
1888                                        struct bnxt_plcmodes_cfg *pmode)
1889 {
1890         int rc = 0;
1891         struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
1892         struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1893
1894         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1895                 PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
1896                 return rc;
1897         }
1898
1899         HWRM_PREP(&req, HWRM_VNIC_PLCMODES_CFG, BNXT_USE_CHIMP_MB);
1900
1901         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1902         req.flags = rte_cpu_to_le_32(pmode->flags);
1903         req.jumbo_thresh = rte_cpu_to_le_16(pmode->jumbo_thresh);
1904         req.hds_offset = rte_cpu_to_le_16(pmode->hds_offset);
1905         req.hds_threshold = rte_cpu_to_le_16(pmode->hds_threshold);
1906         req.enables = rte_cpu_to_le_32(
1907             HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID |
1908             HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID |
1909             HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID
1910         );
1911
1912         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1913
1914         HWRM_CHECK_RESULT();
1915         HWRM_UNLOCK();
1916
1917         return rc;
1918 }
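
/*
 * Usage sketch (illustrative only): the qcfg/cfg pair above forms a
 * save/restore bracket around operations that can clobber the VNIC
 * placement modes, which is exactly how bnxt_hwrm_vnic_cfg() below
 * uses it.
 */
static __rte_unused int
bnxt_plcmodes_bracket_sketch(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	struct bnxt_plcmodes_cfg pmodes = { 0 };
	int rc;

	rc = bnxt_hwrm_vnic_plcmodes_qcfg(bp, vnic, &pmodes); /* save */
	if (rc)
		return rc;
	/* ... reconfigure the VNIC here ... */
	return bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes); /* restore */
}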
1919
1920 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1921 {
1922         int rc = 0;
1923         struct hwrm_vnic_cfg_input req = {.req_type = 0 };
1924         struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1925         struct bnxt_plcmodes_cfg pmodes = { 0 };
1926         uint32_t ctx_enable_flag = 0;
1927         uint32_t enables = 0;
1928
1929         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1930                 PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
1931                 return rc;
1932         }
1933
1934         rc = bnxt_hwrm_vnic_plcmodes_qcfg(bp, vnic, &pmodes);
1935         if (rc)
1936                 return rc;
1937
1938         HWRM_PREP(&req, HWRM_VNIC_CFG, BNXT_USE_CHIMP_MB);
1939
1940         if (BNXT_CHIP_P5(bp)) {
1941                 int dflt_rxq = vnic->start_grp_id;
1942                 struct bnxt_rx_ring_info *rxr;
1943                 struct bnxt_cp_ring_info *cpr;
1944                 struct bnxt_rx_queue *rxq;
1945                 int i;
1946
1947                 /*
1948                  * The first active receive ring is used as the VNIC
1949                  * default receive ring. If there are no active receive
1950                  * rings (all corresponding receive queues are stopped),
1951                  * the first receive ring is used.
1952                  */
1953                 for (i = vnic->start_grp_id; i < vnic->end_grp_id; i++) {
1954                         rxq = bp->eth_dev->data->rx_queues[i];
1955                         if (rxq->rx_started) {
1956                                 dflt_rxq = i;
1957                                 break;
1958                         }
1959                 }
1960
1961                 rxq = bp->eth_dev->data->rx_queues[dflt_rxq];
1962                 rxr = rxq->rx_ring;
1963                 cpr = rxq->cp_ring;
1964
1965                 req.default_rx_ring_id =
1966                         rte_cpu_to_le_16(rxr->rx_ring_struct->fw_ring_id);
1967                 req.default_cmpl_ring_id =
1968                         rte_cpu_to_le_16(cpr->cp_ring_struct->fw_ring_id);
1969                 enables = HWRM_VNIC_CFG_INPUT_ENABLES_DEFAULT_RX_RING_ID |
1970                           HWRM_VNIC_CFG_INPUT_ENABLES_DEFAULT_CMPL_RING_ID;
1971                 if (bp->vnic_cap_flags & BNXT_VNIC_CAP_RX_CMPL_V2) {
1972                         enables |= HWRM_VNIC_CFG_INPUT_ENABLES_RX_CSUM_V2_MODE;
1973                         req.rx_csum_v2_mode =
1974                                 HWRM_VNIC_CFG_INPUT_RX_CSUM_V2_MODE_ALL_OK;
1975                 }
1976                 goto config_mru;
1977         }
1978
1979         /* Only RSS is supported for now; COS and LB are TBD. */
1980         enables = HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP;
1981         if (vnic->lb_rule != 0xffff)
1982                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE;
1983         if (vnic->cos_rule != 0xffff)
1984                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE;
1985         if (vnic->rss_rule != (uint16_t)HWRM_NA_SIGNATURE) {
1986                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_MRU;
1987                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
1988         }
1989         if (bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY) {
1990                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_QUEUE_ID;
1991                 req.queue_id = rte_cpu_to_le_16(vnic->cos_queue_id);
1992         }
1993
1994         enables |= ctx_enable_flag;
1995         req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);
1996         req.rss_rule = rte_cpu_to_le_16(vnic->rss_rule);
1997         req.cos_rule = rte_cpu_to_le_16(vnic->cos_rule);
1998         req.lb_rule = rte_cpu_to_le_16(vnic->lb_rule);
1999
2000 config_mru:
2001         req.enables = rte_cpu_to_le_32(enables);
2002         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2003         req.mru = rte_cpu_to_le_16(vnic->mru);
2004         /* Configure default VNIC only once. */
2005         if (vnic->func_default && !(bp->flags & BNXT_FLAG_DFLT_VNIC_SET)) {
2006                 req.flags |=
2007                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
2008                 bp->flags |= BNXT_FLAG_DFLT_VNIC_SET;
2009         }
2010         if (vnic->vlan_strip)
2011                 req.flags |=
2012                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
2013         if (vnic->bd_stall)
2014                 req.flags |=
2015                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
2016         if (vnic->rss_dflt_cr)
2017                 req.flags |= rte_cpu_to_le_32(
2018                         HWRM_VNIC_CFG_INPUT_FLAGS_RSS_DFLT_CR_MODE);
2019
2020         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2021
2022         HWRM_CHECK_RESULT();
2023         HWRM_UNLOCK();
2024
2025         rc = bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes);
2026
2027         return rc;
2028 }
2029
2030 int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
2031                 int16_t fw_vf_id)
2032 {
2033         int rc = 0;
2034         struct hwrm_vnic_qcfg_input req = {.req_type = 0 };
2035         struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2036
2037         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
2038                 PMD_DRV_LOG(DEBUG, "VNIC QCFG ID %d\n", vnic->fw_vnic_id);
2039                 return rc;
2040         }
2041         HWRM_PREP(&req, HWRM_VNIC_QCFG, BNXT_USE_CHIMP_MB);
2042
2043         req.enables =
2044                 rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID);
2045         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2046         req.vf_id = rte_cpu_to_le_16(fw_vf_id);
2047
2048         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2049
2050         HWRM_CHECK_RESULT();
2051
2052         vnic->dflt_ring_grp = rte_le_to_cpu_16(resp->dflt_ring_grp);
2053         vnic->rss_rule = rte_le_to_cpu_16(resp->rss_rule);
2054         vnic->cos_rule = rte_le_to_cpu_16(resp->cos_rule);
2055         vnic->lb_rule = rte_le_to_cpu_16(resp->lb_rule);
2056         vnic->mru = rte_le_to_cpu_16(resp->mru);
2057         vnic->func_default = rte_le_to_cpu_32(
2058                         resp->flags) & HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT;
2059         vnic->vlan_strip = rte_le_to_cpu_32(resp->flags) &
2060                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE;
2061         vnic->bd_stall = rte_le_to_cpu_32(resp->flags) &
2062                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE;
2063         vnic->rss_dflt_cr = rte_le_to_cpu_32(resp->flags) &
2064                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE;
2065
2066         HWRM_UNLOCK();
2067
2068         return rc;
2069 }
2070
2071 int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp,
2072                              struct bnxt_vnic_info *vnic, uint16_t ctx_idx)
2073 {
2074         int rc = 0;
2075         uint16_t ctx_id;
2076         struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
2077         struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
2078                                                 bp->hwrm_cmd_resp_addr;
2079
2080         HWRM_PREP(&req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, BNXT_USE_CHIMP_MB);
2081
2082         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2083         HWRM_CHECK_RESULT();
2084
2085         ctx_id = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
2086         if (!BNXT_HAS_RING_GRPS(bp))
2087                 vnic->fw_grp_ids[ctx_idx] = ctx_id;
2088         else if (ctx_idx == 0)
2089                 vnic->rss_rule = ctx_id;
2090
2091         HWRM_UNLOCK();
2092
2093         return rc;
2094 }
2095
2096 static
2097 int _bnxt_hwrm_vnic_ctx_free(struct bnxt *bp,
2098                              struct bnxt_vnic_info *vnic, uint16_t ctx_idx)
2099 {
2100         int rc = 0;
2101         struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
2102         struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
2103                                                 bp->hwrm_cmd_resp_addr;
2104
2105         if (ctx_idx == (uint16_t)HWRM_NA_SIGNATURE) {
2106                 PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
2107                 return rc;
2108         }
2109         HWRM_PREP(&req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, BNXT_USE_CHIMP_MB);
2110
2111         req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(ctx_idx);
2112
2113         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2114
2115         HWRM_CHECK_RESULT();
2116         HWRM_UNLOCK();
2117
2118         return rc;
2119 }
2120
2121 int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2122 {
2123         int rc = 0;
2124
2125         if (BNXT_CHIP_P5(bp)) {
2126                 int j;
2127
2128                 for (j = 0; j < vnic->num_lb_ctxts; j++) {
2129                         rc = _bnxt_hwrm_vnic_ctx_free(bp,
2130                                                       vnic,
2131                                                       vnic->fw_grp_ids[j]);
2132                         vnic->fw_grp_ids[j] = INVALID_HW_RING_ID;
2133                 }
2134                 vnic->num_lb_ctxts = 0;
2135         } else {
2136                 rc = _bnxt_hwrm_vnic_ctx_free(bp, vnic, vnic->rss_rule);
2137                 vnic->rss_rule = INVALID_HW_RING_ID;
2138         }
2139
2140         return rc;
2141 }
2142
2143 int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2144 {
2145         int rc = 0;
2146         struct hwrm_vnic_free_input req = {.req_type = 0 };
2147         struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;
2148
2149         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
2150                 PMD_DRV_LOG(DEBUG, "VNIC FREE ID %x\n", vnic->fw_vnic_id);
2151                 return rc;
2152         }
2153
2154         HWRM_PREP(&req, HWRM_VNIC_FREE, BNXT_USE_CHIMP_MB);
2155
2156         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2157
2158         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2159
2160         HWRM_CHECK_RESULT();
2161         HWRM_UNLOCK();
2162
2163         vnic->fw_vnic_id = INVALID_HW_RING_ID;
2164         /* Clear the flag so the default VNIC can be configured again. */
2165         if (vnic->func_default && (bp->flags & BNXT_FLAG_DFLT_VNIC_SET))
2166                 bp->flags &= ~BNXT_FLAG_DFLT_VNIC_SET;
2167
2168         return rc;
2169 }
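
/*
 * Usage sketch (illustrative only): the typical VNIC lifecycle built
 * from the helpers above. Each helper bails out gracefully when
 * fw_vnic_id is INVALID_HW_RING_ID, so cleanup is safe after a
 * partial failure.
 */
static __rte_unused int
bnxt_vnic_lifecycle_sketch(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = bnxt_hwrm_vnic_alloc(bp, vnic);

	if (rc)
		return rc;
	rc = bnxt_hwrm_vnic_cfg(bp, vnic);
	if (rc)
		bnxt_hwrm_vnic_free(bp, vnic);
	return rc;
}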
2170
2171 static int
2172 bnxt_hwrm_vnic_rss_cfg_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2173 {
2174         int i;
2175         int rc = 0;
2176         int nr_ctxs = vnic->num_lb_ctxts;
2177         struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
2178         struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2179
2180         for (i = 0; i < nr_ctxs; i++) {
2181                 HWRM_PREP(&req, HWRM_VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
2182
2183                 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2184                 req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
2185                 req.hash_mode_flags = vnic->hash_mode;
2186
2187                 req.hash_key_tbl_addr =
2188                         rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
2189
2190                 req.ring_grp_tbl_addr =
2191                         rte_cpu_to_le_64(vnic->rss_table_dma_addr +
2192                                          i * HW_HASH_INDEX_SIZE);
2193                 req.ring_table_pair_index = i;
2194                 req.rss_ctx_idx = rte_cpu_to_le_16(vnic->fw_grp_ids[i]);
2195
2196                 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req),
2197                                             BNXT_USE_CHIMP_MB);
2198
2199                 HWRM_CHECK_RESULT();
2200                 HWRM_UNLOCK();
2201         }
2202
2203         return rc;
2204 }
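
/*
 * Sketch (illustrative only, assuming rss_table_dma_addr is an
 * rte_iova_t): on P5 devices each RSS context owns one
 * HW_HASH_INDEX_SIZE-entry slice of the shared indirection table,
 * which is the address arithmetic programmed above.
 */
static __rte_unused rte_iova_t
bnxt_rss_ctx_tbl_addr_sketch(struct bnxt_vnic_info *vnic, int i)
{
	/* Slice of the indirection table owned by RSS context i. */
	return vnic->rss_table_dma_addr + i * HW_HASH_INDEX_SIZE;
}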
2205
2206 int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
2207                            struct bnxt_vnic_info *vnic)
2208 {
2209         int rc = 0;
2210         struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
2211         struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2212
2213         if (!vnic->rss_table)
2214                 return 0;
2215
2216         if (BNXT_CHIP_P5(bp))
2217                 return bnxt_hwrm_vnic_rss_cfg_p5(bp, vnic);
2218
2219         HWRM_PREP(&req, HWRM_VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
2220
2221         req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
2222         req.hash_mode_flags = vnic->hash_mode;
2223
2224         req.ring_grp_tbl_addr =
2225             rte_cpu_to_le_64(vnic->rss_table_dma_addr);
2226         req.hash_key_tbl_addr =
2227             rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
2228         req.rss_ctx_idx = rte_cpu_to_le_16(vnic->rss_rule);
2229         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2230
2231         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2232
2233         HWRM_CHECK_RESULT();
2234         HWRM_UNLOCK();
2235
2236         return rc;
2237 }
2238
2239 int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
2240                         struct bnxt_vnic_info *vnic)
2241 {
2242         int rc = 0;
2243         struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
2244         struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2245         uint16_t size;
2246
2247         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
2248                 PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
2249                 return rc;
2250         }
2251
2252         HWRM_PREP(&req, HWRM_VNIC_PLCMODES_CFG, BNXT_USE_CHIMP_MB);
2253
2254         req.flags = rte_cpu_to_le_32(
2255                         HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);
2256
2257         req.enables = rte_cpu_to_le_32(
2258                 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID);
2259
2260         size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
2261         size -= RTE_PKTMBUF_HEADROOM;
2262         size = RTE_MIN(BNXT_MAX_PKT_LEN, size);
2263
2264         req.jumbo_thresh = rte_cpu_to_le_16(size);
2265         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2266
2267         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2268
2269         HWRM_CHECK_RESULT();
2270         HWRM_UNLOCK();
2271
2272         return rc;
2273 }
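
/*
 * Sketch (illustrative only): the jumbo threshold programmed above is
 * simply the usable data room of an rx mbuf clamped to the largest
 * packet the hardware supports.
 */
static __rte_unused uint16_t
bnxt_jumbo_thresh_sketch(struct rte_mempool *mb_pool)
{
	uint16_t size;

	size = rte_pktmbuf_data_room_size(mb_pool) - RTE_PKTMBUF_HEADROOM;
	return RTE_MIN(BNXT_MAX_PKT_LEN, size);
}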
2274
2275 int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
2276                         struct bnxt_vnic_info *vnic, bool enable)
2277 {
2278         int rc = 0;
2279         struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
2280         struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2281
2282         if (BNXT_CHIP_P5(bp) && !bp->max_tpa_v2) {
2283                 if (enable)
2284                         PMD_DRV_LOG(ERR, "No HW support for LRO\n");
2285                 return -ENOTSUP;
2286         }
2287
2288         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
2289                 PMD_DRV_LOG(DEBUG, "Invalid vNIC ID\n");
2290                 return 0;
2291         }
2292
2293         HWRM_PREP(&req, HWRM_VNIC_TPA_CFG, BNXT_USE_CHIMP_MB);
2294
2295         if (enable) {
2296                 req.enables = rte_cpu_to_le_32(
2297                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
2298                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS |
2299                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN);
2300                 req.flags = rte_cpu_to_le_32(
2301                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
2302                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
2303                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE |
2304                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO |
2305                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
2306                         HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ);
2307                 req.max_aggs = rte_cpu_to_le_16(BNXT_TPA_MAX_AGGS(bp));
2308                 req.max_agg_segs = rte_cpu_to_le_16(BNXT_TPA_MAX_SEGS(bp));
2309                 req.min_agg_len = rte_cpu_to_le_32(512);
2310         }
2311         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2312
2313         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2314
2315         HWRM_CHECK_RESULT();
2316         HWRM_UNLOCK();
2317
2318         return rc;
2319 }
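
/*
 * Usage sketch (illustrative only): LRO/TPA is toggled per VNIC; the
 * helper above rejects an enable on P5 parts without TPA v2 support.
 */
static __rte_unused int
bnxt_lro_toggle_sketch(struct bnxt *bp, struct bnxt_vnic_info *vnic,
		       bool enable)
{
	int rc = bnxt_hwrm_vnic_tpa_cfg(bp, vnic, enable);

	if (rc == -ENOTSUP)
		PMD_DRV_LOG(INFO, "LRO is not supported on this adapter\n");
	return rc;
}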
2320
2321 int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
2322 {
2323         struct hwrm_func_cfg_input req = {0};
2324         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2325         int rc;
2326
2327         req.flags = rte_cpu_to_le_32(bp->pf->vf_info[vf].func_cfg_flags);
2328         req.enables = rte_cpu_to_le_32(
2329                         HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2330         memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
2331         req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
2332
2333         HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
2334
2335         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2336         HWRM_CHECK_RESULT();
2337         HWRM_UNLOCK();
2338
2339         bp->pf->vf_info[vf].random_mac = false;
2340
2341         return rc;
2342 }
2343
2344 int bnxt_hwrm_func_qstats_tx_drop(struct bnxt *bp, uint16_t fid,
2345                                   uint64_t *dropped)
2346 {
2347         int rc = 0;
2348         struct hwrm_func_qstats_input req = {.req_type = 0};
2349         struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
2350
2351         HWRM_PREP(&req, HWRM_FUNC_QSTATS, BNXT_USE_CHIMP_MB);
2352
2353         req.fid = rte_cpu_to_le_16(fid);
2354
2355         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2356
2357         HWRM_CHECK_RESULT();
2358
2359         if (dropped)
2360                 *dropped = rte_le_to_cpu_64(resp->tx_drop_pkts);
2361
2362         HWRM_UNLOCK();
2363
2364         return rc;
2365 }
2366
2367 int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
2368                           struct rte_eth_stats *stats,
2369                           struct hwrm_func_qstats_output *func_qstats)
2370 {
2371         int rc = 0;
2372         struct hwrm_func_qstats_input req = {.req_type = 0};
2373         struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
2374
2375         HWRM_PREP(&req, HWRM_FUNC_QSTATS, BNXT_USE_CHIMP_MB);
2376
2377         req.fid = rte_cpu_to_le_16(fid);
2378
2379         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2380
2381         HWRM_CHECK_RESULT();
2382         if (func_qstats)
2383                 memcpy(func_qstats, resp,
2384                        sizeof(struct hwrm_func_qstats_output));
2385
2386         if (!stats)
2387                 goto exit;
2388
2389         stats->ipackets = rte_le_to_cpu_64(resp->rx_ucast_pkts);
2390         stats->ipackets += rte_le_to_cpu_64(resp->rx_mcast_pkts);
2391         stats->ipackets += rte_le_to_cpu_64(resp->rx_bcast_pkts);
2392         stats->ibytes = rte_le_to_cpu_64(resp->rx_ucast_bytes);
2393         stats->ibytes += rte_le_to_cpu_64(resp->rx_mcast_bytes);
2394         stats->ibytes += rte_le_to_cpu_64(resp->rx_bcast_bytes);
2395
2396         stats->opackets = rte_le_to_cpu_64(resp->tx_ucast_pkts);
2397         stats->opackets += rte_le_to_cpu_64(resp->tx_mcast_pkts);
2398         stats->opackets += rte_le_to_cpu_64(resp->tx_bcast_pkts);
2399         stats->obytes = rte_le_to_cpu_64(resp->tx_ucast_bytes);
2400         stats->obytes += rte_le_to_cpu_64(resp->tx_mcast_bytes);
2401         stats->obytes += rte_le_to_cpu_64(resp->tx_bcast_bytes);
2402
2403         stats->imissed = rte_le_to_cpu_64(resp->rx_discard_pkts);
2404         stats->ierrors = rte_le_to_cpu_64(resp->rx_drop_pkts);
2405         stats->oerrors = rte_le_to_cpu_64(resp->tx_discard_pkts);
2406
2407 exit:
2408         HWRM_UNLOCK();
2409
2410         return rc;
2411 }
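
/*
 * Usage sketch (illustrative only): fetching the aggregate counters and
 * the TX drop count for one function with the two helpers above; both
 * read the same firmware statistics block.
 */
static __rte_unused int
bnxt_func_qstats_sketch(struct bnxt *bp, uint16_t fid,
			struct rte_eth_stats *stats)
{
	uint64_t tx_dropped = 0;
	int rc;

	rc = bnxt_hwrm_func_qstats(bp, fid, stats, NULL);
	if (rc)
		return rc;
	return bnxt_hwrm_func_qstats_tx_drop(bp, fid, &tx_dropped);
}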
2412
2413 int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid)
2414 {
2415         int rc = 0;
2416         struct hwrm_func_clr_stats_input req = {.req_type = 0};
2417         struct hwrm_func_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
2418
2419         HWRM_PREP(&req, HWRM_FUNC_CLR_STATS, BNXT_USE_CHIMP_MB);
2420
2421         req.fid = rte_cpu_to_le_16(fid);
2422
2423         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2424
2425         HWRM_CHECK_RESULT();
2426         HWRM_UNLOCK();
2427
2428         return rc;
2429 }
2430
2431 int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
2432 {
2433         unsigned int i;
2434         int rc = 0;
2435
2436         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
2437                 struct bnxt_tx_queue *txq;
2438                 struct bnxt_rx_queue *rxq;
2439                 struct bnxt_cp_ring_info *cpr;
2440
2441                 if (i >= bp->rx_cp_nr_rings) {
2442                         txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
2443                         cpr = txq->cp_ring;
2444                 } else {
2445                         rxq = bp->rx_queues[i];
2446                         cpr = rxq->cp_ring;
2447                 }
2448
2449                 rc = bnxt_hwrm_stat_clear(bp, cpr);
2450                 if (rc)
2451                         return rc;
2452         }
2453         return 0;
2454 }
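
/*
 * Sketch (illustrative only): the loops in this file walk completion
 * rings with all rx rings first, then the tx rings, so a flat index i
 * maps to a completion ring as follows.
 */
static __rte_unused struct bnxt_cp_ring_info *
bnxt_cp_ring_by_index_sketch(struct bnxt *bp, unsigned int i)
{
	if (i >= bp->rx_cp_nr_rings)
		return bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
	return bp->rx_queues[i]->cp_ring;
}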
2455
2456 static int
2457 bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
2458 {
2459         int rc;
2460         unsigned int i;
2461         struct bnxt_cp_ring_info *cpr;
2462
2463         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
2464
2465                 if (i >= bp->rx_cp_nr_rings) {
2466                         cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
2467                 } else {
2468                         cpr = bp->rx_queues[i]->cp_ring;
2469                         if (BNXT_HAS_RING_GRPS(bp))
2470                                 bp->grp_info[i].fw_stats_ctx = -1;
2471                 }
2472                 if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
2473                         rc = bnxt_hwrm_stat_ctx_free(bp, cpr, i);
2474                         cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
2475                         if (rc)
2476                                 return rc;
2477                 }
2478         }
2479         return 0;
2480 }
2481
2482 int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
2483 {
2484         unsigned int i;
2485         int rc = 0;
2486
2487         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
2488                 struct bnxt_tx_queue *txq;
2489                 struct bnxt_rx_queue *rxq;
2490                 struct bnxt_cp_ring_info *cpr;
2491
2492                 if (i >= bp->rx_cp_nr_rings) {
2493                         txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
2494                         cpr = txq->cp_ring;
2495                 } else {
2496                         rxq = bp->rx_queues[i];
2497                         cpr = rxq->cp_ring;
2498                 }
2499
2500                 rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, i);
2501
2502                 if (rc)
2503                         return rc;
2504         }
2505         return rc;
2506 }
2507
2508 static int
2509 bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
2510 {
2511         uint16_t idx;
2512         int rc = 0;
2513
2514         if (!BNXT_HAS_RING_GRPS(bp))
2515                 return 0;
2516
2517         for (idx = 0; idx < bp->rx_cp_nr_rings; idx++) {
2518
2519                 if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID)
2520                         continue;
2521
2522                 rc = bnxt_hwrm_ring_grp_free(bp, idx);
2523
2524                 if (rc)
2525                         return rc;
2526         }
2527         return rc;
2528 }
2529
2530 void bnxt_free_nq_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
2531 {
2532         struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
2533
2534         bnxt_hwrm_ring_free(bp, cp_ring,
2535                             HWRM_RING_FREE_INPUT_RING_TYPE_NQ);
2536         cp_ring->fw_ring_id = INVALID_HW_RING_ID;
2537         memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
2538                                      sizeof(*cpr->cp_desc_ring));
2539         cpr->cp_raw_cons = 0;
2540         cpr->valid = 0;
2541 }
2542
2543 void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
2544 {
2545         struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
2546
2547         bnxt_hwrm_ring_free(bp, cp_ring,
2548                         HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL);
2549         cp_ring->fw_ring_id = INVALID_HW_RING_ID;
2550         memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
2551                         sizeof(*cpr->cp_desc_ring));
2552         cpr->cp_raw_cons = 0;
2553         cpr->valid = 0;
2554 }
2555
2556 void bnxt_free_hwrm_rx_ring(struct bnxt *bp, int queue_index)
2557 {
2558         struct bnxt_rx_queue *rxq = bp->rx_queues[queue_index];
2559         struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
2560         struct bnxt_ring *ring = rxr->rx_ring_struct;
2561         struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
2562
2563         if (ring->fw_ring_id != INVALID_HW_RING_ID) {
2564                 bnxt_hwrm_ring_free(bp, ring,
2565                                     HWRM_RING_FREE_INPUT_RING_TYPE_RX);
2566                 ring->fw_ring_id = INVALID_HW_RING_ID;
2567                 if (BNXT_HAS_RING_GRPS(bp))
2568                         bp->grp_info[queue_index].rx_fw_ring_id =
2569                                                         INVALID_HW_RING_ID;
2570         }
2571         ring = rxr->ag_ring_struct;
2572         if (ring->fw_ring_id != INVALID_HW_RING_ID) {
2573                 bnxt_hwrm_ring_free(bp, ring,
2574                                     BNXT_CHIP_P5(bp) ?
2575                                     HWRM_RING_FREE_INPUT_RING_TYPE_RX_AGG :
2576                                     HWRM_RING_FREE_INPUT_RING_TYPE_RX);
2577                 if (BNXT_HAS_RING_GRPS(bp))
2578                         bp->grp_info[queue_index].ag_fw_ring_id =
2579                                                         INVALID_HW_RING_ID;
2580         }
2581         if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
2582                 bnxt_free_cp_ring(bp, cpr);
2583
2584         if (BNXT_HAS_RING_GRPS(bp))
2585                 bp->grp_info[queue_index].cp_fw_ring_id = INVALID_HW_RING_ID;
2586 }
2587
2588 static int
2589 bnxt_free_all_hwrm_rings(struct bnxt *bp)
2590 {
2591         unsigned int i;
2592
2593         for (i = 0; i < bp->tx_cp_nr_rings; i++) {
2594                 struct bnxt_tx_queue *txq = bp->tx_queues[i];
2595                 struct bnxt_tx_ring_info *txr = txq->tx_ring;
2596                 struct bnxt_ring *ring = txr->tx_ring_struct;
2597                 struct bnxt_cp_ring_info *cpr = txq->cp_ring;
2598
2599                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
2600                         bnxt_hwrm_ring_free(bp, ring,
2601                                         HWRM_RING_FREE_INPUT_RING_TYPE_TX);
2602                         ring->fw_ring_id = INVALID_HW_RING_ID;
2603                         memset(txr->tx_desc_ring, 0,
2604                                         txr->tx_ring_struct->ring_size *
2605                                         sizeof(*txr->tx_desc_ring));
2606                         memset(txr->tx_buf_ring, 0,
2607                                         txr->tx_ring_struct->ring_size *
2608                                         sizeof(*txr->tx_buf_ring));
2609                         txr->tx_raw_prod = 0;
2610                         txr->tx_raw_cons = 0;
2611                 }
2612                 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
2613                         bnxt_free_cp_ring(bp, cpr);
2614                         cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
2615                 }
2616         }
2617
2618         for (i = 0; i < bp->rx_cp_nr_rings; i++)
2619                 bnxt_free_hwrm_rx_ring(bp, i);
2620
2621         return 0;
2622 }
2623
2624 int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
2625 {
2626         uint16_t i;
2627         int rc = 0;
2628
2629         if (!BNXT_HAS_RING_GRPS(bp))
2630                 return 0;
2631
2632         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
2633                 rc = bnxt_hwrm_ring_grp_alloc(bp, i);
2634                 if (rc)
2635                         return rc;
2636         }
2637         return rc;
2638 }
2639
2640 /*
2641  * HWRM utility functions
2642  */
2643
2644 void bnxt_free_hwrm_resources(struct bnxt *bp)
2645 {
2646         /* Free the HWRM request and response buffers. */
2647         rte_free(bp->hwrm_cmd_resp_addr);
2648         rte_free(bp->hwrm_short_cmd_req_addr);
2649         bp->hwrm_cmd_resp_addr = NULL;
2650         bp->hwrm_short_cmd_req_addr = NULL;
2651         bp->hwrm_cmd_resp_dma_addr = 0;
2652         bp->hwrm_short_cmd_req_dma_addr = 0;
2653 }
2654
2655 int bnxt_alloc_hwrm_resources(struct bnxt *bp)
2656 {
2657         struct rte_pci_device *pdev = bp->pdev;
2658         char type[RTE_MEMZONE_NAMESIZE];
2659
2660         snprintf(type, sizeof(type), "bnxt_hwrm_" PCI_PRI_FMT, pdev->addr.domain,
2661                  pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
2662         bp->max_resp_len = HWRM_MAX_RESP_LEN;
2663         bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
2664         if (bp->hwrm_cmd_resp_addr == NULL)
2665                 return -ENOMEM;
2666         bp->hwrm_cmd_resp_dma_addr =
2667                 rte_malloc_virt2iova(bp->hwrm_cmd_resp_addr);
2668         if (bp->hwrm_cmd_resp_dma_addr == RTE_BAD_IOVA) {
2669                 PMD_DRV_LOG(ERR,
2670                         "unable to map response address to physical memory\n");
2671                 return -ENOMEM;
2672         }
2673         rte_spinlock_init(&bp->hwrm_lock);
2674
2675         return 0;
2676 }
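
/*
 * Usage sketch (illustrative only, assuming bp starts zeroed as with
 * rte_zmalloc'd device private data): the alloc/free pair above
 * brackets the life of the HWRM DMA buffers. Freeing tolerates fields
 * that were never allocated because rte_free(NULL) is a no-op.
 */
static __rte_unused int
bnxt_hwrm_resources_sketch(struct bnxt *bp)
{
	int rc = bnxt_alloc_hwrm_resources(bp);

	if (rc)
		bnxt_free_hwrm_resources(bp);
	return rc;
}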
2677
2678 int
2679 bnxt_clear_one_vnic_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
2680 {
2681         int rc = 0;
2682
2683         if (filter->filter_type == HWRM_CFA_EM_FILTER) {
2684                 rc = bnxt_hwrm_clear_em_filter(bp, filter);
2685                 if (rc)
2686                         return rc;
2687         } else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) {
2688                 rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
2689                 if (rc)
2690                         return rc;
2691         }
2692
2693         rc = bnxt_hwrm_clear_l2_filter(bp, filter);
2694         return rc;
2695 }
2696
2697 static int
2698 bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2699 {
2700         struct bnxt_filter_info *filter;
2701         int rc = 0;
2702         /* Removal-safe walk; STAILQ_FOREACH must not free its cursor. */
2703         while ((filter = STAILQ_FIRST(&vnic->filter)) != NULL) {
2704                 rc = bnxt_clear_one_vnic_filter(bp, filter);
2705                 STAILQ_REMOVE(&vnic->filter, filter, bnxt_filter_info, next);
2706                 bnxt_free_filter(bp, filter);
2707         }
2708         return rc;
2709 }
2710
2711 static int
2712 bnxt_clear_hwrm_vnic_flows(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2713 {
2714         struct bnxt_filter_info *filter;
2715         struct rte_flow *flow;
2716         int rc = 0;
2717
2718         while (!STAILQ_EMPTY(&vnic->flow_list)) {
2719                 flow = STAILQ_FIRST(&vnic->flow_list);
2720                 filter = flow->filter;
2721                 PMD_DRV_LOG(DEBUG, "filter type %d\n", filter->filter_type);
2722                 rc = bnxt_clear_one_vnic_filter(bp, filter);
2723
2724                 STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
2725                 rte_free(flow);
2726         }
2727         return rc;
2728 }
2729
2730 int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2731 {
2732         struct bnxt_filter_info *filter;
2733         int rc = 0;
2734
2735         STAILQ_FOREACH(filter, &vnic->filter, next) {
2736                 if (filter->filter_type == HWRM_CFA_EM_FILTER)
2737                         rc = bnxt_hwrm_set_em_filter(bp, filter->dst_id,
2738                                                      filter);
2739                 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
2740                         rc = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id,
2741                                                          filter);
2742                 else
2743                         rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
2744                                                      filter);
2745                 if (rc)
2746                         break;
2747         }
2748         return rc;
2749 }
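/*
 * Replay summary: EM and ntuple filters are re-armed against the
 * destination ID recorded at creation (filter->dst_id), while plain L2
 * filters are re-armed against the owning VNIC (vnic->fw_vnic_id). For
 * example, restoring all filters after a port stop/start cycle reduces
 * to a single call per VNIC:
 *
 *	rc = bnxt_set_hwrm_vnic_filters(bp, vnic);
 */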
2750
2751 static void
2752 bnxt_free_tunnel_ports(struct bnxt *bp)
2753 {
2754         if (bp->vxlan_port_cnt)
2755                 bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id,
2756                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN);
2757
2758         if (bp->geneve_port_cnt)
2759                 bnxt_hwrm_tunnel_dst_port_free(bp, bp->geneve_fw_dst_port_id,
2760                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE);
2761 }
2762
2763 void bnxt_free_all_hwrm_resources(struct bnxt *bp)
2764 {
2765         int i;
2766
2767         if (bp->vnic_info == NULL)
2768                 return;
2769
2770         /*
2771          * Clean up VNICs in reverse order so that the L2 filter from
2772          * vnic0 is the last to be removed.
2773          */
2774         for (i = bp->max_vnics - 1; i >= 0; i--) {
2775                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
2776
2777                 if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
2778                         continue;
2779
2780                 bnxt_clear_hwrm_vnic_flows(bp, vnic);
2781
2782                 bnxt_clear_hwrm_vnic_filters(bp, vnic);
2783
2784                 bnxt_hwrm_vnic_ctx_free(bp, vnic);
2785
2786                 bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);
2787
2788                 bnxt_hwrm_vnic_free(bp, vnic);
2789
2790                 rte_free(vnic->fw_grp_ids);
2791         }
2792         /* Ring resources */
2793         bnxt_free_all_hwrm_rings(bp);
2794         bnxt_free_all_hwrm_ring_grps(bp);
2795         bnxt_free_all_hwrm_stat_ctxs(bp);
2796         bnxt_free_tunnel_ports(bp);
2797 }
2798
2799 static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
2800 {
2801         uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
2802
2803         if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
2804                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
2805
2806         switch (conf_link_speed) {
2807         case ETH_LINK_SPEED_10M_HD:
2808         case ETH_LINK_SPEED_100M_HD:
2809                 /* FALLTHROUGH */
2810                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
2811         }
2812         return hw_link_duplex;
2813 }
2814
2815 static uint16_t bnxt_check_eth_link_autoneg(uint32_t conf_link)
2816 {
2817         return !conf_link;
2818 }
2819
2820 static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed,
2821                                           uint16_t pam4_link)
2822 {
2823         uint16_t eth_link_speed = 0;
2824
2825         if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
2826                 return ETH_LINK_SPEED_AUTONEG;
2827
2828         switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
2829         case ETH_LINK_SPEED_100M:
2830         case ETH_LINK_SPEED_100M_HD:
2831                 /* FALLTHROUGH */
2832                 eth_link_speed =
2833                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
2834                 break;
2835         case ETH_LINK_SPEED_1G:
2836                 eth_link_speed =
2837                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
2838                 break;
2839         case ETH_LINK_SPEED_2_5G:
2840                 eth_link_speed =
2841                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
2842                 break;
2843         case ETH_LINK_SPEED_10G:
2844                 eth_link_speed =
2845                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
2846                 break;
2847         case ETH_LINK_SPEED_20G:
2848                 eth_link_speed =
2849                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
2850                 break;
2851         case ETH_LINK_SPEED_25G:
2852                 eth_link_speed =
2853                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
2854                 break;
2855         case ETH_LINK_SPEED_40G:
2856                 eth_link_speed =
2857                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
2858                 break;
2859         case ETH_LINK_SPEED_50G:
2860                 eth_link_speed = pam4_link ?
2861                         HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_50GB :
2862                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
2863                 break;
2864         case ETH_LINK_SPEED_100G:
2865                 eth_link_speed = pam4_link ?
2866                         HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_100GB :
2867                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB;
2868                 break;
2869         case ETH_LINK_SPEED_200G:
2870                 eth_link_speed =
2871                         HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_200GB;
2872                 break;
2873         default:
2874                 PMD_DRV_LOG(ERR,
2875                         "Unsupported link speed %u; default to AUTO\n",
2876                         conf_link_speed);
2877                 break;
2878         }
2879         return eth_link_speed;
2880 }
2881
2882 #define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
2883                 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
2884                 ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
2885                 ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G | \
2886                 ETH_LINK_SPEED_100G | ETH_LINK_SPEED_200G)
2887
2888 static int bnxt_validate_link_speed(struct bnxt *bp)
2889 {
2890         uint32_t link_speed = bp->eth_dev->data->dev_conf.link_speeds;
2891         uint16_t port_id = bp->eth_dev->data->port_id;
2892         uint32_t link_speed_capa;
2893         uint32_t one_speed;
2894
2895         if (link_speed == ETH_LINK_SPEED_AUTONEG)
2896                 return 0;
2897
2898         link_speed_capa = bnxt_get_speed_capabilities(bp);
2899
2900         if (link_speed & ETH_LINK_SPEED_FIXED) {
2901                 one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
2902
2903                 if (one_speed & (one_speed - 1)) {
2904                         PMD_DRV_LOG(ERR,
2905                                 "Invalid advertised speeds (%u) for port %u\n",
2906                                 link_speed, port_id);
2907                         return -EINVAL;
2908                 }
2909                 if ((one_speed & link_speed_capa) != one_speed) {
2910                         PMD_DRV_LOG(ERR,
2911                                 "Unsupported advertised speed (%u) for port %u\n",
2912                                 link_speed, port_id);
2913                         return -EINVAL;
2914                 }
2915         } else {
2916                 if (!(link_speed & link_speed_capa)) {
2917                         PMD_DRV_LOG(ERR,
2918                                 "Unsupported advertised speeds (%u) for port %u\n",
2919                                 link_speed, port_id);
2920                         return -EINVAL;
2921                 }
2922         }
2923         return 0;
2924 }
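/*
 * Worked example for the single-speed check above: a fixed-speed request
 * must advertise exactly one speed. With link_speeds =
 * ETH_LINK_SPEED_FIXED | ETH_LINK_SPEED_25G the masked value has a single
 * bit set, so (one_speed & (one_speed - 1)) == 0 and validation passes;
 * combining 10G and 25G leaves two bits set and fails with -EINVAL.
 */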
2925
2926 static uint16_t
2927 bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed)
2928 {
2929         uint16_t ret = 0;
2930
2931         if (link_speed == ETH_LINK_SPEED_AUTONEG) {
2932                 if (bp->link_info->support_speeds)
2933                         return bp->link_info->support_speeds;
2934                 link_speed = BNXT_SUPPORTED_SPEEDS;
2935         }
2936
2937         if (link_speed & ETH_LINK_SPEED_100M)
2938                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
2939         if (link_speed & ETH_LINK_SPEED_100M_HD)
2940                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
2941         if (link_speed & ETH_LINK_SPEED_1G)
2942                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
2943         if (link_speed & ETH_LINK_SPEED_2_5G)
2944                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
2945         if (link_speed & ETH_LINK_SPEED_10G)
2946                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
2947         if (link_speed & ETH_LINK_SPEED_20G)
2948                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
2949         if (link_speed & ETH_LINK_SPEED_25G)
2950                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
2951         if (link_speed & ETH_LINK_SPEED_40G)
2952                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
2953         if (link_speed & ETH_LINK_SPEED_50G)
2954                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
2955         if (link_speed & ETH_LINK_SPEED_100G)
2956                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100GB;
2957         if (link_speed & ETH_LINK_SPEED_200G)
2958                 ret |= HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_200GB;
2959         return ret;
2960 }
2961
2962 static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
2963 {
2964         uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;
2965
2966         switch (hw_link_speed) {
2967         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
2968                 eth_link_speed = ETH_SPEED_NUM_100M;
2969                 break;
2970         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
2971                 eth_link_speed = ETH_SPEED_NUM_1G;
2972                 break;
2973         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
2974                 eth_link_speed = ETH_SPEED_NUM_2_5G;
2975                 break;
2976         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
2977                 eth_link_speed = ETH_SPEED_NUM_10G;
2978                 break;
2979         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
2980                 eth_link_speed = ETH_SPEED_NUM_20G;
2981                 break;
2982         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
2983                 eth_link_speed = ETH_SPEED_NUM_25G;
2984                 break;
2985         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
2986                 eth_link_speed = ETH_SPEED_NUM_40G;
2987                 break;
2988         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
2989                 eth_link_speed = ETH_SPEED_NUM_50G;
2990                 break;
2991         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB:
2992                 eth_link_speed = ETH_SPEED_NUM_100G;
2993                 break;
2994         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_200GB:
2995                 eth_link_speed = ETH_SPEED_NUM_200G;
2996                 break;
2997         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
2998         default:
2999                 PMD_DRV_LOG(ERR, "HWRM link speed %d not defined\n",
3000                         hw_link_speed);
3001                 break;
3002         }
3003         return eth_link_speed;
3004 }
3005
3006 static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
3007 {
3008         uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;
3009
3010         switch (hw_link_duplex) {
3011         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
3012         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
3013                 /* FALLTHROUGH */
3014                 eth_link_duplex = ETH_LINK_FULL_DUPLEX;
3015                 break;
3016         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
3017                 eth_link_duplex = ETH_LINK_HALF_DUPLEX;
3018                 break;
3019         default:
3020                 PMD_DRV_LOG(ERR, "HWRM link duplex %d not defined\n",
3021                         hw_link_duplex);
3022                 break;
3023         }
3024         return eth_link_duplex;
3025 }
3026
3027 int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
3028 {
3029         int rc = 0;
3030         struct bnxt_link_info *link_info = bp->link_info;
3031
3032         rc = bnxt_hwrm_port_phy_qcaps(bp);
3033         if (rc)
3034                 PMD_DRV_LOG(ERR, "Get link config failed with rc %d\n", rc);
3035
3036         rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
3037         if (rc) {
3038                 PMD_DRV_LOG(ERR, "Get link config failed with rc %d\n", rc);
3039                 goto exit;
3040         }
3041
3042         if (link_info->link_speed)
3043                 link->link_speed =
3044                         bnxt_parse_hw_link_speed(link_info->link_speed);
3045         else
3046                 link->link_speed = ETH_SPEED_NUM_NONE;
3047         link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
3048         link->link_status = link_info->link_up;
3049         link->link_autoneg = link_info->auto_mode ==
3050                 HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
3051                 ETH_LINK_FIXED : ETH_LINK_AUTONEG;
3052 exit:
3053         return rc;
3054 }
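/*
 * Minimal usage sketch (illustrative; the function name is hypothetical
 * and assumes an initialized port with a valid bp->link_info): populate
 * an rte_eth_link from the current PHY state, as the PMD's link_update
 * callback does.
 */
static inline int bnxt_link_query_example(struct bnxt *bp)
{
        struct rte_eth_link link = { 0 };
        int rc;

        rc = bnxt_get_hwrm_link_config(bp, &link);
        if (rc == 0)
                PMD_DRV_LOG(INFO, "speed %u duplex %d status %d autoneg %d\n",
                            link.link_speed, link.link_duplex,
                            link.link_status, link.link_autoneg);
        return rc;
}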
3055
3056 int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
3057 {
3058         int rc = 0;
3059         struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
3060         struct bnxt_link_info link_req;
3061         uint16_t speed, autoneg;
3062
3063         if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp))
3064                 return 0;
3065
3066         rc = bnxt_validate_link_speed(bp);
3067         if (rc)
3068                 goto error;
3069
3070         memset(&link_req, 0, sizeof(link_req));
3071         link_req.link_up = link_up;
3072         if (!link_up)
3073                 goto port_phy_cfg;
3074
3075         autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
3076         if (BNXT_CHIP_P5(bp) &&
3077             dev_conf->link_speeds == ETH_LINK_SPEED_40G) {
3078                 /* 40G is not supported as part of media auto detect.
3079                  * The speed should be forced and autoneg disabled
3080                  * to configure 40G speed.
3081                  */
3082                 PMD_DRV_LOG(INFO, "Disabling autoneg for 40G\n");
3083                 autoneg = 0;
3084         }
3085
3086         /* Neither standard nor PAM4 autoneg speeds available: disable autoneg */
3087         if (bp->link_info->auto_link_speed == 0 &&
3088             bp->link_info->link_signal_mode &&
3089             bp->link_info->auto_pam4_link_speeds == 0)
3090                 autoneg = 0;
3091
3092         speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds,
3093                                           bp->link_info->link_signal_mode);
3094         link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
3095         /* Autoneg can be done only when the FW allows.
3096          * When user configures fixed speed of 40G and later changes to
3097          * any other speed, auto_link_speed/force_link_speed is still set
3098          * to 40G until link comes up at new speed.
3099          */
3100         if (autoneg == 1 &&
3101             !(!BNXT_CHIP_P5(bp) &&
3102               (bp->link_info->auto_link_speed ||
3103                bp->link_info->force_link_speed))) {
3104                 link_req.phy_flags |=
3105                                 HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
3106                 link_req.auto_link_speed_mask =
3107                         bnxt_parse_eth_link_speed_mask(bp,
3108                                                        dev_conf->link_speeds);
3109         } else {
3110                 if (bp->link_info->phy_type ==
3111                     HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET ||
3112                     bp->link_info->phy_type ==
3113                     HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE ||
3114                     bp->link_info->media_type ==
3115                     HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP) {
3116                         PMD_DRV_LOG(ERR, "10GBase-T devices must autoneg\n");
3117                         return -EINVAL;
3118                 }
3119
3120                 link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
3121                 /* If user wants a particular speed try that first. */
3122                 if (speed)
3123                         link_req.link_speed = speed;
3124                 else if (bp->link_info->force_pam4_link_speed)
3125                         link_req.link_speed =
3126                                 bp->link_info->force_pam4_link_speed;
3127                 else if (bp->link_info->auto_pam4_link_speeds)
3128                         link_req.link_speed =
3129                                 bp->link_info->auto_pam4_link_speeds;
3130                 else if (bp->link_info->support_pam4_speeds)
3131                         link_req.link_speed =
3132                                 bp->link_info->support_pam4_speeds;
3133                 else if (bp->link_info->force_link_speed)
3134                         link_req.link_speed = bp->link_info->force_link_speed;
3135                 else
3136                         link_req.link_speed = bp->link_info->auto_link_speed;
3137                 /* Auto PAM4 link speed is zero, but auto_link_speed is not
3138                  * zero. Use the auto_link_speed.
3139                  */
3140                 if (bp->link_info->auto_link_speed != 0 &&
3141                     bp->link_info->auto_pam4_link_speeds == 0)
3142                         link_req.link_speed = bp->link_info->auto_link_speed;
3143         }
3144         link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
3145         link_req.auto_pause = bp->link_info->auto_pause;
3146         link_req.force_pause = bp->link_info->force_pause;
3147
3148 port_phy_cfg:
3149         rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
3150         if (rc) {
3151                 PMD_DRV_LOG(ERR,
3152                         "Set link config failed with rc %d\n", rc);
3153         }
3154
3155 error:
3156         return rc;
3157 }
3158
3159 /* JIRA 22088 */
3160 int bnxt_hwrm_func_qcfg(struct bnxt *bp, uint16_t *mtu)
3161 {
3162         struct hwrm_func_qcfg_input req = {0};
3163         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3164         uint16_t svif_info;
3165         uint16_t flags;
3166         int rc = 0;
3167
3168         bp->func_svif = BNXT_SVIF_INVALID;
3169         HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
3170         req.fid = rte_cpu_to_le_16(0xffff);
3171
3172         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3173
3174         HWRM_CHECK_RESULT();
3175
3176         /* Hard-coded 12-bit (0xfff) VLAN ID mask */
3177         bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
3178
3179         svif_info = rte_le_to_cpu_16(resp->svif_info);
3180         if (svif_info & HWRM_FUNC_QCFG_OUTPUT_SVIF_INFO_SVIF_VALID)
3181                 bp->func_svif = svif_info &
3182                                      HWRM_FUNC_QCFG_OUTPUT_SVIF_INFO_SVIF_MASK;
3183
3184         flags = rte_le_to_cpu_16(resp->flags);
3185         if (BNXT_PF(bp) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_HOST))
3186                 bp->flags |= BNXT_FLAG_MULTI_HOST;
3187
3188         if (BNXT_VF(bp) &&
3189             !BNXT_VF_IS_TRUSTED(bp) &&
3190             (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_TRUSTED_VF)) {
3191                 bp->flags |= BNXT_FLAG_TRUSTED_VF_EN;
3192                 PMD_DRV_LOG(INFO, "Trusted VF cap enabled\n");
3193         } else if (BNXT_VF(bp) &&
3194                    BNXT_VF_IS_TRUSTED(bp) &&
3195                    !(flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_TRUSTED_VF)) {
3196                 bp->flags &= ~BNXT_FLAG_TRUSTED_VF_EN;
3197                 PMD_DRV_LOG(INFO, "Trusted VF cap disabled\n");
3198         }
3199
3200         if (mtu)
3201                 *mtu = rte_le_to_cpu_16(resp->mtu);
3202
3203         switch (resp->port_partition_type) {
3204         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
3205         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
3206         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
3207                 /* FALLTHROUGH */
3208                 bp->flags |= BNXT_FLAG_NPAR_PF;
3209                 break;
3210         default:
3211                 bp->flags &= ~BNXT_FLAG_NPAR_PF;
3212                 break;
3213         }
3214
3215         bp->legacy_db_size =
3216                 rte_le_to_cpu_16(resp->legacy_l2_db_size_kb) * 1024;
3217
3218         HWRM_UNLOCK();
3219
3220         return rc;
3221 }
3222
3223 int bnxt_hwrm_parent_pf_qcfg(struct bnxt *bp)
3224 {
3225         struct hwrm_func_qcfg_input req = {0};
3226         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3227         int rc;
3228
3229         if (!BNXT_VF_IS_TRUSTED(bp))
3230                 return 0;
3231
3232         if (!bp->parent)
3233                 return -EINVAL;
3234
3235         bp->parent->fid = BNXT_PF_FID_INVALID;
3236
3237         HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
3238
3239         req.fid = rte_cpu_to_le_16(0xfffe); /* Request parent PF information. */
3240
3241         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3242
3243         HWRM_CHECK_RESULT();
3244
3245         memcpy(bp->parent->mac_addr, resp->mac_address, RTE_ETHER_ADDR_LEN);
3246         bp->parent->vnic = rte_le_to_cpu_16(resp->dflt_vnic_id);
3247         bp->parent->fid = rte_le_to_cpu_16(resp->fid);
3248         bp->parent->port_id = rte_le_to_cpu_16(resp->port_id);
3249
3250         /* FIXME: Temporary workaround - remove when firmware issue is fixed. */
3251         if (bp->parent->vnic == 0) {
3252                 PMD_DRV_LOG(ERR, "Error: parent VNIC unavailable.\n");
3253                 /* Use hard-coded values appropriate for current Wh+ fw. */
3254                 if (bp->parent->fid == 2)
3255                         bp->parent->vnic = 0x100;
3256                 else
3257                         bp->parent->vnic = 1;
3258         }
3259
3260         HWRM_UNLOCK();
3261
3262         return 0;
3263 }
3264
3265 int bnxt_hwrm_get_dflt_vnic_svif(struct bnxt *bp, uint16_t fid,
3266                                  uint16_t *vnic_id, uint16_t *svif)
3267 {
3268         struct hwrm_func_qcfg_input req = {0};
3269         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3270         uint16_t svif_info;
3271         int rc = 0;
3272
3273         HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
3274         req.fid = rte_cpu_to_le_16(fid);
3275
3276         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3277
3278         HWRM_CHECK_RESULT();
3279
3280         if (vnic_id)
3281                 *vnic_id = rte_le_to_cpu_16(resp->dflt_vnic_id);
3282
3283         svif_info = rte_le_to_cpu_16(resp->svif_info);
3284         if (svif && (svif_info & HWRM_FUNC_QCFG_OUTPUT_SVIF_INFO_SVIF_VALID))
3285                 *svif = svif_info & HWRM_FUNC_QCFG_OUTPUT_SVIF_INFO_SVIF_MASK;
3286
3287         HWRM_UNLOCK();
3288
3289         return rc;
3290 }
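/*
 * Usage sketch (illustrative; the function name is hypothetical): fid
 * 0xffff queries the calling function itself, matching the convention in
 * bnxt_hwrm_func_qcfg() above. Either output pointer may be NULL when the
 * caller does not need that value.
 */
static inline void bnxt_own_svif_example(struct bnxt *bp)
{
        uint16_t vnic_id = 0, svif = 0;

        if (bnxt_hwrm_get_dflt_vnic_svif(bp, 0xffff, &vnic_id, &svif) == 0)
                PMD_DRV_LOG(DEBUG, "dflt vnic %u svif %u\n", vnic_id, svif);
}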
3291
3292 int bnxt_hwrm_port_mac_qcfg(struct bnxt *bp)
3293 {
3294         struct hwrm_port_mac_qcfg_input req = {0};
3295         struct hwrm_port_mac_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3296         uint16_t port_svif_info;
3297         int rc;
3298
3299         bp->port_svif = BNXT_SVIF_INVALID;
3300
3301         if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp))
3302                 return 0;
3303
3304         HWRM_PREP(&req, HWRM_PORT_MAC_QCFG, BNXT_USE_CHIMP_MB);
3305
3306         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3307
3308         HWRM_CHECK_RESULT_SILENT();
3309
3310         port_svif_info = rte_le_to_cpu_16(resp->port_svif_info);
3311         if (port_svif_info &
3312             HWRM_PORT_MAC_QCFG_OUTPUT_PORT_SVIF_INFO_PORT_SVIF_VALID)
3313                 bp->port_svif = port_svif_info &
3314                         HWRM_PORT_MAC_QCFG_OUTPUT_PORT_SVIF_INFO_PORT_SVIF_MASK;
3315
3316         HWRM_UNLOCK();
3317
3318         return 0;
3319 }
3320
3321 static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp,
3322                                  struct bnxt_pf_resource_info *pf_resc)
3323 {
3324         struct hwrm_func_cfg_input req = {0};
3325         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3326         uint32_t enables;
3327         int rc;
3328
3329         enables = HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
3330                   HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
3331                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
3332                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
3333                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
3334                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
3335                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
3336                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
3337                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS;
3338
3339         if (BNXT_HAS_RING_GRPS(bp)) {
3340                 enables |= HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS;
3341                 req.num_hw_ring_grps =
3342                         rte_cpu_to_le_16(pf_resc->num_hw_ring_grps);
3343         } else if (BNXT_HAS_NQ(bp)) {
3344                 enables |= HWRM_FUNC_CFG_INPUT_ENABLES_NUM_MSIX;
3345                 req.num_msix = rte_cpu_to_le_16(bp->max_nq_rings);
3346         }
3347
3348         req.flags = rte_cpu_to_le_32(bp->pf->func_cfg_flags);
3349         req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);
3350         req.mru = rte_cpu_to_le_16(BNXT_VNIC_MRU(bp->eth_dev->data->mtu));
3351         req.num_rsscos_ctxs = rte_cpu_to_le_16(pf_resc->num_rsscos_ctxs);
3352         req.num_stat_ctxs = rte_cpu_to_le_16(pf_resc->num_stat_ctxs);
3353         req.num_cmpl_rings = rte_cpu_to_le_16(pf_resc->num_cp_rings);
3354         req.num_tx_rings = rte_cpu_to_le_16(pf_resc->num_tx_rings);
3355         req.num_rx_rings = rte_cpu_to_le_16(pf_resc->num_rx_rings);
3356         req.num_l2_ctxs = rte_cpu_to_le_16(pf_resc->num_l2_ctxs);
3357         req.num_vnics = rte_cpu_to_le_16(bp->max_vnics);
3358         req.fid = rte_cpu_to_le_16(0xffff);
3359         req.enables = rte_cpu_to_le_32(enables);
3360
3361         HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
3362
3363         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3364
3365         HWRM_CHECK_RESULT();
3366         HWRM_UNLOCK();
3367
3368         return rc;
3369 }
3370
3371 /* min values are the guaranteed resources and max values are subject
3372  * to availability. The strategy for now is to keep both min & max
3373  * values the same.
3374  */
3375 static void
3376 bnxt_fill_vf_func_cfg_req_new(struct bnxt *bp,
3377                               struct hwrm_func_vf_resource_cfg_input *req,
3378                               int num_vfs)
3379 {
3380         req->max_rsscos_ctx = rte_cpu_to_le_16(bp->max_rsscos_ctx /
3381                                                (num_vfs + 1));
3382         req->min_rsscos_ctx = req->max_rsscos_ctx;
3383         req->max_stat_ctx = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
3384         req->min_stat_ctx = req->max_stat_ctx;
3385         req->max_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings /
3386                                                (num_vfs + 1));
3387         req->min_cmpl_rings = req->max_cmpl_rings;
3388         req->max_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
3389         req->min_tx_rings = req->max_tx_rings;
3390         req->max_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
3391         req->min_rx_rings = req->max_rx_rings;
3392         req->max_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
3393         req->min_l2_ctxs = req->max_l2_ctxs;
3394         /* TODO: For now, do not support VMDq/RFS on VFs. */
3395         req->max_vnics = rte_cpu_to_le_16(1);
3396         req->min_vnics = req->max_vnics;
3397         req->max_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
3398                                                  (num_vfs + 1));
3399         req->min_hw_ring_grps = req->max_hw_ring_grps;
3400         req->flags =
3401          rte_cpu_to_le_16(HWRM_FUNC_VF_RESOURCE_CFG_INPUT_FLAGS_MIN_GUARANTEED);
3402 }
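/*
 * Worked example of the even split above: with bp->max_tx_rings = 128 and
 * num_vfs = 7, each of the eight functions (PF plus seven VFs) is offered
 * 128 / (7 + 1) = 16 TX rings, and because min == max the allocation is
 * fully guaranteed rather than best-effort.
 */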
3403
3404 static void
3405 bnxt_fill_vf_func_cfg_req_old(struct bnxt *bp,
3406                               struct hwrm_func_cfg_input *req,
3407                               int num_vfs)
3408 {
3409         req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
3410                         HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
3411                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
3412                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
3413                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
3414                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
3415                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
3416                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
3417                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
3418                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
3419
3420         req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
3421                                     RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE *
3422                                     BNXT_NUM_VLANS);
3423         req->mru = rte_cpu_to_le_16(BNXT_VNIC_MRU(bp->eth_dev->data->mtu));
3424         req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx /
3425                                                 (num_vfs + 1));
3426         req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
3427         req->num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings /
3428                                                (num_vfs + 1));
3429         req->num_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
3430         req->num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
3431         req->num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
3432         /* TODO: For now, do not support VMDq/RFS on VFs. */
3433         req->num_vnics = rte_cpu_to_le_16(1);
3434         req->num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
3435                                                  (num_vfs + 1));
3436 }
3437
3438 /* Update the port-wide resource maximums based on the resources
3439  * actually allocated to the VF.
3440  */
3441 static int bnxt_update_max_resources(struct bnxt *bp,
3442                                      int vf)
3443 {
3444         struct hwrm_func_qcfg_input req = {0};
3445         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3446         int rc;
3447
3448         /* Get the actual allocated values now */
3449         HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
3450         req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
3451         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3452         HWRM_CHECK_RESULT();
3453
3454         bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->alloc_rsscos_ctx);
3455         bp->max_stat_ctx -= rte_le_to_cpu_16(resp->alloc_stat_ctx);
3456         bp->max_cp_rings -= rte_le_to_cpu_16(resp->alloc_cmpl_rings);
3457         bp->max_tx_rings -= rte_le_to_cpu_16(resp->alloc_tx_rings);
3458         bp->max_rx_rings -= rte_le_to_cpu_16(resp->alloc_rx_rings);
3459         bp->max_l2_ctx -= rte_le_to_cpu_16(resp->alloc_l2_ctx);
3460         bp->max_ring_grps -= rte_le_to_cpu_16(resp->alloc_hw_ring_grps);
3461
3462         HWRM_UNLOCK();
3463
3464         return 0;
3465 }
3466
3467 /* Update the PF resource values based on how many resources
3468  * got allocated to it.
3469  */
3470 static int bnxt_update_max_resources_pf_only(struct bnxt *bp)
3471 {
3472         struct hwrm_func_qcfg_input req = {0};
3473         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3474         int rc;
3475
3476         /* Get the actual allocated values now */
3477         HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
3478         req.fid = rte_cpu_to_le_16(0xffff);
3479         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3480         HWRM_CHECK_RESULT();
3481
3482         bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->alloc_rsscos_ctx);
3483         bp->max_stat_ctx = rte_le_to_cpu_16(resp->alloc_stat_ctx);
3484         bp->max_cp_rings = rte_le_to_cpu_16(resp->alloc_cmpl_rings);
3485         bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
3486         bp->max_rx_rings = rte_le_to_cpu_16(resp->alloc_rx_rings);
3487         bp->max_l2_ctx = rte_le_to_cpu_16(resp->alloc_l2_ctx);
3488         bp->max_ring_grps = rte_le_to_cpu_16(resp->alloc_hw_ring_grps);
3489         bp->max_vnics = rte_le_to_cpu_16(resp->alloc_vnics);
3490
3491         HWRM_UNLOCK();
3492
3493         return 0;
3494 }
3495
3496 int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
3497 {
3498         struct hwrm_func_qcfg_input req = {0};
3499         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3500         int rc;
3501
3502         /* Query the VF's currently configured default VLAN */
3503         HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
3504         req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
3505         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3506         HWRM_CHECK_RESULT();
3507         rc = rte_le_to_cpu_16(resp->vlan);
3508
3509         HWRM_UNLOCK();
3510
3511         return rc;
3512 }
3513
3514 static int bnxt_query_pf_resources(struct bnxt *bp,
3515                                    struct bnxt_pf_resource_info *pf_resc)
3516 {
3517         struct hwrm_func_qcfg_input req = {0};
3518         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3519         int rc;
3520
3521         /* Query the allocated resource counts and copy them into pf_resc */
3522         HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
3523         req.fid = rte_cpu_to_le_16(0xffff);
3524         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3525         HWRM_CHECK_RESULT();
3526
3527         pf_resc->num_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
3528         pf_resc->num_rsscos_ctxs = rte_le_to_cpu_16(resp->alloc_rsscos_ctx);
3529         pf_resc->num_stat_ctxs = rte_le_to_cpu_16(resp->alloc_stat_ctx);
3530         pf_resc->num_cp_rings = rte_le_to_cpu_16(resp->alloc_cmpl_rings);
3531         pf_resc->num_rx_rings = rte_le_to_cpu_16(resp->alloc_rx_rings);
3532         pf_resc->num_l2_ctxs = rte_le_to_cpu_16(resp->alloc_l2_ctx);
3533         pf_resc->num_hw_ring_grps = rte_le_to_cpu_32(resp->alloc_hw_ring_grps);
3534         bp->pf->evb_mode = resp->evb_mode;
3535
3536         HWRM_UNLOCK();
3537
3538         return rc;
3539 }
3540
3541 static void
3542 bnxt_calculate_pf_resources(struct bnxt *bp,
3543                             struct bnxt_pf_resource_info *pf_resc,
3544                             int num_vfs)
3545 {
3546         if (!num_vfs) {
3547                 pf_resc->num_rsscos_ctxs = bp->max_rsscos_ctx;
3548                 pf_resc->num_stat_ctxs = bp->max_stat_ctx;
3549                 pf_resc->num_cp_rings = bp->max_cp_rings;
3550                 pf_resc->num_tx_rings = bp->max_tx_rings;
3551                 pf_resc->num_rx_rings = bp->max_rx_rings;
3552                 pf_resc->num_l2_ctxs = bp->max_l2_ctx;
3553                 pf_resc->num_hw_ring_grps = bp->max_ring_grps;
3554
3555                 return;
3556         }
3557
3558         pf_resc->num_rsscos_ctxs = bp->max_rsscos_ctx / (num_vfs + 1) +
3559                                    bp->max_rsscos_ctx % (num_vfs + 1);
3560         pf_resc->num_stat_ctxs = bp->max_stat_ctx / (num_vfs + 1) +
3561                                  bp->max_stat_ctx % (num_vfs + 1);
3562         pf_resc->num_cp_rings = bp->max_cp_rings / (num_vfs + 1) +
3563                                 bp->max_cp_rings % (num_vfs + 1);
3564         pf_resc->num_tx_rings = bp->max_tx_rings / (num_vfs + 1) +
3565                                 bp->max_tx_rings % (num_vfs + 1);
3566         pf_resc->num_rx_rings = bp->max_rx_rings / (num_vfs + 1) +
3567                                 bp->max_rx_rings % (num_vfs + 1);
3568         pf_resc->num_l2_ctxs = bp->max_l2_ctx / (num_vfs + 1) +
3569                                bp->max_l2_ctx % (num_vfs + 1);
3570         pf_resc->num_hw_ring_grps = bp->max_ring_grps / (num_vfs + 1) +
3571                                     bp->max_ring_grps % (num_vfs + 1);
3572 }
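/*
 * Worked example: with bp->max_rx_rings = 100 and num_vfs = 7 the divisor
 * is 8, so each VF is sized for 100 / 8 = 12 RX rings while the PF keeps
 * the quotient plus the remainder, 12 + (100 % 8) = 16, accounting for
 * all 16 + 7 * 12 = 100 rings.
 */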
3573
3574 int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
3575 {
3576         struct bnxt_pf_resource_info pf_resc = { 0 };
3577         int rc;
3578
3579         if (!BNXT_PF(bp)) {
3580                 PMD_DRV_LOG(ERR, "Attempt to allocate PF resources on a VF!\n");
3581                 return -EINVAL;
3582         }
3583
3584         rc = bnxt_hwrm_func_qcaps(bp);
3585         if (rc)
3586                 return rc;
3587
3588         bnxt_calculate_pf_resources(bp, &pf_resc, 0);
3589
3590         bp->pf->func_cfg_flags &=
3591                 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
3592                   HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
3593         bp->pf->func_cfg_flags |=
3594                 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
3595
3596         rc = bnxt_hwrm_pf_func_cfg(bp, &pf_resc);
3597         if (rc)
3598                 return rc;
3599
3600         rc = bnxt_update_max_resources_pf_only(bp);
3601
3602         return rc;
3603 }
3604
3605 static int
3606 bnxt_configure_vf_req_buf(struct bnxt *bp, int num_vfs)
3607 {
3608         size_t req_buf_sz, sz;
3609         int i, rc;
3610
3611         req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;
3612         bp->pf->vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
3613                 page_roundup(num_vfs * HWRM_MAX_REQ_LEN));
3614         if (bp->pf->vf_req_buf == NULL) {
3615                 return -ENOMEM;
3616         }
3617
3618         for (sz = 0; sz < req_buf_sz; sz += getpagesize())
3619                 rte_mem_lock_page(((char *)bp->pf->vf_req_buf) + sz);
3620
3621         for (i = 0; i < num_vfs; i++)
3622                 bp->pf->vf_info[i].req_buf = ((char *)bp->pf->vf_req_buf) +
3623                                              (i * HWRM_MAX_REQ_LEN);
3624
3625         rc = bnxt_hwrm_func_buf_rgtr(bp, num_vfs);
3626         if (rc)
3627                 rte_free(bp->pf->vf_req_buf);
3628
3629         return rc;
3630 }
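/*
 * Request-buffer layout: the single allocation registered above is carved
 * into one HWRM_MAX_REQ_LEN slot per VF, so VF i's forwarded requests land
 * at vf_req_buf + i * HWRM_MAX_REQ_LEN. For a 128-byte request size (the
 * typical HWRM_MAX_REQ_LEN), VF 3's slot would start at byte offset 384.
 */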
3631
3632 static int
3633 bnxt_process_vf_resc_config_new(struct bnxt *bp, int num_vfs)
3634 {
3635         struct hwrm_func_vf_resource_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3636         struct hwrm_func_vf_resource_cfg_input req = {0};
3637         int i, rc = 0;
3638
3639         bnxt_fill_vf_func_cfg_req_new(bp, &req, num_vfs);
3640         bp->pf->active_vfs = 0;
3641         for (i = 0; i < num_vfs; i++) {
3642                 HWRM_PREP(&req, HWRM_FUNC_VF_RESOURCE_CFG, BNXT_USE_CHIMP_MB);
3643                 req.vf_id = rte_cpu_to_le_16(bp->pf->vf_info[i].fid);
3644                 rc = bnxt_hwrm_send_message(bp,
3645                                             &req,
3646                                             sizeof(req),
3647                                             BNXT_USE_CHIMP_MB);
3648                 if (rc || resp->error_code) {
3649                         PMD_DRV_LOG(ERR,
3650                                 "Failed to initialize VF %d\n", i);
3651                         PMD_DRV_LOG(ERR,
3652                                 "Not all VFs available. (%d, %d)\n",
3653                                 rc, resp->error_code);
3654                         HWRM_UNLOCK();
3655
3656                         /* If the first VF configuration itself fails,
3657                          * unregister the vf_fwd_request buffer.
3658                          */
3659                         if (i == 0)
3660                                 bnxt_hwrm_func_buf_unrgtr(bp);
3661                         break;
3662                 }
3663                 HWRM_UNLOCK();
3664
3665                 /* Update the max resource values based on the resource values
3666                  * allocated to the VF.
3667                  */
3668                 bnxt_update_max_resources(bp, i);
3669                 bp->pf->active_vfs++;
3670                 bnxt_hwrm_func_clr_stats(bp, bp->pf->vf_info[i].fid);
3671         }
3672
3673         return 0;
3674 }
3675
3676 static int
3677 bnxt_process_vf_resc_config_old(struct bnxt *bp, int num_vfs)
3678 {
3679         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3680         struct hwrm_func_cfg_input req = {0};
3681         int i, rc;
3682
3683         bnxt_fill_vf_func_cfg_req_old(bp, &req, num_vfs);
3684
3685         bp->pf->active_vfs = 0;
3686         for (i = 0; i < num_vfs; i++) {
3687                 HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
3688                 req.flags = rte_cpu_to_le_32(bp->pf->vf_info[i].func_cfg_flags);
3689                 req.fid = rte_cpu_to_le_16(bp->pf->vf_info[i].fid);
3690                 rc = bnxt_hwrm_send_message(bp,
3691                                             &req,
3692                                             sizeof(req),
3693                                             BNXT_USE_CHIMP_MB);
3694
3695                 /* Clear enable flag for next pass */
3696                 req.enables &= ~rte_cpu_to_le_32(
3697                                 HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
3698
3699                 if (rc || resp->error_code) {
3700                         PMD_DRV_LOG(ERR,
3701                                 "Failed to initialize VF %d\n", i);
3702                         PMD_DRV_LOG(ERR,
3703                                 "Not all VFs available. (%d, %d)\n",
3704                                 rc, resp->error_code);
3705                         HWRM_UNLOCK();
3706
3707                         /* If the first VF configuration itself fails,
3708                          * unregister the vf_fwd_request buffer.
3709                          */
3710                         if (i == 0)
3711                                 bnxt_hwrm_func_buf_unrgtr(bp);
3712                         break;
3713                 }
3714
3715                 HWRM_UNLOCK();
3716
3717                 /* Update the max resource values based on the resource values
3718                  * allocated to the VF.
3719                  */
3720                 bnxt_update_max_resources(bp, i);
3721                 bp->pf->active_vfs++;
3722                 bnxt_hwrm_func_clr_stats(bp, bp->pf->vf_info[i].fid);
3723         }
3724
3725         return 0;
3726 }
3727
3728 static void
3729 bnxt_configure_vf_resources(struct bnxt *bp, int num_vfs)
3730 {
3731         if (bp->flags & BNXT_FLAG_NEW_RM)
3732                 bnxt_process_vf_resc_config_new(bp, num_vfs);
3733         else
3734                 bnxt_process_vf_resc_config_old(bp, num_vfs);
3735 }
3736
3737 static void
3738 bnxt_update_pf_resources(struct bnxt *bp,
3739                          struct bnxt_pf_resource_info *pf_resc)
3740 {
3741         bp->max_rsscos_ctx = pf_resc->num_rsscos_ctxs;
3742         bp->max_stat_ctx = pf_resc->num_stat_ctxs;
3743         bp->max_cp_rings = pf_resc->num_cp_rings;
3744         bp->max_tx_rings = pf_resc->num_tx_rings;
3745         bp->max_rx_rings = pf_resc->num_rx_rings;
3746         bp->max_ring_grps = pf_resc->num_hw_ring_grps;
3747 }
3748
3749 static int32_t
3750 bnxt_configure_pf_resources(struct bnxt *bp,
3751                             struct bnxt_pf_resource_info *pf_resc)
3752 {
3753         /*
3754          * Use STD_TX_RING_MODE to limit the number of TX rings so that
3755          * QoS can function properly. Without it, the PF's TX rings would
3756          * not honor the configured bandwidth settings.
3757          */
3758         bp->pf->func_cfg_flags &=
3759                 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
3760                   HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
3761         bp->pf->func_cfg_flags |=
3762                 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
3763         return bnxt_hwrm_pf_func_cfg(bp, pf_resc);
3764 }
3765
3766 int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
3767 {
3768         struct bnxt_pf_resource_info pf_resc = { 0 };
3769         int rc;
3770
3771         if (!BNXT_PF(bp)) {
3772                 PMD_DRV_LOG(ERR, "Attempt to allocate VFs on a VF!\n");
3773                 return -EINVAL;
3774         }
3775
3776         rc = bnxt_hwrm_func_qcaps(bp);
3777         if (rc)
3778                 return rc;
3779
3780         bnxt_calculate_pf_resources(bp, &pf_resc, num_vfs);
3781
3782         rc = bnxt_configure_pf_resources(bp, &pf_resc);
3783         if (rc)
3784                 return rc;
3785
3786         rc = bnxt_query_pf_resources(bp, &pf_resc);
3787         if (rc)
3788                 return rc;
3789
3790         /*
3791          * Now, create and register a buffer to hold forwarded VF requests
3792          */
3793         rc = bnxt_configure_vf_req_buf(bp, num_vfs);
3794         if (rc)
3795                 return rc;
3796
3797         bnxt_configure_vf_resources(bp, num_vfs);
3798
3799         bnxt_update_pf_resources(bp, &pf_resc);
3800
3801         return 0;
3802 }
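/*
 * Illustrative caller sequence (assumed PCI probe context): a PF exposing
 * its SR-IOV functions would invoke
 *
 *	rc = bnxt_hwrm_allocate_vfs(bp, bp->pdev->max_vfs);
 *
 * after which bp->pf->active_vfs reports how many VFs the per-VF
 * configuration loop actually brought up.
 */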
3803
3804 int bnxt_hwrm_pf_evb_mode(struct bnxt *bp)
3805 {
3806         struct hwrm_func_cfg_input req = {0};
3807         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3808         int rc;
3809
3810         HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
3811
3812         req.fid = rte_cpu_to_le_16(0xffff);
3813         req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE);
3814         req.evb_mode = bp->pf->evb_mode;
3815
3816         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3817         HWRM_CHECK_RESULT();
3818         HWRM_UNLOCK();
3819
3820         return rc;
3821 }
3822
3823 int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
3824                                 uint8_t tunnel_type)
3825 {
3826         struct hwrm_tunnel_dst_port_alloc_input req = {0};
3827         struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3828         int rc = 0;
3829
3830         HWRM_PREP(&req, HWRM_TUNNEL_DST_PORT_ALLOC, BNXT_USE_CHIMP_MB);
3831         req.tunnel_type = tunnel_type;
3832         req.tunnel_dst_port_val = rte_cpu_to_be_16(port);
3833         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3834         HWRM_CHECK_RESULT();
3835
3836         switch (tunnel_type) {
3837         case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN:
3838                 bp->vxlan_fw_dst_port_id =
3839                         rte_le_to_cpu_16(resp->tunnel_dst_port_id);
3840                 bp->vxlan_port = port;
3841                 break;
3842         case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE:
3843                 bp->geneve_fw_dst_port_id =
3844                         rte_le_to_cpu_16(resp->tunnel_dst_port_id);
3845                 bp->geneve_port = port;
3846                 break;
3847         default:
3848                 break;
3849         }
3850
3851         HWRM_UNLOCK();
3852
3853         return rc;
3854 }
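/*
 * Usage sketch (illustrative; the function name is hypothetical):
 * registering the IANA-assigned VXLAN UDP port 4789. The port value is
 * passed in CPU order; the helper above converts it to network order for
 * the firmware.
 */
static inline int bnxt_vxlan_port_example(struct bnxt *bp)
{
        return bnxt_hwrm_tunnel_dst_port_alloc(bp, 4789,
                HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN);
}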
3855
3856 int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
3857                                 uint8_t tunnel_type)
3858 {
3859         struct hwrm_tunnel_dst_port_free_input req = {0};
3860         struct hwrm_tunnel_dst_port_free_output *resp = bp->hwrm_cmd_resp_addr;
3861         int rc = 0;
3862
3863         HWRM_PREP(&req, HWRM_TUNNEL_DST_PORT_FREE, BNXT_USE_CHIMP_MB);
3864
3865         req.tunnel_type = tunnel_type;
3866         req.tunnel_dst_port_id = rte_cpu_to_le_16(port);
3867         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3868
3869         HWRM_CHECK_RESULT();
3870         HWRM_UNLOCK();
3871
3872         if (tunnel_type ==
3873             HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN) {
3874                 bp->vxlan_port = 0;
3875                 bp->vxlan_port_cnt = 0;
3876         }
3877
3878         if (tunnel_type ==
3879             HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE) {
3880                 bp->geneve_port = 0;
3881                 bp->geneve_port_cnt = 0;
3882         }
3883
3884         return rc;
3885 }
3886
3887 int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf,
3888                                         uint32_t flags)
3889 {
3890         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3891         struct hwrm_func_cfg_input req = {0};
3892         int rc;
3893
3894         HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
3895
3896         req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
3897         req.flags = rte_cpu_to_le_32(flags);
3898         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3899
3900         HWRM_CHECK_RESULT();
3901         HWRM_UNLOCK();
3902
3903         return rc;
3904 }
3905
3906 void vf_vnic_set_rxmask_cb(struct bnxt_vnic_info *vnic, void *flagp)
3907 {
3908         uint32_t *flag = flagp;
3909
3910         vnic->flags = *flag;
3911 }
3912
3913 int bnxt_set_rx_mask_no_vlan(struct bnxt *bp, struct bnxt_vnic_info *vnic)
3914 {
3915         return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
3916 }
3917
3918 int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp, int num_vfs)
3919 {
3920         struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
3921         struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
3922         int rc;
3923
3924         HWRM_PREP(&req, HWRM_FUNC_BUF_RGTR, BNXT_USE_CHIMP_MB);
3925
3926         req.req_buf_num_pages = rte_cpu_to_le_16(1);
3927         req.req_buf_page_size =
3928                 rte_cpu_to_le_16(page_getenum(num_vfs * HWRM_MAX_REQ_LEN));
3929         req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
3930         req.req_buf_page_addr0 =
3931                 rte_cpu_to_le_64(rte_malloc_virt2iova(bp->pf->vf_req_buf));
3932         if (req.req_buf_page_addr0 == RTE_BAD_IOVA) {
3933                 PMD_DRV_LOG(ERR,
3934                         "unable to map buffer address to physical memory\n");
3935                 HWRM_UNLOCK();
3936                 return -ENOMEM;
3937         }
3938
3939         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3940
3941         HWRM_CHECK_RESULT();
3942         HWRM_UNLOCK();
3943
3944         return rc;
3945 }
3946
3947 int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp)
3948 {
3949         int rc = 0;
3950         struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 };
3951         struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
3952
3953         if (!(BNXT_PF(bp) && bp->pdev->max_vfs))
3954                 return 0;
3955
3956         HWRM_PREP(&req, HWRM_FUNC_BUF_UNRGTR, BNXT_USE_CHIMP_MB);
3957
3958         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3959
3960         HWRM_CHECK_RESULT();
3961         HWRM_UNLOCK();
3962
3963         return rc;
3964 }
3965
3966 int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
3967 {
3968         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3969         struct hwrm_func_cfg_input req = {0};
3970         int rc;
3971
3972         HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
3973
3974         req.fid = rte_cpu_to_le_16(0xffff);
3975         req.flags = rte_cpu_to_le_32(bp->pf->func_cfg_flags);
3976         req.enables = rte_cpu_to_le_32(
3977                         HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
3978         req.async_event_cr = rte_cpu_to_le_16(
3979                         bp->async_cp_ring->cp_ring_struct->fw_ring_id);
3980         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3981
3982         HWRM_CHECK_RESULT();
3983         HWRM_UNLOCK();
3984
3985         return rc;
3986 }
3987
3988 int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
3989 {
3990         struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3991         struct hwrm_func_vf_cfg_input req = {0};
3992         int rc;
3993
3994         HWRM_PREP(&req, HWRM_FUNC_VF_CFG, BNXT_USE_CHIMP_MB);
3995
3996         req.enables = rte_cpu_to_le_32(
3997                         HWRM_FUNC_VF_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
3998         req.async_event_cr = rte_cpu_to_le_16(
3999                         bp->async_cp_ring->cp_ring_struct->fw_ring_id);
4000         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4001
4002         HWRM_CHECK_RESULT();
4003         HWRM_UNLOCK();
4004
4005         return rc;
4006 }
4007
4008 int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf)
4009 {
4010         struct hwrm_func_cfg_input req = {0};
4011         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
4012         uint16_t dflt_vlan, fid;
4013         uint32_t func_cfg_flags;
4014         int rc = 0;
4015
4016         HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
4017
4018         if (is_vf) {
4019                 dflt_vlan = bp->pf->vf_info[vf].dflt_vlan;
4020                 fid = bp->pf->vf_info[vf].fid;
4021                 func_cfg_flags = bp->pf->vf_info[vf].func_cfg_flags;
4022         } else {
4023                 fid = 0xffff;
4024                 func_cfg_flags = bp->pf->func_cfg_flags;
4025                 dflt_vlan = bp->vlan;
4026         }
4027
4028         req.flags = rte_cpu_to_le_32(func_cfg_flags);
4029         req.fid = rte_cpu_to_le_16(fid);
4030         req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
4031         req.dflt_vlan = rte_cpu_to_le_16(dflt_vlan);
4032
4033         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4034
4035         HWRM_CHECK_RESULT();
4036         HWRM_UNLOCK();
4037
4038         return rc;
4039 }
4040
4041 int bnxt_hwrm_func_bw_cfg(struct bnxt *bp, uint16_t vf,
4042                         uint16_t max_bw, uint16_t enables)
4043 {
4044         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
4045         struct hwrm_func_cfg_input req = {0};
4046         int rc;
4047
4048         HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
4049
4050         req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
4051         req.enables |= rte_cpu_to_le_32(enables);
4052         req.flags = rte_cpu_to_le_32(bp->pf->vf_info[vf].func_cfg_flags);
4053         req.max_bw = rte_cpu_to_le_32(max_bw);
4054         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4055
4056         HWRM_CHECK_RESULT();
4057         HWRM_UNLOCK();
4058
4059         return rc;
4060 }
4061
4062 int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf)
4063 {
4064         struct hwrm_func_cfg_input req = {0};
4065         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
4066         int rc = 0;
4067
4068         HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
4069
4070         req.flags = rte_cpu_to_le_32(bp->pf->vf_info[vf].func_cfg_flags);
4071         req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
4072         req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
4073         req.dflt_vlan = rte_cpu_to_le_16(bp->pf->vf_info[vf].dflt_vlan);
4074
4075         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4076
4077         HWRM_CHECK_RESULT();
4078         HWRM_UNLOCK();
4079
4080         return rc;
4081 }
4082
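/* Select the async event completion ring for this function, dispatching to
 * the PF or VF variant above. A minimal usage sketch, assuming the async
 * completion ring has already been allocated:
 *
 *        rc = bnxt_hwrm_set_async_event_cr(bp);
 *        if (rc)
 *                PMD_DRV_LOG(ERR, "Failed to set async event CR\n");
 */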
4083 int bnxt_hwrm_set_async_event_cr(struct bnxt *bp)
4084 {
4085         int rc;
4086
4087         if (BNXT_PF(bp))
4088                 rc = bnxt_hwrm_func_cfg_def_cp(bp);
4089         else
4090                 rc = bnxt_hwrm_vf_func_cfg_def_cp(bp);
4091
4092         return rc;
4093 }
4094
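/* Reject a forwarded (encapsulated) HWRM request on behalf of a VF: the
 * firmware completes the original command with an error instead of
 * executing it.
 */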
4095 int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
4096                               void *encaped, size_t ec_size)
4097 {
4098         int rc = 0;
4099         struct hwrm_reject_fwd_resp_input req = {.req_type = 0};
4100         struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
4101
4102         if (ec_size > sizeof(req.encap_request))
4103                 return -EINVAL;
4104
4105         HWRM_PREP(&req, HWRM_REJECT_FWD_RESP, BNXT_USE_CHIMP_MB);
4106
4107         req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
4108         memcpy(req.encap_request, encaped, ec_size);
4109
4110         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4111
4112         HWRM_CHECK_RESULT();
4113         HWRM_UNLOCK();
4114
4115         return rc;
4116 }
4117
4118 int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
4119                                        struct rte_ether_addr *mac)
4120 {
4121         struct hwrm_func_qcfg_input req = {0};
4122         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
4123         int rc;
4124
4125         HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
4126
4127         req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
4128         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4129
4130         HWRM_CHECK_RESULT();
4131
4132         memcpy(mac->addr_bytes, resp->mac_address, RTE_ETHER_ADDR_LEN);
4133
4134         HWRM_UNLOCK();
4135
4136         return rc;
4137 }
4138
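/* Counterpart to the rejection path above: ask the firmware to execute the
 * forwarded (encapsulated) HWRM request on behalf of the originating
 * function.
 */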
4139 int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
4140                             void *encaped, size_t ec_size)
4141 {
4142         int rc = 0;
4143         struct hwrm_exec_fwd_resp_input req = {.req_type = 0};
4144         struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
4145
4146         if (ec_size > sizeof(req.encap_request))
4147                 return -EINVAL;
4148
4149         HWRM_PREP(&req, HWRM_EXEC_FWD_RESP, BNXT_USE_CHIMP_MB);
4150
4151         req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
4152         memcpy(req.encap_request, encaped, ec_size);
4153
4154         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4155
4156         HWRM_CHECK_RESULT();
4157         HWRM_UNLOCK();
4158
4159         return rc;
4160 }
4161
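/* Query one firmware stat context and fold the result into the per-queue
 * counters of rte_eth_stats: unicast, multicast and broadcast packets and
 * bytes are summed, and RX discards/errors are accumulated in q_errors.
 */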
4162 int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
4163                          struct rte_eth_stats *stats, uint8_t rx)
4164 {
4165         int rc = 0;
4166         struct hwrm_stat_ctx_query_input req = {.req_type = 0};
4167         struct hwrm_stat_ctx_query_output *resp = bp->hwrm_cmd_resp_addr;
4168
4169         HWRM_PREP(&req, HWRM_STAT_CTX_QUERY, BNXT_USE_CHIMP_MB);
4170
4171         req.stat_ctx_id = rte_cpu_to_le_32(cid);
4172
4173         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4174
4175         HWRM_CHECK_RESULT();
4176
4177         if (rx) {
4178                 stats->q_ipackets[idx] = rte_le_to_cpu_64(resp->rx_ucast_pkts);
4179                 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_mcast_pkts);
4180                 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_bcast_pkts);
4181                 stats->q_ibytes[idx] = rte_le_to_cpu_64(resp->rx_ucast_bytes);
4182                 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_mcast_bytes);
4183                 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_bcast_bytes);
4184                 stats->q_errors[idx] = rte_le_to_cpu_64(resp->rx_discard_pkts);
4185                 stats->q_errors[idx] += rte_le_to_cpu_64(resp->rx_error_pkts);
4186         } else {
4187                 stats->q_opackets[idx] = rte_le_to_cpu_64(resp->tx_ucast_pkts);
4188                 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_mcast_pkts);
4189                 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_bcast_pkts);
4190                 stats->q_obytes[idx] = rte_le_to_cpu_64(resp->tx_ucast_bytes);
4191                 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_mcast_bytes);
4192                 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_bcast_bytes);
4193         }
4194
4195         HWRM_UNLOCK();
4196
4197         return rc;
4198 }
4199
4200 int bnxt_hwrm_port_qstats(struct bnxt *bp)
4201 {
4202         struct hwrm_port_qstats_input req = {0};
4203         struct hwrm_port_qstats_output *resp = bp->hwrm_cmd_resp_addr;
4204         struct bnxt_pf_info *pf = bp->pf;
4205         int rc;
4206
4207         HWRM_PREP(&req, HWRM_PORT_QSTATS, BNXT_USE_CHIMP_MB);
4208
4209         req.port_id = rte_cpu_to_le_16(pf->port_id);
4210         req.tx_stat_host_addr = rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
4211         req.rx_stat_host_addr = rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
4212         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4213
4214         HWRM_CHECK_RESULT();
4215         HWRM_UNLOCK();
4216
4217         return rc;
4218 }
4219
4220 int bnxt_hwrm_port_clr_stats(struct bnxt *bp)
4221 {
4222         struct hwrm_port_clr_stats_input req = {0};
4223         struct hwrm_port_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
4224         struct bnxt_pf_info *pf = bp->pf;
4225         int rc;
4226
4227         /* Not allowed on NS2 device, NPAR, MultiHost, VF */
4228         if (!(bp->flags & BNXT_FLAG_PORT_STATS) || BNXT_VF(bp) ||
4229             BNXT_NPAR(bp) || BNXT_MH(bp) || BNXT_TOTAL_VFS(bp))
4230                 return 0;
4231
4232         HWRM_PREP(&req, HWRM_PORT_CLR_STATS, BNXT_USE_CHIMP_MB);
4233
4234         req.port_id = rte_cpu_to_le_16(pf->port_id);
4235         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4236
4237         HWRM_CHECK_RESULT();
4238         HWRM_UNLOCK();
4239
4240         return rc;
4241 }
4242
4243 int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
4244 {
4245         struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
4246         struct hwrm_port_led_qcaps_input req = {0};
4247         int rc;
4248
4249         if (BNXT_VF(bp))
4250                 return 0;
4251
4252         HWRM_PREP(&req, HWRM_PORT_LED_QCAPS, BNXT_USE_CHIMP_MB);
4253         req.port_id = bp->pf->port_id;
4254         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4255
4256         HWRM_CHECK_RESULT();
4257
4258         if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
4259                 unsigned int i;
4260
4261                 bp->leds->num_leds = resp->num_leds;
4262                 memcpy(bp->leds, &resp->led0_id,
4263                         sizeof(bp->leds[0]) * bp->leds->num_leds);
4264                 for (i = 0; i < bp->leds->num_leds; i++) {
4265                         struct bnxt_led_info *led = &bp->leds[i];
4266
4267                         uint16_t caps = led->led_state_caps;
4268
4269                         if (!led->led_group_id ||
4270                                 !BNXT_LED_ALT_BLINK_CAP(caps)) {
4271                                 bp->leds->num_leds = 0;
4272                                 break;
4273                         }
4274                 }
4275         }
4276
4277         HWRM_UNLOCK();
4278
4279         return rc;
4280 }
4281
4282 int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on)
4283 {
4284         struct hwrm_port_led_cfg_output *resp = bp->hwrm_cmd_resp_addr;
4285         struct hwrm_port_led_cfg_input req = {0};
4286         struct bnxt_led_cfg *led_cfg;
4287         uint8_t led_state = HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_DEFAULT;
4288         uint16_t duration = 0;
4289         int rc, i;
4290
4291         if (!bp->leds->num_leds || BNXT_VF(bp))
4292                 return -EOPNOTSUPP;
4293
4294         HWRM_PREP(&req, HWRM_PORT_LED_CFG, BNXT_USE_CHIMP_MB);
4295
4296         if (led_on) {
4297                 led_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT;
4298                 duration = rte_cpu_to_le_16(500);
4299         }
4300         req.port_id = bp->pf->port_id;
4301         req.num_leds = bp->leds->num_leds;
4302         led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
4303         for (i = 0; i < bp->leds->num_leds; i++, led_cfg++) {
4304                 req.enables |= BNXT_LED_DFLT_ENABLES(i);
4305                 led_cfg->led_id = bp->leds[i].led_id;
4306                 led_cfg->led_state = led_state;
4307                 led_cfg->led_blink_on = duration;
4308                 led_cfg->led_blink_off = duration;
4309                 led_cfg->led_group_id = bp->leds[i].led_group_id;
4310         }
4311
4312         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4313
4314         HWRM_CHECK_RESULT();
4315         HWRM_UNLOCK();
4316
4317         return rc;
4318 }
4319
4320 int bnxt_hwrm_nvm_get_dir_info(struct bnxt *bp, uint32_t *entries,
4321                                uint32_t *length)
4322 {
4323         int rc;
4324         struct hwrm_nvm_get_dir_info_input req = {0};
4325         struct hwrm_nvm_get_dir_info_output *resp = bp->hwrm_cmd_resp_addr;
4326
4327         HWRM_PREP(&req, HWRM_NVM_GET_DIR_INFO, BNXT_USE_CHIMP_MB);
4328
4329         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4330
4331         HWRM_CHECK_RESULT();
4332
4333         *entries = rte_le_to_cpu_32(resp->entries);
4334         *length = rte_le_to_cpu_32(resp->entry_length);
4335
4336         HWRM_UNLOCK();
4337         return rc;
4338 }
4339
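/* Read the NVRAM directory into the caller's buffer. The first two bytes
 * carry the entry count and per-entry length (truncated to 8 bits, which
 * assumes both fit in a byte); the rest is copied from a DMA-able bounce
 * buffer, clipped to the smaller of len and the directory size.
 */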
4340 int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data)
4341 {
4342         int rc;
4343         uint32_t dir_entries;
4344         uint32_t entry_length;
4345         uint8_t *buf;
4346         size_t buflen;
4347         rte_iova_t dma_handle;
4348         struct hwrm_nvm_get_dir_entries_input req = {0};
4349         struct hwrm_nvm_get_dir_entries_output *resp = bp->hwrm_cmd_resp_addr;
4350
4351         rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length);
4352         if (rc != 0)
4353                 return rc;
4354
4355         *data++ = dir_entries;
4356         *data++ = entry_length;
4357         len -= 2;
4358         memset(data, 0xff, len);
4359
4360         buflen = dir_entries * entry_length;
4361         buf = rte_malloc("nvm_dir", buflen, 0);
4362         if (buf == NULL)
4363                 return -ENOMEM;
4364         dma_handle = rte_malloc_virt2iova(buf);
4365         if (dma_handle == RTE_BAD_IOVA) {
4366                 rte_free(buf);
4367                 PMD_DRV_LOG(ERR,
4368                         "unable to map response address to physical memory\n");
4369                 return -ENOMEM;
4370         }
4371         HWRM_PREP(&req, HWRM_NVM_GET_DIR_ENTRIES, BNXT_USE_CHIMP_MB);
4372         req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
4373         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4374
4375         if (rc == 0)
4376                 memcpy(data, buf, len > buflen ? buflen : len);
4377
4378         rte_free(buf);
4379         HWRM_CHECK_RESULT();
4380         HWRM_UNLOCK();
4381
4382         return rc;
4383 }
4384
4385 int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index,
4386                              uint32_t offset, uint32_t length,
4387                              uint8_t *data)
4388 {
4389         int rc;
4390         uint8_t *buf;
4391         rte_iova_t dma_handle;
4392         struct hwrm_nvm_read_input req = {0};
4393         struct hwrm_nvm_read_output *resp = bp->hwrm_cmd_resp_addr;
4394
4395         buf = rte_malloc("nvm_item", length, 0);
4396         if (!buf)
4397                 return -ENOMEM;
4398
4399         dma_handle = rte_malloc_virt2iova(buf);
4400         if (dma_handle == RTE_BAD_IOVA) {
4401                 rte_free(buf);
4402                 PMD_DRV_LOG(ERR,
4403                         "unable to map response address to physical memory\n");
4404                 return -ENOMEM;
4405         }
4406         HWRM_PREP(&req, HWRM_NVM_READ, BNXT_USE_CHIMP_MB);
4407         req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
4408         req.dir_idx = rte_cpu_to_le_16(index);
4409         req.offset = rte_cpu_to_le_32(offset);
4410         req.len = rte_cpu_to_le_32(length);
4411         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4412         if (rc == 0)
4413                 memcpy(data, buf, length);
4414
4415         rte_free(buf);
4416         HWRM_CHECK_RESULT();
4417         HWRM_UNLOCK();
4418
4419         return rc;
4420 }
4421
4422 int bnxt_hwrm_erase_nvram_directory(struct bnxt *bp, uint8_t index)
4423 {
4424         int rc;
4425         struct hwrm_nvm_erase_dir_entry_input req = {0};
4426         struct hwrm_nvm_erase_dir_entry_output *resp = bp->hwrm_cmd_resp_addr;
4427
4428         HWRM_PREP(&req, HWRM_NVM_ERASE_DIR_ENTRY, BNXT_USE_CHIMP_MB);
4429         req.dir_idx = rte_cpu_to_le_16(index);
4430         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4431         HWRM_CHECK_RESULT();
4432         HWRM_UNLOCK();
4433
4434         return rc;
4435 }
4436
4438 int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
4439                           uint16_t dir_ordinal, uint16_t dir_ext,
4440                           uint16_t dir_attr, const uint8_t *data,
4441                           size_t data_len)
4442 {
4443         int rc;
4444         struct hwrm_nvm_write_input req = {0};
4445         struct hwrm_nvm_write_output *resp = bp->hwrm_cmd_resp_addr;
4446         rte_iova_t dma_handle;
4447         uint8_t *buf;
4448
4449         buf = rte_malloc("nvm_write", data_len, 0);
4450         if (!buf)
4451                 return -ENOMEM;
4452
4453         dma_handle = rte_malloc_virt2iova(buf);
4454         if (dma_handle == RTE_BAD_IOVA) {
4455                 rte_free(buf);
4456                 PMD_DRV_LOG(ERR,
4457                         "unable to map response address to physical memory\n");
4458                 return -ENOMEM;
4459         }
4460         memcpy(buf, data, data_len);
4461
4462         HWRM_PREP(&req, HWRM_NVM_WRITE, BNXT_USE_CHIMP_MB);
4463
4464         req.dir_type = rte_cpu_to_le_16(dir_type);
4465         req.dir_ordinal = rte_cpu_to_le_16(dir_ordinal);
4466         req.dir_ext = rte_cpu_to_le_16(dir_ext);
4467         req.dir_attr = rte_cpu_to_le_16(dir_attr);
4468         req.dir_data_length = rte_cpu_to_le_32(data_len);
4469         req.host_src_addr = rte_cpu_to_le_64(dma_handle);
4470
4471         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4472
4473         rte_free(buf);
4474         HWRM_CHECK_RESULT();
4475         HWRM_UNLOCK();
4476
4477         return rc;
4478 }
4479
4480 static void
4481 bnxt_vnic_count(struct bnxt_vnic_info *vnic __rte_unused, void *cbdata)
4482 {
4483         uint32_t *count = cbdata;
4484
4485         *count = *count + 1;
4486 }
4487
4488 static int bnxt_vnic_count_hwrm_stub(struct bnxt *bp __rte_unused,
4489                                      struct bnxt_vnic_info *vnic __rte_unused)
4490 {
4491         return 0;
4492 }
4493
4494 int bnxt_vf_vnic_count(struct bnxt *bp, uint16_t vf)
4495 {
4496         uint32_t count = 0;
4497
4498         bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf, bnxt_vnic_count,
4499             &count, bnxt_vnic_count_hwrm_stub);
4500
4501         return count;
4502 }
4503
4504 static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
4505                                         uint16_t *vnic_ids)
4506 {
4507         struct hwrm_func_vf_vnic_ids_query_input req = {0};
4508         struct hwrm_func_vf_vnic_ids_query_output *resp =
4509                                                 bp->hwrm_cmd_resp_addr;
4510         int rc;
4511
4512         /* First query all VNIC ids */
4513         HWRM_PREP(&req, HWRM_FUNC_VF_VNIC_IDS_QUERY, BNXT_USE_CHIMP_MB);
4514
4515         req.vf_id = rte_cpu_to_le_16(bp->pf->first_vf_id + vf);
4516         req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf->total_vnics);
4517         req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_malloc_virt2iova(vnic_ids));
4518
4519         if (req.vnic_id_tbl_addr == RTE_BAD_IOVA) {
4520                 HWRM_UNLOCK();
4521                 PMD_DRV_LOG(ERR,
4522                 "unable to map VNIC ID table address to physical memory\n");
4523                 return -ENOMEM;
4524         }
4525         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4526         HWRM_CHECK_RESULT();
4527         rc = rte_le_to_cpu_32(resp->vnic_id_cnt);
4528
4529         HWRM_UNLOCK();
4530
4531         return rc;
4532 }
4533
4534 /*
4535  * This function queries the VNIC IDs for a specified VF. For each VNIC,
4536  * it calls vnic_cb to update the necessary field in vnic_info with cbdata,
4537  * then calls hwrm_cb to program the new VNIC configuration.
4538  */
4539 int bnxt_hwrm_func_vf_vnic_query_and_config(struct bnxt *bp, uint16_t vf,
4540         void (*vnic_cb)(struct bnxt_vnic_info *, void *), void *cbdata,
4541         int (*hwrm_cb)(struct bnxt *bp, struct bnxt_vnic_info *vnic))
4542 {
4543         struct bnxt_vnic_info vnic;
4544         int rc = 0;
4545         int i, num_vnic_ids;
4546         uint16_t *vnic_ids;
4547         size_t vnic_id_sz;
4548         size_t sz;
4549
4550         /* First query all VNIC ids */
4551         vnic_id_sz = bp->pf->total_vnics * sizeof(*vnic_ids);
4552         vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
4553                         RTE_CACHE_LINE_SIZE);
4554         if (vnic_ids == NULL)
4555                 return -ENOMEM;
4556
4557         for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
4558                 rte_mem_lock_page(((char *)vnic_ids) + sz);
4559
4560         num_vnic_ids = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
4561
        if (num_vnic_ids < 0) {
                rte_free(vnic_ids);     /* don't leak the IDs table on the error path */
                return num_vnic_ids;
        }

4565         /* Query each VNIC's config, apply the vnic_cb update, then reprogram via hwrm_cb */
4566
4567         for (i = 0; i < num_vnic_ids; i++) {
4568                 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
4569                 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
4570                 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, bp->pf->first_vf_id + vf);
4571                 if (rc)
4572                         break;
4573                 if (vnic.mru <= 4)      /* Indicates unallocated */
4574                         continue;
4575
4576                 vnic_cb(&vnic, cbdata);
4577
4578                 rc = hwrm_cb(bp, &vnic);
4579                 if (rc)
4580                         break;
4581         }
4582
4583         rte_free(vnic_ids);
4584
4585         return rc;
4586 }
4587
4588 int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf,
4589                                               bool on)
4590 {
4591         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
4592         struct hwrm_func_cfg_input req = {0};
4593         int rc;
4594
4595         HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
4596
4597         req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
4598         req.enables |= rte_cpu_to_le_32(
4599                         HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE);
4600         req.vlan_antispoof_mode = on ?
4601                 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN :
4602                 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK;
4603         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4604
4605         HWRM_CHECK_RESULT();
4606         HWRM_UNLOCK();
4607
4608         return rc;
4609 }
4610
4611 int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf)
4612 {
4613         struct bnxt_vnic_info vnic;
4614         uint16_t *vnic_ids;
4615         size_t vnic_id_sz;
4616         int num_vnic_ids, i;
4617         size_t sz;
4618         int rc;
4619
4620         vnic_id_sz = bp->pf->total_vnics * sizeof(*vnic_ids);
4621         vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
4622                         RTE_CACHE_LINE_SIZE);
4623         if (vnic_ids == NULL)
4624                 return -ENOMEM;
4625
4626         for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
4627                 rte_mem_lock_page(((char *)vnic_ids) + sz);
4628
4629         rc = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
4630         if (rc <= 0)
4631                 goto exit;
4632         num_vnic_ids = rc;
4633
4634         /*
4635          * Loop through to find the default VNIC ID.
4636          * TODO: The easier way would be to obtain the resp->dflt_vnic_id
4637          * by sending the hwrm_func_qcfg command to the firmware.
4638          */
4639         for (i = 0; i < num_vnic_ids; i++) {
4640                 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
4641                 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
4642                 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic,
4643                                         bp->pf->first_vf_id + vf);
4644                 if (rc)
4645                         goto exit;
4646                 if (vnic.func_default) {
4647                         rte_free(vnic_ids);
4648                         return vnic.fw_vnic_id;
4649                 }
4650         }
4651         /* Could not find a default VNIC. */
4652         PMD_DRV_LOG(ERR, "No default VNIC\n");
4653 exit:
4654         rte_free(vnic_ids);
4655         return rc;
4656 }
4657
4658 int bnxt_hwrm_set_em_filter(struct bnxt *bp,
4659                          uint16_t dst_id,
4660                          struct bnxt_filter_info *filter)
4661 {
4662         int rc = 0;
4663         struct hwrm_cfa_em_flow_alloc_input req = {.req_type = 0 };
4664         struct hwrm_cfa_em_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4665         uint32_t enables = 0;
4666
4667         if (filter->fw_em_filter_id != UINT64_MAX)
4668                 bnxt_hwrm_clear_em_filter(bp, filter);
4669
4670         HWRM_PREP(&req, HWRM_CFA_EM_FLOW_ALLOC, BNXT_USE_KONG(bp));
4671
4672         req.flags = rte_cpu_to_le_32(filter->flags);
4673
4674         enables = filter->enables |
4675               HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_ID;
4676         req.dst_id = rte_cpu_to_le_16(dst_id);
4677
4678         if (filter->ip_addr_type) {
4679                 req.ip_addr_type = filter->ip_addr_type;
4680                 enables |= HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
4681         }
4682         if (enables &
4683             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
4684                 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
4685         if (enables &
4686             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_MACADDR)
4687                 memcpy(req.src_macaddr, filter->src_macaddr,
4688                        RTE_ETHER_ADDR_LEN);
4689         if (enables &
4690             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_MACADDR)
4691                 memcpy(req.dst_macaddr, filter->dst_macaddr,
4692                        RTE_ETHER_ADDR_LEN);
4693         if (enables &
4694             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_OVLAN_VID)
4695                 req.ovlan_vid = filter->l2_ovlan;
4696         if (enables &
4697             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IVLAN_VID)
4698                 req.ivlan_vid = filter->l2_ivlan;
4699         if (enables &
4700             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ETHERTYPE)
4701                 req.ethertype = rte_cpu_to_be_16(filter->ethertype);
4702         if (enables &
4703             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
4704                 req.ip_protocol = filter->ip_protocol;
4705         if (enables &
4706             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_IPADDR)
4707                 req.src_ipaddr[0] = rte_cpu_to_be_32(filter->src_ipaddr[0]);
4708         if (enables &
4709             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_IPADDR)
4710                 req.dst_ipaddr[0] = rte_cpu_to_be_32(filter->dst_ipaddr[0]);
4711         if (enables &
4712             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_PORT)
4713                 req.src_port = rte_cpu_to_be_16(filter->src_port);
4714         if (enables &
4715             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_PORT)
4716                 req.dst_port = rte_cpu_to_be_16(filter->dst_port);
4717         if (enables &
4718             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
4719                 req.mirror_vnic_id = filter->mirror_vnic_id;
4720
4721         req.enables = rte_cpu_to_le_32(enables);
4722
4723         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
4724
4725         HWRM_CHECK_RESULT();
4726
4727         filter->fw_em_filter_id = rte_le_to_cpu_64(resp->em_filter_id);
4728         HWRM_UNLOCK();
4729
4730         return rc;
4731 }
4732
4733 int bnxt_hwrm_clear_em_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
4734 {
4735         int rc = 0;
4736         struct hwrm_cfa_em_flow_free_input req = {.req_type = 0 };
4737         struct hwrm_cfa_em_flow_free_output *resp = bp->hwrm_cmd_resp_addr;
4738
4739         if (filter->fw_em_filter_id == UINT64_MAX)
4740                 return 0;
4741
4742         HWRM_PREP(&req, HWRM_CFA_EM_FLOW_FREE, BNXT_USE_KONG(bp));
4743
4744         req.em_filter_id = rte_cpu_to_le_64(filter->fw_em_filter_id);
4745
4746         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
4747
4748         HWRM_CHECK_RESULT();
4749         HWRM_UNLOCK();
4750
4751         filter->fw_em_filter_id = UINT64_MAX;
4752         filter->fw_l2_filter_id = UINT64_MAX;
4753
4754         return 0;
4755 }
4756
4757 int bnxt_hwrm_set_ntuple_filter(struct bnxt *bp,
4758                          uint16_t dst_id,
4759                          struct bnxt_filter_info *filter)
4760 {
4761         int rc = 0;
4762         struct hwrm_cfa_ntuple_filter_alloc_input req = {.req_type = 0 };
4763         struct hwrm_cfa_ntuple_filter_alloc_output *resp =
4764                                                 bp->hwrm_cmd_resp_addr;
4765         uint32_t enables = 0;
4766
4767         if (filter->fw_ntuple_filter_id != UINT64_MAX)
4768                 bnxt_hwrm_clear_ntuple_filter(bp, filter);
4769
4770         HWRM_PREP(&req, HWRM_CFA_NTUPLE_FILTER_ALLOC, BNXT_USE_CHIMP_MB);
4771
4772         req.flags = rte_cpu_to_le_32(filter->flags);
4773
4774         enables = filter->enables |
4775               HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
4776         req.dst_id = rte_cpu_to_le_16(dst_id);
4777
4778         if (filter->ip_addr_type) {
4779                 req.ip_addr_type = filter->ip_addr_type;
4780                 enables |=
4781                         HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
4782         }
4783         if (enables &
4784             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
4785                 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
4786         if (enables &
4787             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR)
4788                 memcpy(req.src_macaddr, filter->src_macaddr,
4789                        RTE_ETHER_ADDR_LEN);
4790         if (enables &
4791             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE)
4792                 req.ethertype = rte_cpu_to_be_16(filter->ethertype);
4793         if (enables &
4794             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
4795                 req.ip_protocol = filter->ip_protocol;
4796         if (enables &
4797             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR)
4798                 req.src_ipaddr[0] = rte_cpu_to_le_32(filter->src_ipaddr[0]);
4799         if (enables &
4800             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR_MASK)
4801                 req.src_ipaddr_mask[0] =
4802                         rte_cpu_to_le_32(filter->src_ipaddr_mask[0]);
4803         if (enables &
4804             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR)
4805                 req.dst_ipaddr[0] = rte_cpu_to_le_32(filter->dst_ipaddr[0]);
4806         if (enables &
4807             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR_MASK)
4808                 req.dst_ipaddr_mask[0] =
4809                         rte_cpu_to_be_32(filter->dst_ipaddr_mask[0]);
4810         if (enables &
4811             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT)
4812                 req.src_port = rte_cpu_to_le_16(filter->src_port);
4813         if (enables &
4814             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT_MASK)
4815                 req.src_port_mask = rte_cpu_to_le_16(filter->src_port_mask);
4816         if (enables &
4817             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT)
4818                 req.dst_port = rte_cpu_to_le_16(filter->dst_port);
4819         if (enables &
4820             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT_MASK)
4821                 req.dst_port_mask = rte_cpu_to_le_16(filter->dst_port_mask);
4822         if (enables &
4823             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
4824                 req.mirror_vnic_id = filter->mirror_vnic_id;
4825
4826         req.enables = rte_cpu_to_le_32(enables);
4827
4828         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4829
4830         HWRM_CHECK_RESULT();
4831
4832         filter->fw_ntuple_filter_id = rte_le_to_cpu_64(resp->ntuple_filter_id);
4833         filter->flow_id = rte_le_to_cpu_32(resp->flow_id);
4834         HWRM_UNLOCK();
4835
4836         return rc;
4837 }
4838
4839 int bnxt_hwrm_clear_ntuple_filter(struct bnxt *bp,
4840                                 struct bnxt_filter_info *filter)
4841 {
4842         int rc = 0;
4843         struct hwrm_cfa_ntuple_filter_free_input req = {.req_type = 0 };
4844         struct hwrm_cfa_ntuple_filter_free_output *resp =
4845                                                 bp->hwrm_cmd_resp_addr;
4846
4847         if (filter->fw_ntuple_filter_id == UINT64_MAX)
4848                 return 0;
4849
4850         HWRM_PREP(&req, HWRM_CFA_NTUPLE_FILTER_FREE, BNXT_USE_CHIMP_MB);
4851
4852         req.ntuple_filter_id = rte_cpu_to_le_64(filter->fw_ntuple_filter_id);
4853
4854         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4855
4856         HWRM_CHECK_RESULT();
4857         HWRM_UNLOCK();
4858
4859         filter->fw_ntuple_filter_id = UINT64_MAX;
4860
4861         return 0;
4862 }
4863
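/* P5 (Thor) RSS configuration: the redirection table is spread across
 * nr_ctxs RSS contexts, each holding 64 (BNXT_RSS_ENTRIES_PER_CTX_P5)
 * RX/completion ring-id pairs. Stopped queues are skipped so the table
 * references only active rings.
 */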
4864 static int
4865 bnxt_vnic_rss_configure_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic)
4866 {
4867         struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
4868         uint8_t *rx_queue_state = bp->eth_dev->data->rx_queue_state;
4869         struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
4870         struct bnxt_rx_queue **rxqs = bp->rx_queues;
4871         uint16_t *ring_tbl = vnic->rss_table;
4872         int nr_ctxs = vnic->num_lb_ctxts;
4873         int max_rings = bp->rx_nr_rings;
4874         int i, j, k, cnt;
4875         int rc = 0;
4876
4877         for (i = 0, k = 0; i < nr_ctxs; i++) {
4878                 struct bnxt_rx_ring_info *rxr;
4879                 struct bnxt_cp_ring_info *cpr;
4880
4881                 HWRM_PREP(&req, HWRM_VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
4882
4883                 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
4884                 req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
4885                 req.hash_mode_flags = vnic->hash_mode;
4886
4887                 req.ring_grp_tbl_addr =
4888                     rte_cpu_to_le_64(vnic->rss_table_dma_addr +
4889                                      i * BNXT_RSS_ENTRIES_PER_CTX_P5 *
4890                                      2 * sizeof(*ring_tbl));
4891                 req.hash_key_tbl_addr =
4892                     rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
4893
4894                 req.ring_table_pair_index = i;
4895                 req.rss_ctx_idx = rte_cpu_to_le_16(vnic->fw_grp_ids[i]);
4896
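                /* Fill all 64 ring-pair slots of this context's table; 64
                 * matches the BNXT_RSS_ENTRIES_PER_CTX_P5 stride used for
                 * ring_grp_tbl_addr above.
                 */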
4897                 for (j = 0; j < 64; j++) {
4898                         uint16_t ring_id;
4899
4900                         /* Find next active ring. */
4901                         for (cnt = 0; cnt < max_rings; cnt++) {
4902                                 if (rx_queue_state[k] !=
4903                                                 RTE_ETH_QUEUE_STATE_STOPPED)
4904                                         break;
4905                                 if (++k == max_rings)
4906                                         k = 0;
4907                         }
4908
4909                         /* Return if no rings are active. */
4910                         if (cnt == max_rings) {
4911                                 HWRM_UNLOCK();
4912                                 return 0;
4913                         }
4914
4915                         /* Add rx/cp ring pair to RSS table. */
4916                         rxr = rxqs[k]->rx_ring;
4917                         cpr = rxqs[k]->cp_ring;
4918
4919                         ring_id = rxr->rx_ring_struct->fw_ring_id;
4920                         *ring_tbl++ = rte_cpu_to_le_16(ring_id);
4921                         ring_id = cpr->cp_ring_struct->fw_ring_id;
4922                         *ring_tbl++ = rte_cpu_to_le_16(ring_id);
4923
4924                         if (++k == max_rings)
4925                                 k = 0;
4926                 }
4927                 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req),
4928                                             BNXT_USE_CHIMP_MB);
4929
4930                 HWRM_CHECK_RESULT();
4931                 HWRM_UNLOCK();
4932         }
4933
4934         return rc;
4935 }
4936
4937 int bnxt_vnic_rss_configure(struct bnxt *bp, struct bnxt_vnic_info *vnic)
4938 {
4939         unsigned int rss_idx, fw_idx, i;
4940
4941         if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
4942                 return 0;
4943
4944         if (!(vnic->rss_table && vnic->hash_type))
4945                 return 0;
4946
4947         if (BNXT_CHIP_P5(bp))
4948                 return bnxt_vnic_rss_configure_p5(bp, vnic);
4949
4950         /*
4951          * Fill the RSS hash & redirection table with
4952          * ring group ids for all VNICs
4953          */
4954         for (rss_idx = 0, fw_idx = 0; rss_idx < HW_HASH_INDEX_SIZE;
4955              rss_idx++, fw_idx++) {
4956                 for (i = 0; i < bp->rx_cp_nr_rings; i++) {
4957                         fw_idx %= bp->rx_cp_nr_rings;
4958                         if (vnic->fw_grp_ids[fw_idx] != INVALID_HW_RING_ID)
4959                                 break;
4960                         fw_idx++;
4961                 }
4962
4963                 if (i == bp->rx_cp_nr_rings)
4964                         return 0;
4965
4966                 vnic->rss_table[rss_idx] = vnic->fw_grp_ids[fw_idx];
4967         }
4968
4969         return bnxt_hwrm_vnic_rss_cfg(bp, vnic);
4970 }
4971
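/* Translate the generic bnxt_coal settings into AGGINT_PARAMS request
 * fields, converting each value to little endian and requesting timer
 * reset plus ring-idle behavior via the flags word.
 */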
4972 static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal,
4973         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
4974 {
4975         uint16_t flags;
4976
4977         req->num_cmpl_aggr_int = rte_cpu_to_le_16(hw_coal->num_cmpl_aggr_int);
4978
4979         /* This is a 6-bit value and must not be 0, or we'll get non-stop IRQs */
4980         req->num_cmpl_dma_aggr = rte_cpu_to_le_16(hw_coal->num_cmpl_dma_aggr);
4981
4982         /* This is a 6-bit value and must not be 0, or we'll get non-stop IRQs */
4983         req->num_cmpl_dma_aggr_during_int =
4984                 rte_cpu_to_le_16(hw_coal->num_cmpl_dma_aggr_during_int);
4985
4986         req->int_lat_tmr_max = rte_cpu_to_le_16(hw_coal->int_lat_tmr_max);
4987
4988         /* min timer set to 1/2 of interrupt timer */
4989         req->int_lat_tmr_min = rte_cpu_to_le_16(hw_coal->int_lat_tmr_min);
4990
4991         /* buf timer set to 1/4 of interrupt timer */
4992         req->cmpl_aggr_dma_tmr = rte_cpu_to_le_16(hw_coal->cmpl_aggr_dma_tmr);
4993
4994         req->cmpl_aggr_dma_tmr_during_int =
4995                 rte_cpu_to_le_16(hw_coal->cmpl_aggr_dma_tmr_during_int);
4996
4997         flags = HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET |
4998                 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_RING_IDLE;
4999         req->flags = rte_cpu_to_le_16(flags);
5000 }
5001
5002 static int bnxt_hwrm_set_coal_params_p5(struct bnxt *bp,
5003                 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *agg_req)
5004 {
5005         struct hwrm_ring_aggint_qcaps_input req = {0};
5006         struct hwrm_ring_aggint_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
5007         uint32_t enables;
5008         uint16_t flags;
5009         int rc;
5010
5011         HWRM_PREP(&req, HWRM_RING_AGGINT_QCAPS, BNXT_USE_CHIMP_MB);
5012         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5013         HWRM_CHECK_RESULT();
5014
5015         agg_req->num_cmpl_dma_aggr = resp->num_cmpl_dma_aggr_max;
5016         agg_req->cmpl_aggr_dma_tmr = resp->cmpl_aggr_dma_tmr_min;
5017
5018         flags = HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET |
5019                 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_RING_IDLE;
5020         agg_req->flags = rte_cpu_to_le_16(flags);
5021         enables =
5022          HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_CMPL_AGGR_DMA_TMR |
5023          HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_NUM_CMPL_DMA_AGGR;
5024         agg_req->enables = rte_cpu_to_le_32(enables);
5025
5026         HWRM_UNLOCK();
5027         return rc;
5028 }
5029
5030 int bnxt_hwrm_set_ring_coal(struct bnxt *bp,
5031                         struct bnxt_coal *coal, uint16_t ring_id)
5032 {
5033         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
5034         struct hwrm_ring_cmpl_ring_cfg_aggint_params_output *resp =
5035                                                 bp->hwrm_cmd_resp_addr;
5036         int rc;
5037
5038         /* Set ring coalesce parameters only for P5 chips and Stratus (100G) NICs */
5039         if (BNXT_CHIP_P5(bp)) {
5040                 if (bnxt_hwrm_set_coal_params_p5(bp, &req))
5041                         return -1;
5042         } else if (bnxt_stratus_device(bp)) {
5043                 bnxt_hwrm_set_coal_params(coal, &req);
5044         } else {
5045                 return 0;
5046         }
5047
5048         HWRM_PREP(&req,
5049                   HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS,
5050                   BNXT_USE_CHIMP_MB);
5051         req.ring_id = rte_cpu_to_le_16(ring_id);
5052         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5053         HWRM_CHECK_RESULT();
5054         HWRM_UNLOCK();
5055         return 0;
5056 }
5057
5058 #define BNXT_RTE_MEMZONE_FLAG  (RTE_MEMZONE_1GB | RTE_MEMZONE_IOVA_CONTIG)
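/* Query backing-store capabilities (P5 chips only, HWRM 1.9.2+, PF only)
 * and cache them in a freshly allocated bnxt_ctx_mem_info, including one
 * page-table info struct per fast-path TQM ring plus one for the slow
 * path.
 */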
5059 int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
5060 {
5061         struct hwrm_func_backing_store_qcaps_input req = {0};
5062         struct hwrm_func_backing_store_qcaps_output *resp =
5063                 bp->hwrm_cmd_resp_addr;
5064         struct bnxt_ctx_pg_info *ctx_pg;
5065         struct bnxt_ctx_mem_info *ctx;
5066         int total_alloc_len;
5067         int rc, i, tqm_rings;
5068
5069         if (!BNXT_CHIP_P5(bp) ||
5070             bp->hwrm_spec_code < HWRM_VERSION_1_9_2 ||
5071             BNXT_VF(bp) ||
5072             bp->ctx)
5073                 return 0;
5074
5075         HWRM_PREP(&req, HWRM_FUNC_BACKING_STORE_QCAPS, BNXT_USE_CHIMP_MB);
5076         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5077         HWRM_CHECK_RESULT_SILENT();
5078
5079         total_alloc_len = sizeof(*ctx);
5080         ctx = rte_zmalloc("bnxt_ctx_mem", total_alloc_len,
5081                           RTE_CACHE_LINE_SIZE);
5082         if (!ctx) {
5083                 rc = -ENOMEM;
5084                 goto ctx_err;
5085         }
5086
5087         ctx->qp_max_entries = rte_le_to_cpu_32(resp->qp_max_entries);
5088         ctx->qp_min_qp1_entries =
5089                 rte_le_to_cpu_16(resp->qp_min_qp1_entries);
5090         ctx->qp_max_l2_entries =
5091                 rte_le_to_cpu_16(resp->qp_max_l2_entries);
5092         ctx->qp_entry_size = rte_le_to_cpu_16(resp->qp_entry_size);
5093         ctx->srq_max_l2_entries =
5094                 rte_le_to_cpu_16(resp->srq_max_l2_entries);
5095         ctx->srq_max_entries = rte_le_to_cpu_32(resp->srq_max_entries);
5096         ctx->srq_entry_size = rte_le_to_cpu_16(resp->srq_entry_size);
5097         ctx->cq_max_l2_entries =
5098                 rte_le_to_cpu_16(resp->cq_max_l2_entries);
5099         ctx->cq_max_entries = rte_le_to_cpu_32(resp->cq_max_entries);
5100         ctx->cq_entry_size = rte_le_to_cpu_16(resp->cq_entry_size);
5101         ctx->vnic_max_vnic_entries =
5102                 rte_le_to_cpu_16(resp->vnic_max_vnic_entries);
5103         ctx->vnic_max_ring_table_entries =
5104                 rte_le_to_cpu_16(resp->vnic_max_ring_table_entries);
5105         ctx->vnic_entry_size = rte_le_to_cpu_16(resp->vnic_entry_size);
5106         ctx->stat_max_entries =
5107                 rte_le_to_cpu_32(resp->stat_max_entries);
5108         ctx->stat_entry_size = rte_le_to_cpu_16(resp->stat_entry_size);
5109         ctx->tqm_entry_size = rte_le_to_cpu_16(resp->tqm_entry_size);
5110         ctx->tqm_min_entries_per_ring =
5111                 rte_le_to_cpu_32(resp->tqm_min_entries_per_ring);
5112         ctx->tqm_max_entries_per_ring =
5113                 rte_le_to_cpu_32(resp->tqm_max_entries_per_ring);
5114         ctx->tqm_entries_multiple = resp->tqm_entries_multiple;
5115         if (!ctx->tqm_entries_multiple)
5116                 ctx->tqm_entries_multiple = 1;
5117         ctx->mrav_max_entries =
5118                 rte_le_to_cpu_32(resp->mrav_max_entries);
5119         ctx->mrav_entry_size = rte_le_to_cpu_16(resp->mrav_entry_size);
5120         ctx->tim_entry_size = rte_le_to_cpu_16(resp->tim_entry_size);
5121         ctx->tim_max_entries = rte_le_to_cpu_32(resp->tim_max_entries);
5122         ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
5123
5124         if (!ctx->tqm_fp_rings_count)
5125                 ctx->tqm_fp_rings_count = bp->max_q;
5126
5127         tqm_rings = ctx->tqm_fp_rings_count + 1;
5128
5129         ctx_pg = rte_malloc("bnxt_ctx_pg_mem",
5130                             sizeof(*ctx_pg) * tqm_rings,
5131                             RTE_CACHE_LINE_SIZE);
5132         if (!ctx_pg) {
5133                 rc = -ENOMEM;
5134                 goto ctx_err;
5135         }
5136         for (i = 0; i < tqm_rings; i++, ctx_pg++)
5137                 ctx->tqm_mem[i] = ctx_pg;
5138
5139         bp->ctx = ctx;
5140 ctx_err:
5141         HWRM_UNLOCK();
5142         return rc;
5143 }
5144
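/* Program the backing-store memory sized from the QCAPS results: for each
 * region selected in 'enables' (QP, SRQ, CQ, VNIC, STAT and the TQM
 * slow-path set plus up to eight fast-path ring sets, nine in all), report
 * the entry counts, entry sizes and page-table geometry to the firmware.
 */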
5145 int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, uint32_t enables)
5146 {
5147         struct hwrm_func_backing_store_cfg_input req = {0};
5148         struct hwrm_func_backing_store_cfg_output *resp =
5149                 bp->hwrm_cmd_resp_addr;
5150         struct bnxt_ctx_mem_info *ctx = bp->ctx;
5151         struct bnxt_ctx_pg_info *ctx_pg;
5152         uint32_t *num_entries;
5153         uint64_t *pg_dir;
5154         uint8_t *pg_attr;
5155         uint32_t ena;
5156         int i, rc;
5157
5158         if (!ctx)
5159                 return 0;
5160
5161         HWRM_PREP(&req, HWRM_FUNC_BACKING_STORE_CFG, BNXT_USE_CHIMP_MB);
5162         req.enables = rte_cpu_to_le_32(enables);
5163
5164         if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_QP) {
5165                 ctx_pg = &ctx->qp_mem;
5166                 req.qp_num_entries = rte_cpu_to_le_32(ctx_pg->entries);
5167                 req.qp_num_qp1_entries =
5168                         rte_cpu_to_le_16(ctx->qp_min_qp1_entries);
5169                 req.qp_num_l2_entries =
5170                         rte_cpu_to_le_16(ctx->qp_max_l2_entries);
5171                 req.qp_entry_size = rte_cpu_to_le_16(ctx->qp_entry_size);
5172                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
5173                                       &req.qpc_pg_size_qpc_lvl,
5174                                       &req.qpc_page_dir);
5175         }
5176
5177         if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_SRQ) {
5178                 ctx_pg = &ctx->srq_mem;
5179                 req.srq_num_entries = rte_cpu_to_le_32(ctx_pg->entries);
5180                 req.srq_num_l2_entries =
5181                                  rte_cpu_to_le_16(ctx->srq_max_l2_entries);
5182                 req.srq_entry_size = rte_cpu_to_le_16(ctx->srq_entry_size);
5183                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
5184                                       &req.srq_pg_size_srq_lvl,
5185                                       &req.srq_page_dir);
5186         }
5187
5188         if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_CQ) {
5189                 ctx_pg = &ctx->cq_mem;
5190                 req.cq_num_entries = rte_cpu_to_le_32(ctx_pg->entries);
5191                 req.cq_num_l2_entries =
5192                                 rte_cpu_to_le_16(ctx->cq_max_l2_entries);
5193                 req.cq_entry_size = rte_cpu_to_le_16(ctx->cq_entry_size);
5194                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
5195                                       &req.cq_pg_size_cq_lvl,
5196                                       &req.cq_page_dir);
5197         }
5198
5199         if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_VNIC) {
5200                 ctx_pg = &ctx->vnic_mem;
5201                 req.vnic_num_vnic_entries =
5202                         rte_cpu_to_le_16(ctx->vnic_max_vnic_entries);
5203                 req.vnic_num_ring_table_entries =
5204                         rte_cpu_to_le_16(ctx->vnic_max_ring_table_entries);
5205                 req.vnic_entry_size = rte_cpu_to_le_16(ctx->vnic_entry_size);
5206                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
5207                                       &req.vnic_pg_size_vnic_lvl,
5208                                       &req.vnic_page_dir);
5209         }
5210
5211         if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_STAT) {
5212                 ctx_pg = &ctx->stat_mem;
5213                 req.stat_num_entries = rte_cpu_to_le_32(ctx->stat_max_entries);
5214                 req.stat_entry_size = rte_cpu_to_le_16(ctx->stat_entry_size);
5215                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
5216                                       &req.stat_pg_size_stat_lvl,
5217                                       &req.stat_page_dir);
5218         }
5219
5220         req.tqm_entry_size = rte_cpu_to_le_16(ctx->tqm_entry_size);
5221         num_entries = &req.tqm_sp_num_entries;
5222         pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl;
5223         pg_dir = &req.tqm_sp_page_dir;
5224         ena = HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP;
5225         for (i = 0; i < 9; i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
5226                 if (!(enables & ena))
5227                         continue;
5228
5229                 req.tqm_entry_size = rte_cpu_to_le_16(ctx->tqm_entry_size);
5230
5231                 ctx_pg = ctx->tqm_mem[i];
5232                 *num_entries = rte_cpu_to_le_32(ctx_pg->entries);
5233                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
5234         }
5235
5236         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5237         HWRM_CHECK_RESULT();
5238         HWRM_UNLOCK();
5239
5240         return rc;
5241 }
5242
5243 int bnxt_hwrm_ext_port_qstats(struct bnxt *bp)
5244 {
5245         struct hwrm_port_qstats_ext_input req = {0};
5246         struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
5247         struct bnxt_pf_info *pf = bp->pf;
5248         int rc;
5249
5250         if (!(bp->flags & BNXT_FLAG_EXT_RX_PORT_STATS ||
5251               bp->flags & BNXT_FLAG_EXT_TX_PORT_STATS))
5252                 return 0;
5253
5254         HWRM_PREP(&req, HWRM_PORT_QSTATS_EXT, BNXT_USE_CHIMP_MB);
5255
5256         req.port_id = rte_cpu_to_le_16(pf->port_id);
5257         if (bp->flags & BNXT_FLAG_EXT_TX_PORT_STATS) {
5258                 req.tx_stat_host_addr =
5259                         rte_cpu_to_le_64(bp->hw_tx_port_stats_ext_map);
5260                 req.tx_stat_size =
5261                         rte_cpu_to_le_16(sizeof(struct tx_port_stats_ext));
5262         }
5263         if (bp->flags & BNXT_FLAG_EXT_RX_PORT_STATS) {
5264                 req.rx_stat_host_addr =
5265                         rte_cpu_to_le_64(bp->hw_rx_port_stats_ext_map);
5266                 req.rx_stat_size =
5267                         rte_cpu_to_le_16(sizeof(struct rx_port_stats_ext));
5268         }
5269         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5270
5271         if (rc) {
5272                 bp->fw_rx_port_stats_ext_size = 0;
5273                 bp->fw_tx_port_stats_ext_size = 0;
5274         } else {
5275                 bp->fw_rx_port_stats_ext_size =
5276                         rte_le_to_cpu_16(resp->rx_stat_size);
5277                 bp->fw_tx_port_stats_ext_size =
5278                         rte_le_to_cpu_16(resp->tx_stat_size);
5279         }
5280
5281         HWRM_CHECK_RESULT();
5282         HWRM_UNLOCK();
5283
5284         return rc;
5285 }
5286
5287 int
5288 bnxt_hwrm_tunnel_redirect(struct bnxt *bp, uint8_t type)
5289 {
5290         struct hwrm_cfa_redirect_tunnel_type_alloc_input req = {0};
5291         struct hwrm_cfa_redirect_tunnel_type_alloc_output *resp =
5292                 bp->hwrm_cmd_resp_addr;
5293         int rc = 0;
5294
5295         HWRM_PREP(&req, HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC, BNXT_USE_CHIMP_MB);
5296         req.tunnel_type = type;
5297         req.dest_fid = bp->fw_fid;
5298         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5299         HWRM_CHECK_RESULT();
5300
5301         HWRM_UNLOCK();
5302
5303         return rc;
5304 }
5305
5306 int
5307 bnxt_hwrm_tunnel_redirect_free(struct bnxt *bp, uint8_t type)
5308 {
5309         struct hwrm_cfa_redirect_tunnel_type_free_input req = {0};
5310         struct hwrm_cfa_redirect_tunnel_type_free_output *resp =
5311                 bp->hwrm_cmd_resp_addr;
5312         int rc = 0;
5313
5314         HWRM_PREP(&req, HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE, BNXT_USE_CHIMP_MB);
5315         req.tunnel_type = type;
5316         req.dest_fid = bp->fw_fid;
5317         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5318         HWRM_CHECK_RESULT();
5319
5320         HWRM_UNLOCK();
5321
5322         return rc;
5323 }
5324
5325 int bnxt_hwrm_tunnel_redirect_query(struct bnxt *bp, uint32_t *type)
5326 {
5327         struct hwrm_cfa_redirect_query_tunnel_type_input req = {0};
5328         struct hwrm_cfa_redirect_query_tunnel_type_output *resp =
5329                 bp->hwrm_cmd_resp_addr;
5330         int rc = 0;
5331
5332         HWRM_PREP(&req, HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE, BNXT_USE_CHIMP_MB);
5333         req.src_fid = bp->fw_fid;
5334         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5335         HWRM_CHECK_RESULT();
5336
5337         if (type)
5338                 *type = rte_le_to_cpu_32(resp->tunnel_mask);
5339
5340         HWRM_UNLOCK();
5341
5342         return rc;
5343 }
5344
5345 int bnxt_hwrm_tunnel_redirect_info(struct bnxt *bp, uint8_t tun_type,
5346                                    uint16_t *dst_fid)
5347 {
5348         struct hwrm_cfa_redirect_tunnel_type_info_input req = {0};
5349         struct hwrm_cfa_redirect_tunnel_type_info_output *resp =
5350                 bp->hwrm_cmd_resp_addr;
5351         int rc = 0;
5352
5353         HWRM_PREP(&req, HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO, BNXT_USE_CHIMP_MB);
5354         req.src_fid = bp->fw_fid;
5355         req.tunnel_type = tun_type;
5356         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5357         HWRM_CHECK_RESULT();
5358
5359         if (dst_fid)
5360                 *dst_fid = rte_le_to_cpu_16(resp->dest_fid);
5361
5362         PMD_DRV_LOG(DEBUG, "dst_fid: %x\n", rte_le_to_cpu_16(resp->dest_fid));
5363
5364         HWRM_UNLOCK();
5365
5366         return rc;
5367 }
5368
5369 int bnxt_hwrm_set_mac(struct bnxt *bp)
5370 {
5371         struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
5372         struct hwrm_func_vf_cfg_input req = {0};
5373         int rc = 0;
5374
5375         if (!BNXT_VF(bp))
5376                 return 0;
5377
5378         HWRM_PREP(&req, HWRM_FUNC_VF_CFG, BNXT_USE_CHIMP_MB);
5379
5380         req.enables =
5381                 rte_cpu_to_le_32(HWRM_FUNC_VF_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
5382         memcpy(req.dflt_mac_addr, bp->mac_addr, RTE_ETHER_ADDR_LEN);
5383
5384         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5385
5386         HWRM_CHECK_RESULT();
5387
5388         HWRM_UNLOCK();
5389
5390         return rc;
5391 }
5392
5393 int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
5394 {
5395         struct hwrm_func_drv_if_change_output *resp = bp->hwrm_cmd_resp_addr;
5396         struct hwrm_func_drv_if_change_input req = {0};
5397         uint32_t flags;
5398         int rc;
5399
5400         if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
5401                 return 0;
5402
5403         /* Do not issue FUNC_DRV_IF_CHANGE during reset recovery.
5404          * If FUNC_DRV_IF_CHANGE is issued with the UP flag cleared before
5405          * FUNC_DRV_UNRGTR, the FW resets before FUNC_DRV_UNRGTR completes.
5406          */
5407         if (!up && (bp->flags & BNXT_FLAG_FW_RESET))
5408                 return 0;
5409
5410         HWRM_PREP(&req, HWRM_FUNC_DRV_IF_CHANGE, BNXT_USE_CHIMP_MB);
5411
5412         if (up)
5413                 req.flags =
5414                 rte_cpu_to_le_32(HWRM_FUNC_DRV_IF_CHANGE_INPUT_FLAGS_UP);
5415
5416         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5417
5418         HWRM_CHECK_RESULT();
5419         flags = rte_le_to_cpu_32(resp->flags);
5420         HWRM_UNLOCK();
5421
5422         if (!up)
5423                 return 0;
5424
5425         if (flags & HWRM_FUNC_DRV_IF_CHANGE_OUTPUT_FLAGS_HOT_FW_RESET_DONE) {
5426                 PMD_DRV_LOG(INFO, "FW reset happened while port was down\n");
5427                 bp->flags |= BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE;
5428         }
5429
5430         return 0;
5431 }
5432
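/* Retrieve the firmware's error recovery parameters: whether recovery is
 * host- or co-processor-driven, the polling/wait intervals (converted from
 * units of 100 ms to ms), the health/heartbeat status registers, and the
 * register writes needed to trigger a reset.
 */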
5433 int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
5434 {
5435         struct hwrm_error_recovery_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
5436         struct bnxt_error_recovery_info *info = bp->recovery_info;
5437         struct hwrm_error_recovery_qcfg_input req = {0};
5438         uint32_t flags = 0;
5439         unsigned int i;
5440         int rc;
5441
5442         /* Older FW does not have error recovery support */
5443         if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
5444                 return 0;
5445
5446         HWRM_PREP(&req, HWRM_ERROR_RECOVERY_QCFG, BNXT_USE_CHIMP_MB);
5447
5448         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5449
5450         HWRM_CHECK_RESULT();
5451
5452         flags = rte_le_to_cpu_32(resp->flags);
5453         if (flags & HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FLAGS_HOST)
5454                 info->flags |= BNXT_FLAG_ERROR_RECOVERY_HOST;
5455         else if (flags & HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FLAGS_CO_CPU)
5456                 info->flags |= BNXT_FLAG_ERROR_RECOVERY_CO_CPU;
5457
5458         if ((info->flags & BNXT_FLAG_ERROR_RECOVERY_CO_CPU) &&
5459             !(bp->flags & BNXT_FLAG_KONG_MB_EN)) {
5460                 rc = -EINVAL;
5461                 goto err;
5462         }
5463
5464         /* FW returned values are in units of 100msec */
5465         info->driver_polling_freq =
5466                 rte_le_to_cpu_32(resp->driver_polling_freq) * 100;
5467         info->master_func_wait_period =
5468                 rte_le_to_cpu_32(resp->master_func_wait_period) * 100;
5469         info->normal_func_wait_period =
5470                 rte_le_to_cpu_32(resp->normal_func_wait_period) * 100;
5471         info->master_func_wait_period_after_reset =
5472                 rte_le_to_cpu_32(resp->master_func_wait_period_after_reset) * 100;
5473         info->max_bailout_time_after_reset =
5474                 rte_le_to_cpu_32(resp->max_bailout_time_after_reset) * 100;
5475         info->status_regs[BNXT_FW_STATUS_REG] =
5476                 rte_le_to_cpu_32(resp->fw_health_status_reg);
5477         info->status_regs[BNXT_FW_HEARTBEAT_CNT_REG] =
5478                 rte_le_to_cpu_32(resp->fw_heartbeat_reg);
5479         info->status_regs[BNXT_FW_RECOVERY_CNT_REG] =
5480                 rte_le_to_cpu_32(resp->fw_reset_cnt_reg);
5481         info->status_regs[BNXT_FW_RESET_INPROG_REG] =
5482                 rte_le_to_cpu_32(resp->reset_inprogress_reg);
5483         info->reg_array_cnt =
5484                 rte_le_to_cpu_32(resp->reg_array_cnt);
5485
5486         if (info->reg_array_cnt >= BNXT_NUM_RESET_REG) {
5487                 rc = -EINVAL;
5488                 goto err;
5489         }
5490
5491         for (i = 0; i < info->reg_array_cnt; i++) {
5492                 info->reset_reg[i] =
5493                         rte_le_to_cpu_32(resp->reset_reg[i]);
5494                 info->reset_reg_val[i] =
5495                         rte_le_to_cpu_32(resp->reset_reg_val[i]);
5496                 info->delay_after_reset[i] =
5497                         resp->delay_after_reset[i];
5498         }
5499 err:
5500         HWRM_UNLOCK();
5501
5502         /* Map the FW status registers */
5503         if (!rc)
5504                 rc = bnxt_map_fw_health_status_regs(bp);
5505
5506         if (rc) {
5507                 rte_free(bp->recovery_info);
5508                 bp->recovery_info = NULL;
5509         }
5510         return rc;
5511 }
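
/*
 * Illustrative sketch (hypothetical helper): one health-monitor polling
 * step using the values parsed above. The real driver arms a periodic
 * alarm at info->driver_polling_freq msec and watches the heartbeat and
 * reset counters through the mapped FW status registers.
 */
static void __rte_unused
bnxt_example_health_poll_interval(struct bnxt *bp)
{
        struct bnxt_error_recovery_info *info = bp->recovery_info;

        if (info == NULL)
                return;

        /* driver_polling_freq was already converted to msec above. */
        rte_delay_ms(info->driver_polling_freq);
        /* A real monitor would now re-read the heartbeat/reset-count
         * registers and start recovery if they stopped advancing.
         */
}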
5512
5513 int bnxt_hwrm_fw_reset(struct bnxt *bp)
5514 {
5515         struct hwrm_fw_reset_output *resp = bp->hwrm_cmd_resp_addr;
5516         struct hwrm_fw_reset_input req = {0};
5517         int rc;
5518
5519         if (!BNXT_PF(bp))
5520                 return -EOPNOTSUPP;
5521
5522         HWRM_PREP(&req, HWRM_FW_RESET, BNXT_USE_KONG(bp));
5523
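        /* PROC_TYPE_CHIP plus SELFRSTASAP requests a self-reset of the
         * whole chip as soon as possible; RESET_GRACEFUL is assumed here
         * to let the FW notify drivers and quiesce before resetting.
         */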
5524         req.embedded_proc_type =
5525                 HWRM_FW_RESET_INPUT_EMBEDDED_PROC_TYPE_CHIP;
5526         req.selfrst_status =
5527                 HWRM_FW_RESET_INPUT_SELFRST_STATUS_SELFRSTASAP;
5528         req.flags = HWRM_FW_RESET_INPUT_FLAGS_RESET_GRACEFUL;
5529
5530         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req),
5531                                     BNXT_USE_KONG(bp));
5532
5533         HWRM_CHECK_RESULT();
5534         HWRM_UNLOCK();
5535
5536         return rc;
5537 }
5538
5539 int bnxt_hwrm_port_ts_query(struct bnxt *bp, uint8_t path, uint64_t *timestamp)
5540 {
5541         struct hwrm_port_ts_query_output *resp = bp->hwrm_cmd_resp_addr;
5542         struct hwrm_port_ts_query_input req = {0};
5543         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
5544         uint32_t flags = 0;
5545         int rc;
5546
5547         if (!ptp)
5548                 return 0;
5549
5550         HWRM_PREP(&req, HWRM_PORT_TS_QUERY, BNXT_USE_CHIMP_MB);
5551
5552         switch (path) {
5553         case BNXT_PTP_FLAGS_PATH_TX:
5554                 flags |= HWRM_PORT_TS_QUERY_INPUT_FLAGS_PATH_TX;
5555                 break;
5556         case BNXT_PTP_FLAGS_PATH_RX:
5557                 flags |= HWRM_PORT_TS_QUERY_INPUT_FLAGS_PATH_RX;
5558                 break;
5559         case BNXT_PTP_FLAGS_CURRENT_TIME:
5560                 flags |= HWRM_PORT_TS_QUERY_INPUT_FLAGS_CURRENT_TIME;
5561                 break;
5562         }
5563
5564         req.flags = rte_cpu_to_le_32(flags);
5565         req.port_id = rte_cpu_to_le_16(bp->pf->port_id);
5566
5567         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5568
5569         HWRM_CHECK_RESULT();
5570
5571         if (timestamp) {
5572                 *timestamp = rte_le_to_cpu_32(resp->ptp_msg_ts[0]);
5573                 *timestamp |=
5574                         (uint64_t)(rte_le_to_cpu_32(resp->ptp_msg_ts[1])) << 32;
5575         }
5576         HWRM_UNLOCK();
5577
5578         return rc;
5579 }
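
/*
 * Illustrative sketch: reading the free-running PHC clock through
 * bnxt_hwrm_port_ts_query(). BNXT_PTP_FLAGS_CURRENT_TIME selects the
 * current time rather than a latched RX/TX packet timestamp; the 64-bit
 * value is reassembled from two LE 32-bit words by the function above.
 */
static int __rte_unused
bnxt_example_read_phc_time(struct bnxt *bp, uint64_t *ns)
{
        return bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_CURRENT_TIME, ns);
}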
5580
5581 int bnxt_hwrm_cfa_counter_qcaps(struct bnxt *bp, uint16_t *max_fc)
5582 {
5583         int rc = 0;
5584
5585         struct hwrm_cfa_counter_qcaps_input req = {0};
5586         struct hwrm_cfa_counter_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
5587
5588         if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
5589                 PMD_DRV_LOG(DEBUG,
5590                             "Not a PF or trusted VF. Command not supported\n");
5591                 return 0;
5592         }
5593
5594         HWRM_PREP(&req, HWRM_CFA_COUNTER_QCAPS, BNXT_USE_KONG(bp));
5595         req.target_id = rte_cpu_to_le_16(bp->fw_fid);
5596         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
5597
5598         HWRM_CHECK_RESULT();
5599         if (max_fc)
5600                 *max_fc = rte_le_to_cpu_16(resp->max_rx_fc);
5601         HWRM_UNLOCK();
5602
5603         return 0;
5604 }
5605
5606 int bnxt_hwrm_ctx_rgtr(struct bnxt *bp, rte_iova_t dma_addr, uint16_t *ctx_id)
5607 {
5608         int rc = 0;
5609         struct hwrm_cfa_ctx_mem_rgtr_input req = {.req_type = 0 };
5610         struct hwrm_cfa_ctx_mem_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
5611
5612         if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
5613                 PMD_DRV_LOG(DEBUG,
5614                             "Not a PF or trusted VF. Command not supported\n");
5615                 return 0;
5616         }
5617
5618         HWRM_PREP(&req, HWRM_CFA_CTX_MEM_RGTR, BNXT_USE_KONG(bp));
5619
5620         req.page_level = HWRM_CFA_CTX_MEM_RGTR_INPUT_PAGE_LEVEL_LVL_0;
5621         req.page_size = HWRM_CFA_CTX_MEM_RGTR_INPUT_PAGE_SIZE_2M;
5622         req.page_dir = rte_cpu_to_le_64(dma_addr);
5623
5624         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
5625
5626         HWRM_CHECK_RESULT();
5627         if (ctx_id) {
5628                 *ctx_id  = rte_le_to_cpu_16(resp->ctx_id);
5629                 PMD_DRV_LOG(DEBUG, "ctx_id = %d\n", *ctx_id);
5630         }
5631         HWRM_UNLOCK();
5632
5633         return 0;
5634 }
5635
5636 int bnxt_hwrm_ctx_unrgtr(struct bnxt *bp, uint16_t ctx_id)
5637 {
5638         int rc = 0;
5639         struct hwrm_cfa_ctx_mem_unrgtr_input req = {.req_type = 0 };
5640         struct hwrm_cfa_ctx_mem_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
5641
5642         if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
5643                 PMD_DRV_LOG(DEBUG,
5644                             "Not a PF or trusted VF. Command not supported\n");
5645                 return 0;
5646         }
5647
5648         HWRM_PREP(&req, HWRM_CFA_CTX_MEM_UNRGTR, BNXT_USE_KONG(bp));
5649
5650         req.ctx_id = rte_cpu_to_le_16(ctx_id);
5651
5652         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
5653
5654         HWRM_CHECK_RESULT();
5655         HWRM_UNLOCK();
5656
5657         return rc;
5658 }
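
/*
 * Illustrative sketch (hypothetical names): a minimal register/unregister
 * round trip for a CFA counter context. The FW is told the memory is a
 * single-level table of 2MB pages (page_level/page_size above), so the
 * backing memzone is reserved with 2MB size and alignment.
 */
static int __rte_unused
bnxt_example_ctx_lifecycle(struct bnxt *bp)
{
        const struct rte_memzone *mz;
        uint16_t ctx_id = 0;
        int rc;

        mz = rte_memzone_reserve_aligned("example_fc_mem", 1 << 21,
                                         SOCKET_ID_ANY, 0, 1 << 21);
        if (mz == NULL)
                return -ENOMEM;

        rc = bnxt_hwrm_ctx_rgtr(bp, mz->iova, &ctx_id);
        if (rc == 0)
                rc = bnxt_hwrm_ctx_unrgtr(bp, ctx_id);

        rte_memzone_free(mz);
        return rc;
}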
5659
5660 int bnxt_hwrm_cfa_counter_cfg(struct bnxt *bp, enum bnxt_flow_dir dir,
5661                               uint16_t cntr, uint16_t ctx_id,
5662                               uint32_t num_entries, bool enable)
5663 {
5664         struct hwrm_cfa_counter_cfg_input req = {0};
5665         struct hwrm_cfa_counter_cfg_output *resp = bp->hwrm_cmd_resp_addr;
5666         uint16_t flags = 0;
5667         int rc;
5668
5669         if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
5670                 PMD_DRV_LOG(DEBUG,
5671                             "Not a PF or trusted VF. Command not supported\n");
5672                 return 0;
5673         }
5674
5675         HWRM_PREP(&req, HWRM_CFA_COUNTER_CFG, BNXT_USE_KONG(bp));
5676
5677         req.target_id = rte_cpu_to_le_16(bp->fw_fid);
5678         req.counter_type = rte_cpu_to_le_16(cntr);
5679         flags = enable ? HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_CFG_MODE_ENABLE :
5680                 HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_CFG_MODE_DISABLE;
5681         flags |= HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_DATA_TRANSFER_MODE_PULL;
5682         if (dir == BNXT_DIR_RX)
5683                 flags |=  HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_PATH_RX;
5684         else if (dir == BNXT_DIR_TX)
5685                 flags |=  HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_PATH_TX;
5686         req.flags = rte_cpu_to_le_16(flags);
5687         req.ctx_id =  rte_cpu_to_le_16(ctx_id);
5688         req.num_entries = rte_cpu_to_le_32(num_entries);
5689
5690         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
5691         HWRM_CHECK_RESULT();
5692         HWRM_UNLOCK();
5693
5694         return 0;
5695 }
5696
5697 int bnxt_hwrm_cfa_counter_qstats(struct bnxt *bp,
5698                                  enum bnxt_flow_dir dir,
5699                                  uint16_t cntr,
5700                                  uint16_t num_entries)
5701 {
5702         struct hwrm_cfa_counter_qstats_output *resp = bp->hwrm_cmd_resp_addr;
5703         struct hwrm_cfa_counter_qstats_input req = {0};
5704         uint16_t flow_ctx_id = 0;
5705         uint16_t flags = 0;
5706         int rc = 0;
5707
5708         if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
5709                 PMD_DRV_LOG(DEBUG,
5710                             "Not a PF or trusted VF. Command not supported\n");
5711                 return 0;
5712         }
5713
5714         if (dir == BNXT_DIR_RX) {
5715                 flow_ctx_id = bp->flow_stat->rx_fc_in_tbl.ctx_id;
5716                 flags = HWRM_CFA_COUNTER_QSTATS_INPUT_FLAGS_PATH_RX;
5717         } else if (dir == BNXT_DIR_TX) {
5718                 flow_ctx_id = bp->flow_stat->tx_fc_in_tbl.ctx_id;
5719                 flags = HWRM_CFA_COUNTER_QSTATS_INPUT_FLAGS_PATH_TX;
5720         }
5721
5722         HWRM_PREP(&req, HWRM_CFA_COUNTER_QSTATS, BNXT_USE_KONG(bp));
5723         req.target_id = rte_cpu_to_le_16(bp->fw_fid);
5724         req.counter_type = rte_cpu_to_le_16(cntr);
5725         req.input_flow_ctx_id = rte_cpu_to_le_16(flow_ctx_id);
5726         req.num_entries = rte_cpu_to_le_16(num_entries);
5727         req.flags = rte_cpu_to_le_16(flags);
5728         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
5729
5730         HWRM_CHECK_RESULT();
5731         HWRM_UNLOCK();
5732
5733         return 0;
5734 }
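
/*
 * Illustrative sketch: the order in which the CFA flow-counter calls
 * above are typically combined for the RX direction. Error unwinding is
 * elided and the helper name is hypothetical; the context must already
 * have been registered via bnxt_hwrm_ctx_rgtr().
 */
static int __rte_unused
bnxt_example_fc_query(struct bnxt *bp, uint16_t cntr, uint16_t ctx_id,
                      uint16_t entries)
{
        uint16_t max_fc = 0;
        int rc;

        rc = bnxt_hwrm_cfa_counter_qcaps(bp, &max_fc);
        if (rc)
                return rc;
        if (entries > max_fc)
                return -EINVAL;

        rc = bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_RX, cntr, ctx_id,
                                       entries, true);
        if (rc)
                return rc;

        /* FW DMAs the counter block into the registered context memory. */
        return bnxt_hwrm_cfa_counter_qstats(bp, BNXT_DIR_RX, cntr, entries);
}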
5735
5736 int bnxt_hwrm_first_vf_id_query(struct bnxt *bp, uint16_t fid,
5737                                 uint16_t *first_vf_id)
5738 {
5739         int rc = 0;
5740         struct hwrm_func_qcaps_input req = {.req_type = 0 };
5741         struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
5742
5743         HWRM_PREP(&req, HWRM_FUNC_QCAPS, BNXT_USE_CHIMP_MB);
5744
5745         req.fid = rte_cpu_to_le_16(fid);
5746
5747         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5748
5749         HWRM_CHECK_RESULT();
5750
5751         if (first_vf_id)
5752                 *first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
5753
5754         HWRM_UNLOCK();
5755
5756         return rc;
5757 }
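
/*
 * Illustrative sketch: FW function IDs of a PF's VFs are contiguous, so a
 * representor can derive a VF's FID from the first VF ID queried above.
 * The helper is hypothetical.
 */
static int __rte_unused
bnxt_example_vf_fw_fid(struct bnxt *bp, uint16_t vf_idx, uint16_t *fid)
{
        uint16_t first_vf_id = 0;
        int rc;

        rc = bnxt_hwrm_first_vf_id_query(bp, bp->fw_fid, &first_vf_id);
        if (rc == 0)
                *fid = first_vf_id + vf_idx;
        return rc;
}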
5758
5759 int bnxt_hwrm_cfa_pair_alloc(struct bnxt *bp, struct bnxt_representor *rep_bp)
5760 {
5761         struct hwrm_cfa_pair_alloc_output *resp = bp->hwrm_cmd_resp_addr;
5762         struct hwrm_cfa_pair_alloc_input req = {0};
5763         int rc;
5764
5765         if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
5766                 PMD_DRV_LOG(DEBUG,
5767                             "Not a PF or trusted VF. Command not supported\n");
5768                 return 0;
5769         }
5770
5771         HWRM_PREP(&req, HWRM_CFA_PAIR_ALLOC, BNXT_USE_CHIMP_MB);
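        /* Assumption: the FREE variant of the pair_mode enum encodes the
         * same REP2FN_TRUFLOW value as the ALLOC variant; the FREE name is
         * reused here.
         */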
5772         req.pair_mode = HWRM_CFA_PAIR_FREE_INPUT_PAIR_MODE_REP2FN_TRUFLOW;
5773         snprintf(req.pair_name, sizeof(req.pair_name), "%svfr%d",
5774                  bp->eth_dev->data->name, rep_bp->vf_id);
5775
5776         req.pf_b_id = rep_bp->parent_pf_idx;
5777         req.vf_b_id = BNXT_REP_PF(rep_bp) ? rte_cpu_to_le_16(((uint16_t)-1)) :
5778                                                 rte_cpu_to_le_16(rep_bp->vf_id);
5779         req.vf_a_id = rte_cpu_to_le_16(bp->fw_fid);
5780         req.host_b_id = 1; /* TBD - Confirm if this is OK */
5781
5782         req.enables |= rep_bp->flags & BNXT_REP_Q_R2F_VALID ?
5783                         HWRM_CFA_PAIR_ALLOC_INPUT_ENABLES_Q_AB_VALID : 0;
5784         req.enables |= rep_bp->flags & BNXT_REP_Q_F2R_VALID ?
5785                         HWRM_CFA_PAIR_ALLOC_INPUT_ENABLES_Q_BA_VALID : 0;
5786         req.enables |= rep_bp->flags & BNXT_REP_FC_R2F_VALID ?
5787                         HWRM_CFA_PAIR_ALLOC_INPUT_ENABLES_FC_AB_VALID : 0;
5788         req.enables |= rep_bp->flags & BNXT_REP_FC_F2R_VALID ?
5789                         HWRM_CFA_PAIR_ALLOC_INPUT_ENABLES_FC_BA_VALID : 0;
5790
5791         req.q_ab = rep_bp->rep_q_r2f;
5792         req.q_ba = rep_bp->rep_q_f2r;
5793         req.fc_ab = rep_bp->rep_fc_r2f;
5794         req.fc_ba = rep_bp->rep_fc_f2r;
5795
5796         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5797         HWRM_CHECK_RESULT();
5798
5799         HWRM_UNLOCK();
5800         PMD_DRV_LOG(DEBUG, "%s %d allocated\n",
5801                     BNXT_REP_PF(rep_bp) ? "PFR" : "VFR", rep_bp->vf_id);
5802         return rc;
5803 }
5804
5805 int bnxt_hwrm_cfa_pair_free(struct bnxt *bp, struct bnxt_representor *rep_bp)
5806 {
5807         struct hwrm_cfa_pair_free_output *resp = bp->hwrm_cmd_resp_addr;
5808         struct hwrm_cfa_pair_free_input req = {0};
5809         int rc;
5810
5811         if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
5812                 PMD_DRV_LOG(DEBUG,
5813                             "Not a PF or trusted VF. Command not supported\n");
5814                 return 0;
5815         }
5816
5817         HWRM_PREP(&req, HWRM_CFA_PAIR_FREE, BNXT_USE_CHIMP_MB);
5818         snprintf(req.pair_name, sizeof(req.pair_name), "%svfr%d",
5819                  bp->eth_dev->data->name, rep_bp->vf_id);
5820         req.pf_b_id = rep_bp->parent_pf_idx;
5821         req.pair_mode = HWRM_CFA_PAIR_FREE_INPUT_PAIR_MODE_REP2FN_TRUFLOW;
5822         req.vf_id = BNXT_REP_PF(rep_bp) ? rte_cpu_to_le_16(((uint16_t)-1)) :
5823                                                 rte_cpu_to_le_16(rep_bp->vf_id);
5824         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5825         HWRM_CHECK_RESULT();
5826         HWRM_UNLOCK();
5827         PMD_DRV_LOG(DEBUG, "%s %d freed\n", BNXT_REP_PF(rep_bp) ? "PFR" : "VFR",
5828                     rep_bp->vf_id);
5829         return rc;
5830 }
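
/*
 * Note: bnxt_hwrm_cfa_pair_free() identifies the pair by the same
 * "<dev-name>vfr<vf-id>" string that bnxt_hwrm_cfa_pair_alloc() created,
 * which is why both build pair_name with the identical snprintf() format.
 */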
5831
5832 int bnxt_hwrm_cfa_adv_flow_mgmt_qcaps(struct bnxt *bp)
5833 {
5834         struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp =
5835                                         bp->hwrm_cmd_resp_addr;
5836         struct hwrm_cfa_adv_flow_mgnt_qcaps_input req = {0};
5837         uint32_t flags = 0;
5838         int rc = 0;
5839
5840         if (!(bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_MGMT))
5841                 return 0;
5842
5843         if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
5844                 PMD_DRV_LOG(DEBUG,
5845                             "Not a PF or trusted VF. Command not supported\n");
5846                 return 0;
5847         }
5848
5849         HWRM_PREP(&req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS, BNXT_USE_CHIMP_MB);
5850         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5851
5852         HWRM_CHECK_RESULT();
5853         flags = rte_le_to_cpu_32(resp->flags);
5854         HWRM_UNLOCK();
5855
5856         if (flags & HWRM_CFA_ADV_FLOW_MGNT_QCAPS_RFS_RING_TBL_IDX_V2_SUPPORTED)
5857                 bp->flags |= BNXT_FLAG_FLOW_CFA_RFS_RING_TBL_IDX_V2;
5858         else
5859                 bp->flags |= BNXT_FLAG_RFS_NEEDS_VNIC;
5860
5861         return rc;
5862 }
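
/*
 * Illustrative sketch (hypothetical helper): how the two flags set above
 * would steer later RFS handling. With RING_TBL_IDX_V2 support a flow can
 * target an RX ring table index directly; otherwise a dedicated VNIC is
 * required per flow destination.
 */
static bool __rte_unused
bnxt_example_rfs_needs_vnic(struct bnxt *bp)
{
        return (bp->flags & BNXT_FLAG_RFS_NEEDS_VNIC) != 0;
}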