dpdk.git: drivers/net/bnxt/bnxt_hwrm.c (blob at commit 7f51c610977f44f33a7f63f6d6cbdc8b6d047fe4)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2014-2021 Broadcom
3  * All rights reserved.
4  */
5
6 #include <unistd.h>
7
8 #include <rte_byteorder.h>
9 #include <rte_common.h>
10 #include <rte_cycles.h>
11 #include <rte_malloc.h>
12 #include <rte_memzone.h>
13 #include <rte_version.h>
14 #include <rte_io.h>
15
16 #include "bnxt.h"
17 #include "bnxt_filter.h"
18 #include "bnxt_hwrm.h"
19 #include "bnxt_rxq.h"
20 #include "bnxt_rxr.h"
21 #include "bnxt_ring.h"
22 #include "bnxt_txq.h"
23 #include "bnxt_txr.h"
24 #include "bnxt_vnic.h"
25 #include "hsi_struct_def_dpdk.h"
26
27 #define HWRM_SPEC_CODE_1_8_3            0x10803
28 #define HWRM_VERSION_1_9_1              0x10901
29 #define HWRM_VERSION_1_9_2              0x10903
30 #define HWRM_VERSION_1_10_2_13          0x10a020d
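/* Note: HWRM_SPEC_CODE_1_8_3 is compared against bp->hwrm_spec_code
 * (intf_maj << 16 | intf_min << 8 | intf_upd), while HWRM_VERSION_1_10_2_13
 * is compared against bp->fw_ver
 * (fw_maj << 24 | fw_min << 16 | fw_bld << 8 | fw_rsvd); see
 * bnxt_hwrm_ver_get() below for how both values are assembled.
 */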
31 struct bnxt_plcmodes_cfg {
32         uint32_t        flags;
33         uint16_t        jumbo_thresh;
34         uint16_t        hds_offset;
35         uint16_t        hds_threshold;
36 };
37
38 static int page_getenum(size_t size)
39 {
40         if (size <= 1 << 4)
41                 return 4;
42         if (size <= 1 << 12)
43                 return 12;
44         if (size <= 1 << 13)
45                 return 13;
46         if (size <= 1 << 16)
47                 return 16;
48         if (size <= 1 << 21)
49                 return 21;
50         if (size <= 1 << 22)
51                 return 22;
52         if (size <= 1 << 30)
53                 return 30;
54         PMD_DRV_LOG(ERR, "Page size %zu out of range\n", size);
55         return sizeof(int) * 8 - 1;
56 }
57
58 static int page_roundup(size_t size)
59 {
60         return 1 << page_getenum(size);
61 }
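
/* For example, page_getenum(3000) returns 12 and page_roundup(3000) returns
 * 4096: sizes are rounded up to the nearest supported page-size exponent.
 */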
62
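/* Fill the page attribute (indirection level) and page directory pointer of a
 * ring/context memory request from the ring memory layout: a single page is
 * referenced directly by its DMA address, while multiple pages are referenced
 * through the page table (pg_tbl_map) with one level of indirection.
 */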
63 static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem,
64                                   uint8_t *pg_attr,
65                                   uint64_t *pg_dir)
66 {
67         if (rmem->nr_pages == 0)
68                 return;
69
70         if (rmem->nr_pages > 1) {
71                 *pg_attr = 1;
72                 *pg_dir = rte_cpu_to_le_64(rmem->pg_tbl_map);
73         } else {
74                 *pg_dir = rte_cpu_to_le_64(rmem->dma_arr[0]);
75         }
76 }
77
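/* Return the completion ring whose firmware ring id matches 'rid'. For
 * RX/RX_AGG ring-free requests the Rx queues are searched, for TX requests
 * the Tx queues; NULL is returned when no completion ring matches.
 */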
78 static struct bnxt_cp_ring_info*
79 bnxt_get_ring_info_by_id(struct bnxt *bp, uint16_t rid, uint16_t type)
80 {
81         struct bnxt_cp_ring_info *cp_ring = NULL;
82         uint16_t i;
83
84         switch (type) {
85         case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
86         case HWRM_RING_FREE_INPUT_RING_TYPE_RX_AGG:
87                 /* FALLTHROUGH */
88                 for (i = 0; i < bp->rx_cp_nr_rings; i++) {
89                         struct bnxt_rx_queue *rxq = bp->rx_queues[i];
90
91                         if (rxq->cp_ring->cp_ring_struct->fw_ring_id ==
92                             rte_cpu_to_le_16(rid)) {
93                                 return rxq->cp_ring;
94                         }
95                 }
96                 break;
97         case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
98                 for (i = 0; i < bp->tx_cp_nr_rings; i++) {
99                         struct bnxt_tx_queue *txq = bp->tx_queues[i];
100
101                         if (txq->cp_ring->cp_ring_struct->fw_ring_id ==
102                             rte_cpu_to_le_16(rid)) {
103                                 return txq->cp_ring;
104                         }
105                 }
106                 break;
107         default:
108                 return cp_ring;
109         }
110         return cp_ring;
111 }
112
113 /* Complete a sweep of the CQ ring for the corresponding Tx/Rx/AGG ring.
114  * If CMPL_BASE_TYPE_HWRM_DONE has not been seen by the last pass before
115  * the timeout, force the done bit so that the cleanup can proceed.
116  * If cpr is NULL, do nothing: the HWRM command is not for a
117  * Tx/Rx/AGG ring cleanup.
118  */
119 static int
120 bnxt_check_cq_hwrm_done(struct bnxt_cp_ring_info *cpr,
121                         bool tx, bool rx, bool timeout)
122 {
123         int done = 0;
124
125         if (cpr != NULL) {
126                 if (tx)
127                         done = bnxt_flush_tx_cmp(cpr);
128
129                 if (rx)
130                         done = bnxt_flush_rx_cmp(cpr);
131
132                 if (done)
133                         PMD_DRV_LOG(DEBUG, "HWRM DONE for %s ring\n",
134                                     rx ? "Rx" : "Tx");
135
136                 /* We are about to time out and still haven't seen the
137                  * HWRM DONE for the ring free. Force the cleanup.
138                  */
139                 if (!done && timeout) {
140                         done = 1;
141                         PMD_DRV_LOG(DEBUG, "Timing out for %s ring\n",
142                                     rx ? "Rx" : "Tx");
143                 }
144         } else {
145                 /* This HWRM command is not for a Tx/Rx/AGG ring cleanup.
146                  * Otherwise the cpr would have been valid. So do nothing.
147                  */
148                 done = 1;
149         }
150
151         return done;
152 }
153
154 /*
155  * HWRM Functions (sent to HWRM)
156  * These are named bnxt_hwrm_*() and return 0 on success, -ETIMEDOUT (-110)
157  * if the HWRM command times out, or a negative error code if the command
158  * fails in the FW.
159  */
160
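/* Low-level HWRM transport: copy the request (or its short-command wrapper)
 * into the ChiMP/KONG communication channel in BAR0, ring the channel
 * doorbell, then poll the response buffer until the valid byte is set or the
 * command timeout expires. Returns 0 on success or -ETIMEDOUT on timeout.
 */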
161 static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
162                                   uint32_t msg_len, bool use_kong_mb)
163 {
164         unsigned int i;
165         struct input *req = msg;
166         struct output *resp = bp->hwrm_cmd_resp_addr;
167         uint32_t *data = msg;
168         uint8_t *bar;
169         uint8_t *valid;
170         uint16_t max_req_len = bp->max_req_len;
171         struct hwrm_short_input short_input = { 0 };
172         uint16_t bar_offset = use_kong_mb ?
173                 GRCPF_REG_KONG_CHANNEL_OFFSET : GRCPF_REG_CHIMP_CHANNEL_OFFSET;
174         uint16_t mb_trigger_offset = use_kong_mb ?
175                 GRCPF_REG_KONG_COMM_TRIGGER : GRCPF_REG_CHIMP_COMM_TRIGGER;
176         struct bnxt_cp_ring_info *cpr = NULL;
177         bool is_rx = false;
178         bool is_tx = false;
179         uint32_t timeout;
180
181         /* Do not send HWRM commands to firmware in error state */
182         if (bp->flags & BNXT_FLAG_FATAL_ERROR)
183                 return 0;
184
185         timeout = bp->hwrm_cmd_timeout;
186
187         /* Update the message length for backing store config for new FW. */
188         if (bp->fw_ver >= HWRM_VERSION_1_10_2_13 &&
189             rte_cpu_to_le_16(req->req_type) == HWRM_FUNC_BACKING_STORE_CFG)
190                 msg_len = BNXT_BACKING_STORE_CFG_LEGACY_LEN;
191
192         if (bp->flags & BNXT_FLAG_SHORT_CMD ||
193             msg_len > bp->max_req_len) {
194                 void *short_cmd_req = bp->hwrm_short_cmd_req_addr;
195
196                 memset(short_cmd_req, 0, bp->hwrm_max_ext_req_len);
197                 memcpy(short_cmd_req, req, msg_len);
198
199                 short_input.req_type = rte_cpu_to_le_16(req->req_type);
200                 short_input.signature = rte_cpu_to_le_16(
201                                         HWRM_SHORT_INPUT_SIGNATURE_SHORT_CMD);
202                 short_input.size = rte_cpu_to_le_16(msg_len);
203                 short_input.req_addr =
204                         rte_cpu_to_le_64(bp->hwrm_short_cmd_req_dma_addr);
205
206                 data = (uint32_t *)&short_input;
207                 msg_len = sizeof(short_input);
208
209                 max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
210         }
211
212         /* Write request msg to hwrm channel */
213         for (i = 0; i < msg_len; i += 4) {
214                 bar = (uint8_t *)bp->bar0 + bar_offset + i;
215                 rte_write32(*data, bar);
216                 data++;
217         }
218
219         /* Zero the rest of the request space */
220         for (; i < max_req_len; i += 4) {
221                 bar = (uint8_t *)bp->bar0 + bar_offset + i;
222                 rte_write32(0, bar);
223         }
224
225         /* Ring channel doorbell */
226         bar = (uint8_t *)bp->bar0 + mb_trigger_offset;
227         rte_write32(1, bar);
228         /*
229          * Make sure the channel doorbell write completes before
230          * reading the response, to avoid getting stale or invalid
231          * responses.
232          */
233         rte_io_mb();
234
235         /* Check ring flush is done.
236          * This is valid only for Tx and Rx rings (including AGG rings).
237          * The Tx and Rx rings should be freed once the HW confirms all
238          * the internal buffers and BDs associated with the rings are
239          * consumed and the corresponding DMA is handled.
240          */
241         if (rte_cpu_to_le_16(req->cmpl_ring) != INVALID_HW_RING_ID) {
242                 /* Check if the TxCQ matches. If that fails, check if the RxCQ
243                  * matches. If neither matches, both is_rx and is_tx stay false.
244                  */
245                 cpr = bnxt_get_ring_info_by_id(bp, req->cmpl_ring,
246                                                HWRM_RING_FREE_INPUT_RING_TYPE_TX);
247                 if (cpr == NULL) {
248                         /* Not a TxCQ. Check if the RxCQ matches. */
249                         cpr =
250                         bnxt_get_ring_info_by_id(bp, req->cmpl_ring,
251                                                  HWRM_RING_FREE_INPUT_RING_TYPE_RX);
252                         if (cpr != NULL)
253                                 is_rx = true;
254                 } else {
255                         is_tx = true;
256                 }
257         }
258
259         /* Poll for the valid bit */
260         for (i = 0; i < timeout; i++) {
261                 int done;
262
263                 done = bnxt_check_cq_hwrm_done(cpr, is_tx, is_rx,
264                                                i == timeout - 1);
265                 /* Sanity check on the resp->resp_len */
266                 rte_io_rmb();
267                 if (resp->resp_len && resp->resp_len <= bp->max_resp_len) {
268                         /* Last byte of resp contains the valid key */
269                         valid = (uint8_t *)resp + resp->resp_len - 1;
270                         if (*valid == HWRM_RESP_VALID_KEY && done)
271                                 break;
272                 }
273                 rte_delay_us(1);
274         }
275
276         if (i >= timeout) {
277                 /* Suppress VER_GET timeout messages during reset recovery */
278                 if (bp->flags & BNXT_FLAG_FW_RESET &&
279                     rte_cpu_to_le_16(req->req_type) == HWRM_VER_GET)
280                         return -ETIMEDOUT;
281
282                 PMD_DRV_LOG(ERR,
283                             "Error(timeout) sending msg 0x%04x, seq_id %d\n",
284                             req->req_type, req->seq_id);
285                 return -ETIMEDOUT;
286         }
287         return 0;
288 }
289
290 /*
291  * HWRM_PREP() should be used to prepare *ALL* HWRM commands. It grabs the
292  * spinlock and does the initial processing.
293  *
294  * HWRM_CHECK_RESULT() returns from the calling function on failure and
295  * releases the spinlock only on that error path. If the function does not
296  * use the regular int return codes, HWRM_CHECK_RESULT() should not be used
297  * directly; rather, it should be copied and modified to suit the function.
298  *
299  * HWRM_UNLOCK() must be called after all response processing is completed.
300  */
301 #define HWRM_PREP(req, type, kong) do { \
302         rte_spinlock_lock(&bp->hwrm_lock); \
303         if (bp->hwrm_cmd_resp_addr == NULL) { \
304                 rte_spinlock_unlock(&bp->hwrm_lock); \
305                 return -EACCES; \
306         } \
307         memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
308         (req)->req_type = rte_cpu_to_le_16(type); \
309         (req)->cmpl_ring = rte_cpu_to_le_16(-1); \
310         (req)->seq_id = kong ? rte_cpu_to_le_16(bp->kong_cmd_seq++) :\
311                 rte_cpu_to_le_16(bp->chimp_cmd_seq++); \
312         (req)->target_id = rte_cpu_to_le_16(0xffff); \
313         (req)->resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr); \
314 } while (0)
315
316 #define HWRM_CHECK_RESULT_SILENT() do {\
317         if (rc) { \
318                 rte_spinlock_unlock(&bp->hwrm_lock); \
319                 return rc; \
320         } \
321         if (resp->error_code) { \
322                 rc = rte_le_to_cpu_16(resp->error_code); \
323                 rte_spinlock_unlock(&bp->hwrm_lock); \
324                 return rc; \
325         } \
326 } while (0)
327
328 #define HWRM_CHECK_RESULT() do {\
329         if (rc) { \
330                 PMD_DRV_LOG(ERR, "failed rc:%d\n", rc); \
331                 rte_spinlock_unlock(&bp->hwrm_lock); \
332                 if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
333                         rc = -EACCES; \
334                 else if (rc == HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR) \
335                         rc = -ENOSPC; \
336                 else if (rc == HWRM_ERR_CODE_INVALID_PARAMS) \
337                         rc = -EINVAL; \
338                 else if (rc == HWRM_ERR_CODE_CMD_NOT_SUPPORTED) \
339                         rc = -ENOTSUP; \
340                 else if (rc == HWRM_ERR_CODE_HOT_RESET_PROGRESS) \
341                         rc = -EAGAIN; \
342                 else if (rc > 0) \
343                         rc = -EIO; \
344                 return rc; \
345         } \
346         if (resp->error_code) { \
347                 rc = rte_le_to_cpu_16(resp->error_code); \
348                 if (resp->resp_len >= 16) { \
349                         struct hwrm_err_output *tmp_hwrm_err_op = \
350                                                 (void *)resp; \
351                         PMD_DRV_LOG(ERR, \
352                                 "error %d:%d:%08x:%04x\n", \
353                                 rc, tmp_hwrm_err_op->cmd_err, \
354                                 rte_le_to_cpu_32(\
355                                         tmp_hwrm_err_op->opaque_0), \
356                                 rte_le_to_cpu_16(\
357                                         tmp_hwrm_err_op->opaque_1)); \
358                 } else { \
359                         PMD_DRV_LOG(ERR, "error %d\n", rc); \
360                 } \
361                 rte_spinlock_unlock(&bp->hwrm_lock); \
362                 if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
363                         rc = -EACCES; \
364                 else if (rc == HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR) \
365                         rc = -ENOSPC; \
366                 else if (rc == HWRM_ERR_CODE_INVALID_PARAMS) \
367                         rc = -EINVAL; \
368                 else if (rc == HWRM_ERR_CODE_CMD_NOT_SUPPORTED) \
369                         rc = -ENOTSUP; \
370                 else if (rc == HWRM_ERR_CODE_HOT_RESET_PROGRESS) \
371                         rc = -EAGAIN; \
372                 else if (rc > 0) \
373                         rc = -EIO; \
374                 return rc; \
375         } \
376 } while (0)
377
378 #define HWRM_UNLOCK()           rte_spinlock_unlock(&bp->hwrm_lock)
379
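/* Illustrative sketch only (not part of the driver): the pattern a typical
 * bnxt_hwrm_*() helper in this file follows with the macros above. The
 * command HWRM_EXAMPLE and its input/output structures are hypothetical
 * placeholders.
 *
 *	int bnxt_hwrm_example(struct bnxt *bp)
 *	{
 *		int rc;
 *		struct hwrm_example_input req = {.req_type = 0 };
 *		struct hwrm_example_output *resp = bp->hwrm_cmd_resp_addr;
 *
 *		HWRM_PREP(&req, HWRM_EXAMPLE, BNXT_USE_CHIMP_MB);
 *		rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 *		HWRM_CHECK_RESULT();
 *		... read fields from *resp while the spinlock is still held ...
 *		HWRM_UNLOCK();
 *		return rc;
 *	}
 */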
380 int bnxt_hwrm_tf_message_direct(struct bnxt *bp,
381                                 bool use_kong_mb,
382                                 uint16_t msg_type,
383                                 void *msg,
384                                 uint32_t msg_len,
385                                 void *resp_msg,
386                                 uint32_t resp_len)
387 {
388         int rc = 0;
389         bool mailbox = BNXT_USE_CHIMP_MB;
390         struct input *req = msg;
391         struct output *resp = bp->hwrm_cmd_resp_addr;
392
393         if (use_kong_mb)
394                 mailbox = BNXT_USE_KONG(bp);
395
396         HWRM_PREP(req, msg_type, mailbox);
397
398         rc = bnxt_hwrm_send_message(bp, req, msg_len, mailbox);
399
400         HWRM_CHECK_RESULT();
401
402         if (resp_msg)
403                 memcpy(resp_msg, resp, resp_len);
404
405         HWRM_UNLOCK();
406
407         return rc;
408 }
409
410 int bnxt_hwrm_tf_message_tunneled(struct bnxt *bp,
411                                   bool use_kong_mb,
412                                   uint16_t tf_type,
413                                   uint16_t tf_subtype,
414                                   uint32_t *tf_response_code,
415                                   void *msg,
416                                   uint32_t msg_len,
417                                   void *response,
418                                   uint32_t response_len)
419 {
420         int rc = 0;
421         struct hwrm_cfa_tflib_input req = { .req_type = 0 };
422         struct hwrm_cfa_tflib_output *resp = bp->hwrm_cmd_resp_addr;
423         bool mailbox = BNXT_USE_CHIMP_MB;
424
425         if (msg_len > sizeof(req.tf_req))
426                 return -ENOMEM;
427
428         if (use_kong_mb)
429                 mailbox = BNXT_USE_KONG(bp);
430
431         HWRM_PREP(&req, HWRM_TF, mailbox);
432         /* Build the request using the user-supplied request payload.
433          * The TLV request size is checked at build time against the HWRM
434          * maximum request size, so no further checking is required here.
435          */
436         req.tf_type = tf_type;
437         req.tf_subtype = tf_subtype;
438         memcpy(req.tf_req, msg, msg_len);
439
440         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), mailbox);
441         HWRM_CHECK_RESULT();
442
443         /* Copy the resp to user provided response buffer */
444         if (response != NULL)
445                 /* Post-process the response data. We need to copy only
446                  * the 'payload', as the HWRM data structure really is
447                  * HWRM header + msg header + payload and the TFLIB
448                  * only provides a payload placeholder.
449                  */
450                 if (response_len != 0) {
451                         memcpy(response,
452                                resp->tf_resp,
453                                response_len);
454                 }
455
456         /* Extract the internal tflib response code */
457         *tf_response_code = resp->tf_resp_code;
458         HWRM_UNLOCK();
459
460         return rc;
461 }
462
463 int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
464 {
465         int rc = 0;
466         struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
467         struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
468
469         HWRM_PREP(&req, HWRM_CFA_L2_SET_RX_MASK, BNXT_USE_CHIMP_MB);
470         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
471         req.mask = 0;
472
473         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
474
475         HWRM_CHECK_RESULT();
476         HWRM_UNLOCK();
477
478         return rc;
479 }
480
481 int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
482                                  struct bnxt_vnic_info *vnic,
483                                  uint16_t vlan_count,
484                                  struct bnxt_vlan_table_entry *vlan_table)
485 {
486         int rc = 0;
487         struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
488         struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
489         uint32_t mask = 0;
490
491         if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
492                 return rc;
493
494         HWRM_PREP(&req, HWRM_CFA_L2_SET_RX_MASK, BNXT_USE_CHIMP_MB);
495         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
496
497         if (vnic->flags & BNXT_VNIC_INFO_BCAST)
498                 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST;
499         if (vnic->flags & BNXT_VNIC_INFO_UNTAGGED)
500                 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN;
501
502         if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
503                 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
504
505         if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI) {
506                 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
507         } else if (vnic->flags & BNXT_VNIC_INFO_MCAST) {
508                 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
509                 req.num_mc_entries = rte_cpu_to_le_32(vnic->mc_addr_cnt);
510                 req.mc_tbl_addr = rte_cpu_to_le_64(vnic->mc_list_dma_addr);
511         }
512         if (vlan_table) {
513                 if (!(mask & HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN))
514                         mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
515                 req.vlan_tag_tbl_addr =
516                         rte_cpu_to_le_64(rte_malloc_virt2iova(vlan_table));
517                 req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count);
518         }
519         req.mask = rte_cpu_to_le_32(mask);
520
521         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
522
523         HWRM_CHECK_RESULT();
524         HWRM_UNLOCK();
525
526         return rc;
527 }
528
529 int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
530                         uint16_t vlan_count,
531                         struct bnxt_vlan_antispoof_table_entry *vlan_table)
532 {
533         int rc = 0;
534         struct hwrm_cfa_vlan_antispoof_cfg_input req = {.req_type = 0 };
535         struct hwrm_cfa_vlan_antispoof_cfg_output *resp =
536                                                 bp->hwrm_cmd_resp_addr;
537
538         /*
539          * Older HWRM versions did not support this command, and the set_rx_mask
540          * list was used for anti-spoof. In 1.8.0, the TX path configuration was
541          * removed from the set_rx_mask call, and this command was added.
542          *
543          * This command is also present in 1.7.8.11 and later 1.7.8.x releases,
544          * as well as in 1.7.8.0.
545          */
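        /* bp->fw_ver packs major.minor.build.patch as
         * (maj << 24) | (min << 16) | (bld << 8) | patch, so 1.8.0.0 is
         * 0x01080000 and 1.7.8.11 is 0x0107080b, matching the checks below.
         */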
546         if (bp->fw_ver < ((1 << 24) | (8 << 16))) {
547                 if (bp->fw_ver != ((1 << 24) | (7 << 16) | (8 << 8))) {
548                         if (bp->fw_ver < ((1 << 24) | (7 << 16) | (8 << 8) |
549                                         (11)))
550                                 return 0;
551                 }
552         }
553         HWRM_PREP(&req, HWRM_CFA_VLAN_ANTISPOOF_CFG, BNXT_USE_CHIMP_MB);
554         req.fid = rte_cpu_to_le_16(fid);
555
556         req.vlan_tag_mask_tbl_addr =
557                 rte_cpu_to_le_64(rte_malloc_virt2iova(vlan_table));
558         req.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count);
559
560         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
561
562         HWRM_CHECK_RESULT();
563         HWRM_UNLOCK();
564
565         return rc;
566 }
567
568 int bnxt_hwrm_clear_l2_filter(struct bnxt *bp,
569                              struct bnxt_filter_info *filter)
570 {
571         int rc = 0;
572         struct bnxt_filter_info *l2_filter = filter;
573         struct bnxt_vnic_info *vnic = NULL;
574         struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
575         struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;
576
577         if (filter->fw_l2_filter_id == UINT64_MAX)
578                 return 0;
579
580         if (filter->matching_l2_fltr_ptr)
581                 l2_filter = filter->matching_l2_fltr_ptr;
582
583         PMD_DRV_LOG(DEBUG, "filter: %p l2_filter: %p ref_cnt: %d\n",
584                     filter, l2_filter, l2_filter->l2_ref_cnt);
585
586         if (l2_filter->l2_ref_cnt == 0)
587                 return 0;
588
589         if (l2_filter->l2_ref_cnt > 0)
590                 l2_filter->l2_ref_cnt--;
591
592         if (l2_filter->l2_ref_cnt > 0)
593                 return 0;
594
595         HWRM_PREP(&req, HWRM_CFA_L2_FILTER_FREE, BNXT_USE_CHIMP_MB);
596
597         req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
598
599         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
600
601         HWRM_CHECK_RESULT();
602         HWRM_UNLOCK();
603
604         filter->fw_l2_filter_id = UINT64_MAX;
605         if (l2_filter->l2_ref_cnt == 0) {
606                 vnic = l2_filter->vnic;
607                 if (vnic) {
608                         STAILQ_REMOVE(&vnic->filter, l2_filter,
609                                       bnxt_filter_info, next);
610                         bnxt_free_filter(bp, l2_filter);
611                 }
612         }
613
614         return 0;
615 }
616
617 int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
618                          uint16_t dst_id,
619                          struct bnxt_filter_info *filter)
620 {
621         int rc = 0;
622         struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
623         struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
624         struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
625         const struct rte_eth_vmdq_rx_conf *conf =
626                     &dev_conf->rx_adv_conf.vmdq_rx_conf;
627         uint32_t enables = 0;
628         uint16_t j = dst_id - 1;
629
630         /* TODO: Is there a better way to add VLANs to each VNIC in case of VMDq? */
631         if ((dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG) &&
632             conf->pool_map[j].pools & (1UL << j)) {
633                 PMD_DRV_LOG(DEBUG,
634                         "Add vlan %u to vmdq pool %u\n",
635                         conf->pool_map[j].vlan_id, j);
636
637                 filter->l2_ivlan = conf->pool_map[j].vlan_id;
638                 filter->enables |=
639                         HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN |
640                         HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK;
641         }
642
643         if (filter->fw_l2_filter_id != UINT64_MAX)
644                 bnxt_hwrm_clear_l2_filter(bp, filter);
645
646         HWRM_PREP(&req, HWRM_CFA_L2_FILTER_ALLOC, BNXT_USE_CHIMP_MB);
647
648         /* PMD does not support XDP or RoCE */
649         filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_XDP_DISABLE |
650                         HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_TRAFFIC_L2;
651         req.flags = rte_cpu_to_le_32(filter->flags);
652
653         enables = filter->enables |
654               HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
655         req.dst_id = rte_cpu_to_le_16(dst_id);
656
657         if (enables &
658             HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
659                 memcpy(req.l2_addr, filter->l2_addr,
660                        RTE_ETHER_ADDR_LEN);
661         if (enables &
662             HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
663                 memcpy(req.l2_addr_mask, filter->l2_addr_mask,
664                        RTE_ETHER_ADDR_LEN);
665         if (enables &
666             HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
667                 req.l2_ovlan = filter->l2_ovlan;
668         if (enables &
669             HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN)
670                 req.l2_ivlan = filter->l2_ivlan;
671         if (enables &
672             HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
673                 req.l2_ovlan_mask = filter->l2_ovlan_mask;
674         if (enables &
675             HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK)
676                 req.l2_ivlan_mask = filter->l2_ivlan_mask;
677         if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID)
678                 req.src_id = rte_cpu_to_le_32(filter->src_id);
679         if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE)
680                 req.src_type = filter->src_type;
681         if (filter->pri_hint) {
682                 req.pri_hint = filter->pri_hint;
683                 req.l2_filter_id_hint =
684                         rte_cpu_to_le_64(filter->l2_filter_id_hint);
685         }
686
687         req.enables = rte_cpu_to_le_32(enables);
688
689         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
690
691         HWRM_CHECK_RESULT();
692
693         filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);
694         filter->flow_id = rte_le_to_cpu_32(resp->flow_id);
695         HWRM_UNLOCK();
696
697         filter->l2_ref_cnt++;
698
699         return rc;
700 }
701
702 int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
703 {
704         struct hwrm_port_mac_cfg_input req = {.req_type = 0};
705         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
706         uint32_t flags = 0;
707         int rc;
708
709         if (!ptp)
710                 return 0;
711
712         HWRM_PREP(&req, HWRM_PORT_MAC_CFG, BNXT_USE_CHIMP_MB);
713
714         if (ptp->rx_filter)
715                 flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_ENABLE;
716         else
717                 flags |=
718                         HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_DISABLE;
719         if (ptp->tx_tstamp_en)
720                 flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_ENABLE;
721         else
722                 flags |=
723                         HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_DISABLE;
724         req.flags = rte_cpu_to_le_32(flags);
725         req.enables = rte_cpu_to_le_32
726                 (HWRM_PORT_MAC_CFG_INPUT_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE);
727         req.rx_ts_capture_ptp_msg_type = rte_cpu_to_le_16(ptp->rxctl);
728
729         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
730         HWRM_UNLOCK();
731
732         return rc;
733 }
734
735 static int bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
736 {
737         int rc = 0;
738         struct hwrm_port_mac_ptp_qcfg_input req = {.req_type = 0};
739         struct hwrm_port_mac_ptp_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
740         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
741
742         if (ptp)
743                 return 0;
744
745         HWRM_PREP(&req, HWRM_PORT_MAC_PTP_QCFG, BNXT_USE_CHIMP_MB);
746
747         req.port_id = rte_cpu_to_le_16(bp->pf->port_id);
748
749         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
750
751         HWRM_CHECK_RESULT();
752
753         if (BNXT_CHIP_P5(bp)) {
754                 if (!(resp->flags & HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_HWRM_ACCESS))
755                         return 0;
756         } else {
757                 if (!(resp->flags & HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_DIRECT_ACCESS))
758                         return 0;
759         }
760
761         if (resp->flags & HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_ONE_STEP_TX_TS)
762                 bp->flags |= BNXT_FLAG_FW_CAP_ONE_STEP_TX_TS;
763
764         ptp = rte_zmalloc("ptp_cfg", sizeof(*ptp), 0);
765         if (!ptp)
766                 return -ENOMEM;
767
768         if (!BNXT_CHIP_P5(bp)) {
769                 ptp->rx_regs[BNXT_PTP_RX_TS_L] =
770                         rte_le_to_cpu_32(resp->rx_ts_reg_off_lower);
771                 ptp->rx_regs[BNXT_PTP_RX_TS_H] =
772                         rte_le_to_cpu_32(resp->rx_ts_reg_off_upper);
773                 ptp->rx_regs[BNXT_PTP_RX_SEQ] =
774                         rte_le_to_cpu_32(resp->rx_ts_reg_off_seq_id);
775                 ptp->rx_regs[BNXT_PTP_RX_FIFO] =
776                         rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo);
777                 ptp->rx_regs[BNXT_PTP_RX_FIFO_ADV] =
778                         rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo_adv);
779                 ptp->tx_regs[BNXT_PTP_TX_TS_L] =
780                         rte_le_to_cpu_32(resp->tx_ts_reg_off_lower);
781                 ptp->tx_regs[BNXT_PTP_TX_TS_H] =
782                         rte_le_to_cpu_32(resp->tx_ts_reg_off_upper);
783                 ptp->tx_regs[BNXT_PTP_TX_SEQ] =
784                         rte_le_to_cpu_32(resp->tx_ts_reg_off_seq_id);
785                 ptp->tx_regs[BNXT_PTP_TX_FIFO] =
786                         rte_le_to_cpu_32(resp->tx_ts_reg_off_fifo);
787         }
788
789         ptp->bp = bp;
790         bp->ptp_cfg = ptp;
791
792         return 0;
793 }
794
795 void bnxt_free_vf_info(struct bnxt *bp)
796 {
797         int i;
798
799         if (bp->pf == NULL)
800                 return;
801
802         if (bp->pf->vf_info == NULL)
803                 return;
804
805         for (i = 0; i < bp->pf->max_vfs; i++) {
806                 rte_free(bp->pf->vf_info[i].vlan_table);
807                 bp->pf->vf_info[i].vlan_table = NULL;
808                 rte_free(bp->pf->vf_info[i].vlan_as_table);
809                 bp->pf->vf_info[i].vlan_as_table = NULL;
810         }
811         rte_free(bp->pf->vf_info);
812         bp->pf->vf_info = NULL;
813 }
814
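/* Allocate the per-VF bookkeeping for 'max_vfs' child VFs: the fid, the
 * page-sized VLAN and VLAN-antispoof tables and the filter list. Any previous
 * allocation is released first; on failure everything allocated so far is
 * freed again.
 */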
815 static int bnxt_alloc_vf_info(struct bnxt *bp, uint16_t max_vfs)
816 {
817         struct bnxt_child_vf_info *vf_info = bp->pf->vf_info;
818         int i;
819
820         if (vf_info)
821                 bnxt_free_vf_info(bp);
822
823         vf_info = rte_zmalloc("bnxt_vf_info", sizeof(*vf_info) * max_vfs, 0);
824         if (vf_info == NULL) {
825                 PMD_DRV_LOG(ERR, "Failed to alloc vf info\n");
826                 return -ENOMEM;
827         }
828
829         bp->pf->max_vfs = max_vfs;
830         for (i = 0; i < max_vfs; i++) {
831                 vf_info[i].fid = bp->pf->first_vf_id + i;
832                 vf_info[i].vlan_table = rte_zmalloc("VF VLAN table",
833                                                     getpagesize(), getpagesize());
834                 if (vf_info[i].vlan_table == NULL) {
835                         PMD_DRV_LOG(ERR, "Failed to alloc VLAN table for VF %d\n", i);
836                         goto err;
837                 }
838                 rte_mem_lock_page(vf_info[i].vlan_table);
839
840                 vf_info[i].vlan_as_table = rte_zmalloc("VF VLAN AS table",
841                                                        getpagesize(), getpagesize());
842                 if (vf_info[i].vlan_as_table == NULL) {
843                         PMD_DRV_LOG(ERR, "Failed to alloc VLAN AS table for VF %d\n", i);
844                         goto err;
845                 }
846                 rte_mem_lock_page(vf_info[i].vlan_as_table);
847
848                 STAILQ_INIT(&vf_info[i].filter);
849         }
850
851         bp->pf->vf_info = vf_info;
852
853         return 0;
854 err:
855         bnxt_free_vf_info(bp);
856         return -ENOMEM;
857 }
858
859 static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
860 {
861         int rc = 0;
862         struct hwrm_func_qcaps_input req = {.req_type = 0 };
863         struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
864         uint16_t new_max_vfs;
865         uint32_t flags;
866
867         HWRM_PREP(&req, HWRM_FUNC_QCAPS, BNXT_USE_CHIMP_MB);
868
869         req.fid = rte_cpu_to_le_16(0xffff);
870
871         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
872
873         HWRM_CHECK_RESULT();
874
875         bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
876         flags = rte_le_to_cpu_32(resp->flags);
877         if (BNXT_PF(bp)) {
878                 bp->pf->port_id = resp->port_id;
879                 bp->pf->first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
880                 bp->pf->total_vfs = rte_le_to_cpu_16(resp->max_vfs);
881                 new_max_vfs = bp->pdev->max_vfs;
882                 if (new_max_vfs != bp->pf->max_vfs) {
883                         rc = bnxt_alloc_vf_info(bp, new_max_vfs);
884                         if (rc)
885                                 goto unlock;
886                 }
887         }
888
889         bp->fw_fid = rte_le_to_cpu_32(resp->fid);
890         if (!bnxt_check_zero_bytes(resp->mac_address, RTE_ETHER_ADDR_LEN)) {
891                 bp->flags |= BNXT_FLAG_DFLT_MAC_SET;
892                 memcpy(bp->mac_addr, &resp->mac_address, RTE_ETHER_ADDR_LEN);
893         } else {
894                 bp->flags &= ~BNXT_FLAG_DFLT_MAC_SET;
895         }
896         bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
897         bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
898         bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
899         bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
900         bp->first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
901         bp->max_rx_em_flows = rte_le_to_cpu_16(resp->max_rx_em_flows);
902         bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
903         if (!BNXT_CHIP_P5(bp) && !bp->pdev->max_vfs)
904                 bp->max_l2_ctx += bp->max_rx_em_flows;
905         /* TODO: For now, do not support VMDq/RFS on VFs. */
906         if (BNXT_PF(bp)) {
907                 if (bp->pf->max_vfs)
908                         bp->max_vnics = 1;
909                 else
910                         bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
911         } else {
912                 bp->max_vnics = 1;
913         }
914         PMD_DRV_LOG(DEBUG, "Max l2_cntxts is %d vnics is %d\n",
915                     bp->max_l2_ctx, bp->max_vnics);
916         bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
917         if (BNXT_PF(bp)) {
918                 bp->pf->total_vnics = rte_le_to_cpu_16(resp->max_vnics);
919                 if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED) {
920                         bp->flags |= BNXT_FLAG_PTP_SUPPORTED;
921                         PMD_DRV_LOG(DEBUG, "PTP SUPPORTED\n");
922                         HWRM_UNLOCK();
923                         bnxt_hwrm_ptp_qcfg(bp);
924                 }
925         }
926
927         if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT_STATS_SUPPORTED)
928                 bp->flags |= BNXT_FLAG_EXT_STATS_SUPPORTED;
929
930         if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ERROR_RECOVERY_CAPABLE) {
931                 bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY;
932                 PMD_DRV_LOG(DEBUG, "Adapter Error recovery SUPPORTED\n");
933         }
934
935         if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ERR_RECOVER_RELOAD)
936                 bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;
937
938         if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_HOT_RESET_CAPABLE)
939                 bp->fw_cap |= BNXT_FW_CAP_HOT_RESET;
940
941         if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_LINK_ADMIN_STATUS_SUPPORTED)
942                 bp->fw_cap |= BNXT_FW_CAP_LINK_ADMIN;
943
944         if (!(flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_VLAN_ACCELERATION_TX_DISABLED)) {
945                 bp->fw_cap |= BNXT_FW_CAP_VLAN_TX_INSERT;
946                 PMD_DRV_LOG(DEBUG, "VLAN acceleration for TX is enabled\n");
947         }
948 unlock:
949         HWRM_UNLOCK();
950
951         return rc;
952 }
953
954 int bnxt_hwrm_func_qcaps(struct bnxt *bp)
955 {
956         int rc;
957
958         rc = __bnxt_hwrm_func_qcaps(bp);
959         if (rc == -ENOMEM)
960                 return rc;
961
962         if (!rc && bp->hwrm_spec_code >= HWRM_SPEC_CODE_1_8_3) {
963                 rc = bnxt_alloc_ctx_mem(bp);
964                 if (rc)
965                         return rc;
966
967                 /* On older FW,
968                  * bnxt_hwrm_func_resc_qcaps can fail and cause init failure.
969                  * But the error can be ignored. Return success.
970                  */
971                 rc = bnxt_hwrm_func_resc_qcaps(bp);
972                 if (!rc)
973                         bp->flags |= BNXT_FLAG_NEW_RM;
974         }
975
976         return 0;
977 }
978
979 /* VNIC caps apply to all VNICs, so there is no need to pass a vnic_id. */
980 int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
981 {
982         int rc = 0;
983         uint32_t flags;
984         struct hwrm_vnic_qcaps_input req = {.req_type = 0 };
985         struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
986
987         HWRM_PREP(&req, HWRM_VNIC_QCAPS, BNXT_USE_CHIMP_MB);
988
989         req.target_id = rte_cpu_to_le_16(0xffff);
990
991         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
992
993         HWRM_CHECK_RESULT();
994
995         flags = rte_le_to_cpu_32(resp->flags);
996
997         if (flags & HWRM_VNIC_QCAPS_OUTPUT_FLAGS_COS_ASSIGNMENT_CAP) {
998                 bp->vnic_cap_flags |= BNXT_VNIC_CAP_COS_CLASSIFY;
999                 PMD_DRV_LOG(INFO, "CoS assignment capability enabled\n");
1000         }
1001
1002         if (flags & HWRM_VNIC_QCAPS_OUTPUT_FLAGS_OUTERMOST_RSS_CAP)
1003                 bp->vnic_cap_flags |= BNXT_VNIC_CAP_OUTER_RSS;
1004
1005         if (flags & HWRM_VNIC_QCAPS_OUTPUT_FLAGS_RX_CMPL_V2_CAP)
1006                 bp->vnic_cap_flags |= BNXT_VNIC_CAP_RX_CMPL_V2;
1007
1008         if (flags & HWRM_VNIC_QCAPS_OUTPUT_FLAGS_VLAN_STRIP_CAP) {
1009                 bp->vnic_cap_flags |= BNXT_VNIC_CAP_VLAN_RX_STRIP;
1010                 PMD_DRV_LOG(DEBUG, "Rx VLAN strip capability enabled\n");
1011         }
1012
1013         bp->max_tpa_v2 = rte_le_to_cpu_16(resp->max_aggs_supported);
1014
1015         HWRM_UNLOCK();
1016
1017         return rc;
1018 }
1019
1020 int bnxt_hwrm_func_reset(struct bnxt *bp)
1021 {
1022         int rc = 0;
1023         struct hwrm_func_reset_input req = {.req_type = 0 };
1024         struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;
1025
1026         HWRM_PREP(&req, HWRM_FUNC_RESET, BNXT_USE_CHIMP_MB);
1027
1028         req.enables = rte_cpu_to_le_32(0);
1029
1030         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1031
1032         HWRM_CHECK_RESULT();
1033         HWRM_UNLOCK();
1034
1035         return rc;
1036 }
1037
1038 int bnxt_hwrm_func_driver_register(struct bnxt *bp)
1039 {
1040         int rc;
1041         uint32_t flags = 0;
1042         struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
1043         struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
1044
1045         if (bp->flags & BNXT_FLAG_REGISTERED)
1046                 return 0;
1047
1048         if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
1049                 flags = HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_HOT_RESET_SUPPORT;
1050         if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
1051                 flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_ERROR_RECOVERY_SUPPORT;
1052
1053         /* PFs and trusted VFs should indicate support for the
1054          * Master capability on non-Stingray platforms.
1055          */
1056         if ((BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) && !BNXT_STINGRAY(bp))
1057                 flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_MASTER_SUPPORT;
1058
1059         HWRM_PREP(&req, HWRM_FUNC_DRV_RGTR, BNXT_USE_CHIMP_MB);
1060         req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
1061                         HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
1062         req.ver_maj_8b = RTE_VER_YEAR;
1063         req.ver_min_8b = RTE_VER_MONTH;
1064         req.ver_upd_8b = RTE_VER_MINOR;
1065
1066         if (BNXT_PF(bp)) {
1067                 req.enables |= rte_cpu_to_le_32(
1068                         HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_REQ_FWD);
1069                 memcpy(req.vf_req_fwd, bp->pf->vf_req_fwd,
1070                        RTE_MIN(sizeof(req.vf_req_fwd),
1071                                sizeof(bp->pf->vf_req_fwd)));
1072         }
1073
1074         req.flags = rte_cpu_to_le_32(flags);
1075
1076         req.async_event_fwd[0] |=
1077                 rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_LINK_STATUS_CHANGE |
1078                                  ASYNC_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED |
1079                                  ASYNC_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE |
1080                                  ASYNC_CMPL_EVENT_ID_LINK_SPEED_CHANGE |
1081                                  ASYNC_CMPL_EVENT_ID_RESET_NOTIFY);
1082         if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
1083                 req.async_event_fwd[0] |=
1084                         rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_ERROR_RECOVERY);
1085         req.async_event_fwd[1] |=
1086                 rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_PF_DRVR_UNLOAD |
1087                                  ASYNC_CMPL_EVENT_ID_VF_CFG_CHANGE);
1088         if (BNXT_PF(bp))
1089                 req.async_event_fwd[1] |=
1090                         rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_DBG_NOTIFICATION);
1091
1092         if (BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))
1093                 req.async_event_fwd[1] |=
1094                 rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_DEFAULT_VNIC_CHANGE);
1095
1096         req.async_event_fwd[2] |=
1097                 rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_ECHO_REQUEST |
1098                                  ASYNC_CMPL_EVENT_ID_ERROR_REPORT);
1099
1100         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1101
1102         HWRM_CHECK_RESULT();
1103
1104         flags = rte_le_to_cpu_32(resp->flags);
1105         if (flags & HWRM_FUNC_DRV_RGTR_OUTPUT_FLAGS_IF_CHANGE_SUPPORTED)
1106                 bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
1107
1108         HWRM_UNLOCK();
1109
1110         bp->flags |= BNXT_FLAG_REGISTERED;
1111
1112         return rc;
1113 }
1114
1115 int bnxt_hwrm_check_vf_rings(struct bnxt *bp)
1116 {
1117         if (!(BNXT_VF(bp) && (bp->flags & BNXT_FLAG_NEW_RM)))
1118                 return 0;
1119
1120         return bnxt_hwrm_func_reserve_vf_resc(bp, true);
1121 }
1122
1123 int bnxt_hwrm_func_reserve_vf_resc(struct bnxt *bp, bool test)
1124 {
1125         int rc;
1126         uint32_t flags = 0;
1127         uint32_t enables;
1128         struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1129         struct hwrm_func_vf_cfg_input req = {0};
1130
1131         HWRM_PREP(&req, HWRM_FUNC_VF_CFG, BNXT_USE_CHIMP_MB);
1132
1133         enables = HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RX_RINGS  |
1134                   HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_TX_RINGS   |
1135                   HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_STAT_CTXS  |
1136                   HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
1137                   HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS;
1138
1139         if (BNXT_HAS_RING_GRPS(bp)) {
1140                 enables |= HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS;
1141                 req.num_hw_ring_grps = rte_cpu_to_le_16(bp->rx_nr_rings);
1142         }
1143
1144         req.num_tx_rings = rte_cpu_to_le_16(bp->tx_nr_rings);
1145         req.num_rx_rings = rte_cpu_to_le_16(bp->rx_nr_rings *
1146                                             AGG_RING_MULTIPLIER);
1147         req.num_stat_ctxs = rte_cpu_to_le_16(bp->rx_nr_rings + bp->tx_nr_rings);
1148         req.num_cmpl_rings = rte_cpu_to_le_16(bp->rx_nr_rings +
1149                                               bp->tx_nr_rings +
1150                                               BNXT_NUM_ASYNC_CPR(bp));
1151         req.num_vnics = rte_cpu_to_le_16(bp->rx_nr_rings);
1152         if (bp->vf_resv_strategy ==
1153             HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
1154                 enables |= HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS |
1155                            HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_L2_CTXS |
1156                            HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS;
1157                 req.num_rsscos_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_RSS_CTX);
1158                 req.num_l2_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_L2_CTX);
1159                 req.num_vnics = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_VNIC);
1160         } else if (bp->vf_resv_strategy ==
1161                    HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MAXIMAL) {
1162                 enables |= HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS;
1163                 req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
1164         }
1165
1166         if (test)
1167                 flags = HWRM_FUNC_VF_CFG_INPUT_FLAGS_TX_ASSETS_TEST |
1168                         HWRM_FUNC_VF_CFG_INPUT_FLAGS_RX_ASSETS_TEST |
1169                         HWRM_FUNC_VF_CFG_INPUT_FLAGS_CMPL_ASSETS_TEST |
1170                         HWRM_FUNC_VF_CFG_INPUT_FLAGS_RING_GRP_ASSETS_TEST |
1171                         HWRM_FUNC_VF_CFG_INPUT_FLAGS_STAT_CTX_ASSETS_TEST |
1172                         HWRM_FUNC_VF_CFG_INPUT_FLAGS_VNIC_ASSETS_TEST;
1173
1174         if (test && BNXT_HAS_RING_GRPS(bp))
1175                 flags |= HWRM_FUNC_VF_CFG_INPUT_FLAGS_RING_GRP_ASSETS_TEST;
1176
1177         req.flags = rte_cpu_to_le_32(flags);
1178         req.enables |= rte_cpu_to_le_32(enables);
1179
1180         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1181
1182         if (test)
1183                 HWRM_CHECK_RESULT_SILENT();
1184         else
1185                 HWRM_CHECK_RESULT();
1186
1187         HWRM_UNLOCK();
1188         return rc;
1189 }
1190
1191 int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp)
1192 {
1193         int rc;
1194         struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
1195         struct hwrm_func_resource_qcaps_input req = {0};
1196
1197         HWRM_PREP(&req, HWRM_FUNC_RESOURCE_QCAPS, BNXT_USE_CHIMP_MB);
1198         req.fid = rte_cpu_to_le_16(0xffff);
1199
1200         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1201
1202         HWRM_CHECK_RESULT_SILENT();
1203
1204         bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
1205         bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
1206         bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
1207         bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
1208         bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
1209         /* func_resource_qcaps does not return max_rx_em_flows.
1210          * So use the value provided by func_qcaps.
1211          */
1212         bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
1213         if (!BNXT_CHIP_P5(bp) && !bp->pdev->max_vfs)
1214                 bp->max_l2_ctx += bp->max_rx_em_flows;
1215         bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
1216         bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
1217         bp->max_nq_rings = rte_le_to_cpu_16(resp->max_msix);
1218         bp->vf_resv_strategy = rte_le_to_cpu_16(resp->vf_reservation_strategy);
1219         if (bp->vf_resv_strategy >
1220             HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC)
1221                 bp->vf_resv_strategy =
1222                 HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_MAXIMAL;
1223
1224         HWRM_UNLOCK();
1225         return rc;
1226 }
1227
1228 int bnxt_hwrm_ver_get(struct bnxt *bp, uint32_t timeout)
1229 {
1230         int rc = 0;
1231         struct hwrm_ver_get_input req = {.req_type = 0 };
1232         struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
1233         uint32_t fw_version;
1234         uint16_t max_resp_len;
1235         char type[RTE_MEMZONE_NAMESIZE];
1236         uint32_t dev_caps_cfg;
1237
1238         bp->max_req_len = HWRM_MAX_REQ_LEN;
1239         bp->hwrm_cmd_timeout = timeout;
1240         HWRM_PREP(&req, HWRM_VER_GET, BNXT_USE_CHIMP_MB);
1241
1242         req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
1243         req.hwrm_intf_min = HWRM_VERSION_MINOR;
1244         req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
1245
1246         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1247
1248         if (bp->flags & BNXT_FLAG_FW_RESET)
1249                 HWRM_CHECK_RESULT_SILENT();
1250         else
1251                 HWRM_CHECK_RESULT();
1252
1253         if (resp->flags & HWRM_VER_GET_OUTPUT_FLAGS_DEV_NOT_RDY) {
1254                 rc = -EAGAIN;
1255                 goto error;
1256         }
1257
1258         PMD_DRV_LOG(INFO, "%d.%d.%d:%d.%d.%d.%d\n",
1259                 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
1260                 resp->hwrm_intf_upd_8b, resp->hwrm_fw_maj_8b,
1261                 resp->hwrm_fw_min_8b, resp->hwrm_fw_bld_8b,
1262                 resp->hwrm_fw_rsvd_8b);
1263         bp->fw_ver = ((uint32_t)resp->hwrm_fw_maj_8b << 24) |
1264                      ((uint32_t)resp->hwrm_fw_min_8b << 16) |
1265                      ((uint32_t)resp->hwrm_fw_bld_8b << 8) |
1266                      resp->hwrm_fw_rsvd_8b;
1267         PMD_DRV_LOG(INFO, "Driver HWRM version: %d.%d.%d\n",
1268                 HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);
1269
1270         fw_version = resp->hwrm_intf_maj_8b << 16;
1271         fw_version |= resp->hwrm_intf_min_8b << 8;
1272         fw_version |= resp->hwrm_intf_upd_8b;
1273         bp->hwrm_spec_code = fw_version;
1274
1275         /* def_req_timeout value is in milliseconds */
1276         bp->hwrm_cmd_timeout = rte_le_to_cpu_16(resp->def_req_timeout);
1277         /* convert timeout to usec */
1278         bp->hwrm_cmd_timeout *= 1000;
1279         if (!bp->hwrm_cmd_timeout)
1280                 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
1281
1282         if (resp->hwrm_intf_maj_8b != HWRM_VERSION_MAJOR) {
1283                 PMD_DRV_LOG(ERR, "Unsupported firmware API version\n");
1284                 rc = -EINVAL;
1285                 goto error;
1286         }
1287
1288         if (bp->max_req_len > resp->max_req_win_len) {
1289                 PMD_DRV_LOG(ERR, "Unsupported request length\n");
1290                 rc = -EINVAL;
1291                 goto error;
1292         }
1293
1294         bp->chip_num = rte_le_to_cpu_16(resp->chip_num);
1295
1296         bp->max_req_len = rte_le_to_cpu_16(resp->max_req_win_len);
1297         bp->hwrm_max_ext_req_len = rte_le_to_cpu_16(resp->max_ext_req_len);
1298         if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
1299                 bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;
1300
1301         max_resp_len = rte_le_to_cpu_16(resp->max_resp_len);
1302         dev_caps_cfg = rte_le_to_cpu_32(resp->dev_caps_cfg);
1303
1304         RTE_VERIFY(max_resp_len <= bp->max_resp_len);
1305         bp->max_resp_len = max_resp_len;
1306
1307         if ((dev_caps_cfg &
1308                 HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
1309             (dev_caps_cfg &
1310              HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) {
1311                 PMD_DRV_LOG(DEBUG, "Short command supported\n");
1312                 bp->flags |= BNXT_FLAG_SHORT_CMD;
1313         }
1314
1315         if (((dev_caps_cfg &
1316               HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
1317              (dev_caps_cfg &
1318               HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) ||
1319             bp->hwrm_max_ext_req_len > HWRM_MAX_REQ_LEN) {
1320                 sprintf(type, "bnxt_hwrm_short_" PCI_PRI_FMT,
1321                         bp->pdev->addr.domain, bp->pdev->addr.bus,
1322                         bp->pdev->addr.devid, bp->pdev->addr.function);
1323
1324                 rte_free(bp->hwrm_short_cmd_req_addr);
1325
1326                 bp->hwrm_short_cmd_req_addr =
1327                                 rte_malloc(type, bp->hwrm_max_ext_req_len, 0);
1328                 if (bp->hwrm_short_cmd_req_addr == NULL) {
1329                         rc = -ENOMEM;
1330                         goto error;
1331                 }
1332                 bp->hwrm_short_cmd_req_dma_addr =
1333                         rte_malloc_virt2iova(bp->hwrm_short_cmd_req_addr);
1334                 if (bp->hwrm_short_cmd_req_dma_addr == RTE_BAD_IOVA) {
1335                         rte_free(bp->hwrm_short_cmd_req_addr);
1336                         PMD_DRV_LOG(ERR,
1337                                 "Unable to map buffer to physical memory.\n");
1338                         rc = -ENOMEM;
1339                         goto error;
1340                 }
1341         }
1342         if (dev_caps_cfg &
1343             HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED) {
1344                 bp->flags |= BNXT_FLAG_KONG_MB_EN;
1345                 PMD_DRV_LOG(DEBUG, "Kong mailbox channel enabled\n");
1346         }
1347         if (dev_caps_cfg &
1348             HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
1349                 PMD_DRV_LOG(DEBUG, "FW supports Trusted VFs\n");
1350         if (dev_caps_cfg &
1351             HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED) {
1352                 bp->fw_cap |= BNXT_FW_CAP_ADV_FLOW_MGMT;
1353                 PMD_DRV_LOG(DEBUG, "FW supports advanced flow management\n");
1354         }
1355
1356         if (dev_caps_cfg &
1357             HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_ADV_FLOW_COUNTERS_SUPPORTED) {
1358                 PMD_DRV_LOG(DEBUG, "FW supports advanced flow counters\n");
1359                 bp->fw_cap |= BNXT_FW_CAP_ADV_FLOW_COUNTERS;
1360         }
1361
1362         if (dev_caps_cfg &
1363             HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_CFA_TRUFLOW_SUPPORTED) {
1364                 PMD_DRV_LOG(DEBUG, "Host-based truflow feature enabled.\n");
1365                 bp->fw_cap |= BNXT_FW_CAP_TRUFLOW_EN;
1366         }
1367
1368 error:
1369         HWRM_UNLOCK();
1370         return rc;
1371 }
1372
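/* Unregister the driver from the firmware with HWRM_FUNC_DRV_UNRGTR.
 * Skipped when the BNXT_FLAG_REGISTERED flag is not set.
 */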
1373 int bnxt_hwrm_func_driver_unregister(struct bnxt *bp)
1374 {
1375         int rc;
1376         struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
1377         struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
1378
1379         if (!(bp->flags & BNXT_FLAG_REGISTERED))
1380                 return 0;
1381
1382         HWRM_PREP(&req, HWRM_FUNC_DRV_UNRGTR, BNXT_USE_CHIMP_MB);
1383
1384         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1385
1386         HWRM_CHECK_RESULT();
1387         HWRM_UNLOCK();
1388
1389         PMD_DRV_LOG(DEBUG, "Port %u: Unregistered with fw\n",
1390                     bp->eth_dev->data->port_id);
1391
1392         return rc;
1393 }
1394
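/* Program the PHY via HWRM_PORT_PHY_CFG: force a fixed speed, advertise an
 * autoneg speed mask, or force the link down when conf->link_up is not set.
 * Duplex and pause settings are applied as part of the same request.
 */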
1395 static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
1396 {
1397         int rc = 0;
1398         struct hwrm_port_phy_cfg_input req = {0};
1399         struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1400         uint32_t enables = 0;
1401
1402         HWRM_PREP(&req, HWRM_PORT_PHY_CFG, BNXT_USE_CHIMP_MB);
1403
1404         if (conf->link_up) {
1405                 /* Fixed speed requested while autoneg is on, so disable autoneg. */
1406                 if (bp->link_info->auto_mode && conf->link_speed) {
1407                         req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE;
1408                         PMD_DRV_LOG(DEBUG, "Disabling AutoNeg\n");
1409                 }
1410
1411                 req.flags = rte_cpu_to_le_32(conf->phy_flags);
1412                 /*
1413                  * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
1414                  * any auto mode, even "none".
1415                  */
1416                 if (!conf->link_speed) {
1417                         /* No speeds specified. Enable AutoNeg - all speeds */
1418                         enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
1419                         req.auto_mode =
1420                                 HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS;
1421                 } else {
1422                         if (bp->link_info->link_signal_mode) {
1423                                 enables |=
1424                                 HWRM_PORT_PHY_CFG_IN_EN_FORCE_PAM4_LINK_SPEED;
1425                                 req.force_pam4_link_speed =
1426                                         rte_cpu_to_le_16(conf->link_speed);
1427                         } else {
1428                                 req.force_link_speed =
1429                                         rte_cpu_to_le_16(conf->link_speed);
1430                         }
1431                 }
1432                 /* AutoNeg - Advertise speeds specified. */
1433                 if (conf->auto_link_speed_mask &&
1434                     !(conf->phy_flags & HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE)) {
1435                         req.auto_mode =
1436                                 HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
1437                         req.auto_link_speed_mask =
1438                                 conf->auto_link_speed_mask;
1439                         if (conf->auto_pam4_link_speeds) {
1440                                 enables |=
1441                                 HWRM_PORT_PHY_CFG_IN_EN_AUTO_PAM4_LINK_SPD_MASK;
1442                                 req.auto_link_pam4_speed_mask =
1443                                         conf->auto_pam4_link_speeds;
1444                         } else {
1445                                 enables |=
1446                                 HWRM_PORT_PHY_CFG_IN_EN_AUTO_LINK_SPEED_MASK;
1447                         }
1448                 }
1449                 if (conf->auto_link_speed &&
1450                 !(conf->phy_flags & HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE))
1451                         enables |=
1452                                 HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED;
1453
1454                 req.auto_duplex = conf->duplex;
1455                 enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
1456                 req.auto_pause = conf->auto_pause;
1457                 req.force_pause = conf->force_pause;
1458                 /* Enable auto_pause only if set and no pause is forced; else use force_pause. */
1459                 if (req.auto_pause && !req.force_pause)
1460                         enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
1461                 else
1462                         enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;
1463
1464                 req.enables = rte_cpu_to_le_32(enables);
1465         } else {
1466                 req.flags =
1467                 rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN);
1468                 PMD_DRV_LOG(INFO, "Force Link Down\n");
1469         }
1470
1471         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1472
1473         HWRM_CHECK_RESULT();
1474         HWRM_UNLOCK();
1475
1476         return rc;
1477 }
1478
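/* Query the current PHY/link state with HWRM_PORT_PHY_QCFG and cache the
 * speed, duplex, pause and PAM4 settings in *link_info.
 */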
1479 static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
1480                                    struct bnxt_link_info *link_info)
1481 {
1482         int rc = 0;
1483         struct hwrm_port_phy_qcfg_input req = {0};
1484         struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1485
1486         HWRM_PREP(&req, HWRM_PORT_PHY_QCFG, BNXT_USE_CHIMP_MB);
1487
1488         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1489
1490         HWRM_CHECK_RESULT();
1491
1492         link_info->phy_link_status = resp->link;
1493         link_info->link_up =
1494                 (link_info->phy_link_status ==
1495                  HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) ? 1 : 0;
1496         link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
1497         link_info->duplex = resp->duplex_cfg;
1498         link_info->pause = resp->pause;
1499         link_info->auto_pause = resp->auto_pause;
1500         link_info->force_pause = resp->force_pause;
1501         link_info->auto_mode = resp->auto_mode;
1502         link_info->phy_type = resp->phy_type;
1503         link_info->media_type = resp->media_type;
1504
1505         link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
1506         link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
1507         link_info->auto_link_speed_mask = rte_le_to_cpu_16(resp->auto_link_speed_mask);
1508         link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
1509         link_info->force_link_speed = rte_le_to_cpu_16(resp->force_link_speed);
1510         link_info->phy_ver[0] = resp->phy_maj;
1511         link_info->phy_ver[1] = resp->phy_min;
1512         link_info->phy_ver[2] = resp->phy_bld;
1513         link_info->link_signal_mode =
1514                 rte_le_to_cpu_16(resp->active_fec_signal_mode);
1515         link_info->force_pam4_link_speed =
1516                         rte_le_to_cpu_16(resp->force_pam4_link_speed);
1517         link_info->support_pam4_speeds =
1518                         rte_le_to_cpu_16(resp->support_pam4_speeds);
1519         link_info->auto_pam4_link_speeds =
1520                         rte_le_to_cpu_16(resp->auto_pam4_link_speed_mask);
1521         link_info->module_status = resp->module_status;
1522         HWRM_UNLOCK();
1523
1524         PMD_DRV_LOG(DEBUG, "Link Speed:%d,Auto:%d:%x:%x,Support:%x,Force:%x\n",
1525                     link_info->link_speed, link_info->auto_mode,
1526                     link_info->auto_link_speed, link_info->auto_link_speed_mask,
1527                     link_info->support_speeds, link_info->force_link_speed);
1528         PMD_DRV_LOG(DEBUG, "Link Signal:%d,PAM::Auto:%x,Support:%x,Force:%x\n",
1529                     link_info->link_signal_mode,
1530                     link_info->auto_pam4_link_speeds,
1531                     link_info->support_pam4_speeds,
1532                     link_info->force_pam4_link_speed);
1533         return rc;
1534 }
1535
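/* Query PHY capabilities (HWRM_PORT_PHY_QCAPS) and record which speeds can
 * be autonegotiated. Not issued for untrusted VFs.
 */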
1536 int bnxt_hwrm_port_phy_qcaps(struct bnxt *bp)
1537 {
1538         int rc = 0;
1539         struct hwrm_port_phy_qcaps_input req = {0};
1540         struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
1541         struct bnxt_link_info *link_info = bp->link_info;
1542
1543         if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp))
1544                 return 0;
1545
1546         HWRM_PREP(&req, HWRM_PORT_PHY_QCAPS, BNXT_USE_CHIMP_MB);
1547
1548         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1549
1550         HWRM_CHECK_RESULT_SILENT();
1551
1552         bp->port_cnt = resp->port_cnt;
1553         if (resp->supported_speeds_auto_mode)
1554                 link_info->support_auto_speeds =
1555                         rte_le_to_cpu_16(resp->supported_speeds_auto_mode);
1556         if (resp->supported_pam4_speeds_auto_mode)
1557                 link_info->support_pam4_auto_speeds =
1558                         rte_le_to_cpu_16(resp->supported_pam4_speeds_auto_mode);
1559
1560         HWRM_UNLOCK();
1561
1562         /* Older firmware does not have supported_auto_speeds, so assume
1563          * that all supported speeds can be autonegotiated.
1564          */
1565         if (link_info->auto_link_speed_mask && !link_info->support_auto_speeds)
1566                 link_info->support_auto_speeds = link_info->support_speeds;
1567
1568         return 0;
1569 }
1570
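/* Scan the Tx CoS queues for a LOSSY service profile and use its queue id
 * as the default Tx CoS queue. Returns true if one was found.
 */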
1571 static bool bnxt_find_lossy_profile(struct bnxt *bp)
1572 {
1573         int i = 0;
1574
1575         for (i = BNXT_COS_QUEUE_COUNT - 1; i >= 0; i--) {
1576                 if (bp->tx_cos_queue[i].profile ==
1577                     HWRM_QUEUE_SERVICE_PROFILE_LOSSY) {
1578                         bp->tx_cosq_id[0] = bp->tx_cos_queue[i].id;
1579                         return true;
1580                 }
1581         }
1582         return false;
1583 }
1584
1585 static void bnxt_find_first_valid_profile(struct bnxt *bp)
1586 {
1587         int i = 0;
1588
1589         for (i = BNXT_COS_QUEUE_COUNT - 1; i >= 0; i--) {
1590                 if (bp->tx_cos_queue[i].profile !=
1591                     HWRM_QUEUE_SERVICE_PROFILE_UNKNOWN &&
1592                     bp->tx_cos_queue[i].id !=
1593                     HWRM_QUEUE_SERVICE_PROFILE_UNKNOWN) {
1594                         bp->tx_cosq_id[0] = bp->tx_cos_queue[i].id;
1595                         break;
1596                 }
1597         }
1598 }
1599
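/* Discover the Tx and Rx CoS queue configuration with HWRM_QUEUE_QPORTCFG
 * and select the Tx CoS queue id(s) the driver will use.
 */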
1600 int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
1601 {
1602         int rc = 0;
1603         struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
1604         struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
1605         uint32_t dir = HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX;
1606         int i;
1607
1608 get_rx_info:
1609         HWRM_PREP(&req, HWRM_QUEUE_QPORTCFG, BNXT_USE_CHIMP_MB);
1610
1611         req.flags = rte_cpu_to_le_32(dir);
1612         /* drv_qmap_cap is set only for HWRM >= 1.9.1 when CoS classification is not required. */
1613         if (bp->hwrm_spec_code >= HWRM_VERSION_1_9_1 &&
1614             !(bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY))
1615                 req.drv_qmap_cap =
1616                         HWRM_QUEUE_QPORTCFG_INPUT_DRV_QMAP_CAP_ENABLED;
1617         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1618
1619         HWRM_CHECK_RESULT();
1620
1621         if (dir == HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX) {
1622                 GET_TX_QUEUE_INFO(0);
1623                 GET_TX_QUEUE_INFO(1);
1624                 GET_TX_QUEUE_INFO(2);
1625                 GET_TX_QUEUE_INFO(3);
1626                 GET_TX_QUEUE_INFO(4);
1627                 GET_TX_QUEUE_INFO(5);
1628                 GET_TX_QUEUE_INFO(6);
1629                 GET_TX_QUEUE_INFO(7);
1630         } else  {
1631                 GET_RX_QUEUE_INFO(0);
1632                 GET_RX_QUEUE_INFO(1);
1633                 GET_RX_QUEUE_INFO(2);
1634                 GET_RX_QUEUE_INFO(3);
1635                 GET_RX_QUEUE_INFO(4);
1636                 GET_RX_QUEUE_INFO(5);
1637                 GET_RX_QUEUE_INFO(6);
1638                 GET_RX_QUEUE_INFO(7);
1639         }
1640
1641         HWRM_UNLOCK();
1642
1643         if (dir == HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX)
1644                 goto done;
1645
1646         if (bp->hwrm_spec_code < HWRM_VERSION_1_9_1) {
1647                 bp->tx_cosq_id[0] = bp->tx_cos_queue[0].id;
1648         } else {
1649                 int j;
1650
1651                 /* iterate and find the COSq profile to use for Tx */
1652                 if (bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY) {
1653                         for (j = 0, i = 0; i < BNXT_COS_QUEUE_COUNT; i++) {
1654                                 if (bp->tx_cos_queue[i].id != 0xff)
1655                                         bp->tx_cosq_id[j++] =
1656                                                 bp->tx_cos_queue[i].id;
1657                         }
1658                 } else {
1659                         /* When CoS classification is disabled, prefer a
1660                          * LOSSY profile for normal NIC operation. If none
1661                          * is found, fall back to the first valid profile.
1662                          */
1663                         if (!bnxt_find_lossy_profile(bp))
1664                                 bnxt_find_first_valid_profile(bp);
1666                 }
1667         }
1668
1669         bp->max_tc = resp->max_configurable_queues;
1670         bp->max_lltc = resp->max_configurable_lossless_queues;
1671         if (bp->max_tc > BNXT_MAX_QUEUE)
1672                 bp->max_tc = BNXT_MAX_QUEUE;
1673         bp->max_q = bp->max_tc;
1674
1675         if (dir == HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX) {
1676                 dir = HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX;
1677                 goto get_rx_info;
1678         }
1679
1680 done:
1681         return rc;
1682 }
1683
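/* Allocate a hardware ring (Tx, Rx, completion, NQ or Rx aggregation) via
 * HWRM_RING_ALLOC and store the firmware ring id returned by the command.
 */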
1684 int bnxt_hwrm_ring_alloc(struct bnxt *bp,
1685                          struct bnxt_ring *ring,
1686                          uint32_t ring_type, uint32_t map_index,
1687                          uint32_t stats_ctx_id, uint32_t cmpl_ring_id,
1688                          uint16_t tx_cosq_id)
1689 {
1690         int rc = 0;
1691         uint32_t enables = 0;
1692         struct hwrm_ring_alloc_input req = {.req_type = 0 };
1693         struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1694         struct rte_mempool *mb_pool;
1695         uint16_t rx_buf_size;
1696
1697         HWRM_PREP(&req, HWRM_RING_ALLOC, BNXT_USE_CHIMP_MB);
1698
1699         req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
1700         req.fbo = rte_cpu_to_le_32(0);
1701         /* Association of ring index with doorbell index */
1702         req.logical_id = rte_cpu_to_le_16(map_index);
1703         req.length = rte_cpu_to_le_32(ring->ring_size);
1704
1705         switch (ring_type) {
1706         case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
1707                 req.ring_type = ring_type;
1708                 req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
1709                 req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id);
1710                 req.queue_id = rte_cpu_to_le_16(tx_cosq_id);
1711                 if (stats_ctx_id != INVALID_STATS_CTX_ID)
1712                         enables |=
1713                         HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
1714                 break;
1715         case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
1716                 req.ring_type = ring_type;
1717                 req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
1718                 req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id);
1719                 if (BNXT_CHIP_P5(bp)) {
1720                         mb_pool = bp->rx_queues[0]->mb_pool;
1721                         rx_buf_size = rte_pktmbuf_data_room_size(mb_pool) -
1722                                       RTE_PKTMBUF_HEADROOM;
1723                         rx_buf_size = RTE_MIN(BNXT_MAX_PKT_LEN, rx_buf_size);
1724                         req.rx_buf_size = rte_cpu_to_le_16(rx_buf_size);
1725                         enables |=
1726                                 HWRM_RING_ALLOC_INPUT_ENABLES_RX_BUF_SIZE_VALID;
1727                 }
1728                 if (stats_ctx_id != INVALID_STATS_CTX_ID)
1729                         enables |=
1730                                 HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
1731                 break;
1732         case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
1733                 req.ring_type = ring_type;
1734                 if (BNXT_HAS_NQ(bp)) {
1735                         /* Association of cp ring with nq */
1736                         req.nq_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
1737                         enables |=
1738                                 HWRM_RING_ALLOC_INPUT_ENABLES_NQ_RING_ID_VALID;
1739                 }
1740                 req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
1741                 break;
1742         case HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ:
1743                 req.ring_type = ring_type;
1744                 req.page_size = BNXT_PAGE_SHFT;
1745                 req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
1746                 break;
1747         case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG:
1748                 req.ring_type = ring_type;
1749                 req.rx_ring_id = rte_cpu_to_le_16(ring->fw_rx_ring_id);
1750
1751                 mb_pool = bp->rx_queues[0]->mb_pool;
1752                 rx_buf_size = rte_pktmbuf_data_room_size(mb_pool) -
1753                               RTE_PKTMBUF_HEADROOM;
1754                 rx_buf_size = RTE_MIN(BNXT_MAX_PKT_LEN, rx_buf_size);
1755                 req.rx_buf_size = rte_cpu_to_le_16(rx_buf_size);
1756
1757                 req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id);
1758                 enables |= HWRM_RING_ALLOC_INPUT_ENABLES_RX_RING_ID_VALID |
1759                            HWRM_RING_ALLOC_INPUT_ENABLES_RX_BUF_SIZE_VALID |
1760                            HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
1761                 break;
1762         default:
1763                 PMD_DRV_LOG(ERR, "hwrm alloc invalid ring type %d\n",
1764                         ring_type);
1765                 HWRM_UNLOCK();
1766                 return -EINVAL;
1767         }
1768         req.enables = rte_cpu_to_le_32(enables);
1769
1770         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1771
1772         if (rc || resp->error_code) {
1773                 if (rc == 0 && resp->error_code)
1774                         rc = rte_le_to_cpu_16(resp->error_code);
1775                 switch (ring_type) {
1776                 case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
1777                         PMD_DRV_LOG(ERR,
1778                                 "hwrm_ring_alloc cp failed. rc:%d\n", rc);
1779                         HWRM_UNLOCK();
1780                         return rc;
1781                 case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
1782                         PMD_DRV_LOG(ERR,
1783                                     "hwrm_ring_alloc rx failed. rc:%d\n", rc);
1784                         HWRM_UNLOCK();
1785                         return rc;
1786                 case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG:
1787                         PMD_DRV_LOG(ERR,
1788                                     "hwrm_ring_alloc rx agg failed. rc:%d\n",
1789                                     rc);
1790                         HWRM_UNLOCK();
1791                         return rc;
1792                 case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
1793                         PMD_DRV_LOG(ERR,
1794                                     "hwrm_ring_alloc tx failed. rc:%d\n", rc);
1795                         HWRM_UNLOCK();
1796                         return rc;
1797                 case HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ:
1798                         PMD_DRV_LOG(ERR,
1799                                     "hwrm_ring_alloc nq failed. rc:%d\n", rc);
1800                         HWRM_UNLOCK();
1801                         return rc;
1802                 default:
1803                         PMD_DRV_LOG(ERR, "Invalid ring. rc:%d\n", rc);
1804                         HWRM_UNLOCK();
1805                         return rc;
1806                 }
1807         }
1808
1809         ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
1810         HWRM_UNLOCK();
1811         return rc;
1812 }
1813
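/* Free a hardware ring via HWRM_RING_FREE and invalidate its cached
 * firmware ring id.
 */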
1814 int bnxt_hwrm_ring_free(struct bnxt *bp,
1815                         struct bnxt_ring *ring, uint32_t ring_type,
1816                         uint16_t cp_ring_id)
1817 {
1818         int rc;
1819         struct hwrm_ring_free_input req = {.req_type = 0 };
1820         struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
1821
1822         if (ring->fw_ring_id == INVALID_HW_RING_ID)
1823                 return -EINVAL;
1824
1825         HWRM_PREP(&req, HWRM_RING_FREE, BNXT_USE_CHIMP_MB);
1826
1827         req.ring_type = ring_type;
1828         req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);
1829         req.cmpl_ring = rte_cpu_to_le_16(cp_ring_id);
1830
1831         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1832         ring->fw_ring_id = INVALID_HW_RING_ID;
1833
1834         if (rc || resp->error_code) {
1835                 if (rc == 0 && resp->error_code)
1836                         rc = rte_le_to_cpu_16(resp->error_code);
1837                 HWRM_UNLOCK();
1838
1839                 switch (ring_type) {
1840                 case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
1841                         PMD_DRV_LOG(ERR, "hwrm_ring_free cp failed. rc:%d\n",
1842                                 rc);
1843                         return rc;
1844                 case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
1845                         PMD_DRV_LOG(ERR, "hwrm_ring_free rx failed. rc:%d\n",
1846                                 rc);
1847                         return rc;
1848                 case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
1849                         PMD_DRV_LOG(ERR, "hwrm_ring_free tx failed. rc:%d\n",
1850                                 rc);
1851                         return rc;
1852                 case HWRM_RING_FREE_INPUT_RING_TYPE_NQ:
1853                         PMD_DRV_LOG(ERR,
1854                                     "hwrm_ring_free nq failed. rc:%d\n", rc);
1855                         return rc;
1856                 case HWRM_RING_FREE_INPUT_RING_TYPE_RX_AGG:
1857                         PMD_DRV_LOG(ERR,
1858                                     "hwrm_ring_free agg failed. rc:%d\n", rc);
1859                         return rc;
1860                 default:
1861                         PMD_DRV_LOG(ERR, "Invalid ring, rc:%d\n", rc);
1862                         return rc;
1863                 }
1864         }
1865         HWRM_UNLOCK();
1866         return 0;
1867 }
1868
1869 int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
1870 {
1871         int rc = 0;
1872         struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
1873         struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1874
1875         /* Don't attempt to re-create the ring group if it is already created */
1876         if (bp->grp_info[idx].fw_grp_id != INVALID_HW_RING_ID)
1877                 return 0;
1878
1879         HWRM_PREP(&req, HWRM_RING_GRP_ALLOC, BNXT_USE_CHIMP_MB);
1880
1881         req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
1882         req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
1883         req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
1884         req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);
1885
1886         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1887
1888         HWRM_CHECK_RESULT();
1889
1890         bp->grp_info[idx].fw_grp_id = rte_le_to_cpu_16(resp->ring_group_id);
1891
1892         HWRM_UNLOCK();
1893
1894         return rc;
1895 }
1896
1897 int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
1898 {
1899         int rc;
1900         struct hwrm_ring_grp_free_input req = {.req_type = 0 };
1901         struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;
1902
1903         if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID)
1904                 return 0;
1905
1906         HWRM_PREP(&req, HWRM_RING_GRP_FREE, BNXT_USE_CHIMP_MB);
1907
1908         req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);
1909
1910         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1911
1912         HWRM_CHECK_RESULT();
1913         HWRM_UNLOCK();
1914
1915         bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
1916         return rc;
1917 }
1918
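/* Clear the statistics of a completion ring's statistics context. */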
1919 int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
1920 {
1921         int rc = 0;
1922         struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
1923         struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
1924
1925         if (cpr->hw_stats_ctx_id == HWRM_NA_SIGNATURE)
1926                 return rc;
1927
1928         HWRM_PREP(&req, HWRM_STAT_CTX_CLR_STATS, BNXT_USE_CHIMP_MB);
1929
1930         req.stat_ctx_id = rte_cpu_to_le_32(cpr->hw_stats_ctx_id);
1931
1932         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1933
1934         HWRM_CHECK_RESULT();
1935         HWRM_UNLOCK();
1936
1937         return rc;
1938 }
1939
1940 int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
1941 {
1942         int rc;
1943         struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
1944         struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1945
1946         if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE)
1947                 return 0;
1948
1949         HWRM_PREP(&req, HWRM_STAT_CTX_ALLOC, BNXT_USE_CHIMP_MB);
1950
1951         req.update_period_ms = rte_cpu_to_le_32(0);
1952
1953         req.stats_dma_addr = rte_cpu_to_le_64(cpr->hw_stats_map);
1954
1955         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1956
1957         HWRM_CHECK_RESULT();
1958
1959         cpr->hw_stats_ctx_id = rte_le_to_cpu_32(resp->stat_ctx_id);
1960
1961         HWRM_UNLOCK();
1962
1963         return rc;
1964 }
1965
1966 static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
1967 {
1968         int rc;
1969         struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
1970         struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;
1971
1972         if (cpr->hw_stats_ctx_id == HWRM_NA_SIGNATURE)
1973                 return 0;
1974
1975         HWRM_PREP(&req, HWRM_STAT_CTX_FREE, BNXT_USE_CHIMP_MB);
1976
1977         req.stat_ctx_id = rte_cpu_to_le_32(cpr->hw_stats_ctx_id);
1978
1979         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1980
1981         HWRM_CHECK_RESULT();
1982         HWRM_UNLOCK();
1983
1984         cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
1985
1986         return rc;
1987 }
1988
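/* Allocate a VNIC in firmware and, on adapters with ring groups, map the
 * VNIC's ring groups and initialize its RSS/CoS/LB rules.
 */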
1989 int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1990 {
1991         int rc = 0, i, j;
1992         struct hwrm_vnic_alloc_input req = { 0 };
1993         struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1994
1995         if (!BNXT_HAS_RING_GRPS(bp))
1996                 goto skip_ring_grps;
1997
1998         /* map ring groups to this vnic */
1999         PMD_DRV_LOG(DEBUG, "Alloc VNIC. Start %x, End %x\n",
2000                 vnic->start_grp_id, vnic->end_grp_id);
2001         for (i = vnic->start_grp_id, j = 0; i < vnic->end_grp_id; i++, j++)
2002                 vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
2003
2004         vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
2005         vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
2006         vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
2007         vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
2008
2009 skip_ring_grps:
2010         vnic->mru = BNXT_VNIC_MRU(bp->eth_dev->data->mtu);
2011         HWRM_PREP(&req, HWRM_VNIC_ALLOC, BNXT_USE_CHIMP_MB);
2012
2013         if (vnic->func_default)
2014                 req.flags =
2015                         rte_cpu_to_le_32(HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT);
2016         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2017
2018         HWRM_CHECK_RESULT();
2019
2020         vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
2021         HWRM_UNLOCK();
2022         PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
2023         return rc;
2024 }
2025
2026 static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
2027                                         struct bnxt_vnic_info *vnic,
2028                                         struct bnxt_plcmodes_cfg *pmode)
2029 {
2030         int rc = 0;
2031         struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 };
2032         struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2033
2034         HWRM_PREP(&req, HWRM_VNIC_PLCMODES_QCFG, BNXT_USE_CHIMP_MB);
2035
2036         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2037
2038         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2039
2040         HWRM_CHECK_RESULT();
2041
2042         pmode->flags = rte_le_to_cpu_32(resp->flags);
2043         /* dflt_vnic bit doesn't exist in the _cfg command */
2044         pmode->flags &= ~(HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC);
2045         pmode->jumbo_thresh = rte_le_to_cpu_16(resp->jumbo_thresh);
2046         pmode->hds_offset = rte_le_to_cpu_16(resp->hds_offset);
2047         pmode->hds_threshold = rte_le_to_cpu_16(resp->hds_threshold);
2048
2049         HWRM_UNLOCK();
2050
2051         return rc;
2052 }
2053
2054 static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
2055                                        struct bnxt_vnic_info *vnic,
2056                                        struct bnxt_plcmodes_cfg *pmode)
2057 {
2058         int rc = 0;
2059         struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
2060         struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2061
2062         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
2063                 PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
2064                 return rc;
2065         }
2066
2067         HWRM_PREP(&req, HWRM_VNIC_PLCMODES_CFG, BNXT_USE_CHIMP_MB);
2068
2069         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2070         req.flags = rte_cpu_to_le_32(pmode->flags);
2071         req.jumbo_thresh = rte_cpu_to_le_16(pmode->jumbo_thresh);
2072         req.hds_offset = rte_cpu_to_le_16(pmode->hds_offset);
2073         req.hds_threshold = rte_cpu_to_le_16(pmode->hds_threshold);
2074         req.enables = rte_cpu_to_le_32(
2075             HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID |
2076             HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID |
2077             HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID
2078         );
2079
2080         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2081
2082         HWRM_CHECK_RESULT();
2083         HWRM_UNLOCK();
2084
2085         return rc;
2086 }
2087
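/* Configure a VNIC: the default Rx/completion rings on P5 chips, or the
 * default ring group plus RSS/CoS/LB rules otherwise, along with the MRU
 * and VLAN-strip/BD-stall flags. Placement modes are queried before and
 * restored after the configuration.
 */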
2088 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2089 {
2090         int rc = 0;
2091         struct hwrm_vnic_cfg_input req = {.req_type = 0 };
2092         struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2093         struct bnxt_plcmodes_cfg pmodes = { 0 };
2094         uint32_t ctx_enable_flag = 0;
2095         uint32_t enables = 0;
2096
2097         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
2098                 PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
2099                 return rc;
2100         }
2101
2102         rc = bnxt_hwrm_vnic_plcmodes_qcfg(bp, vnic, &pmodes);
2103         if (rc)
2104                 return rc;
2105
2106         HWRM_PREP(&req, HWRM_VNIC_CFG, BNXT_USE_CHIMP_MB);
2107
2108         if (BNXT_CHIP_P5(bp)) {
2109                 int dflt_rxq = vnic->start_grp_id;
2110                 struct bnxt_rx_ring_info *rxr;
2111                 struct bnxt_cp_ring_info *cpr;
2112                 struct bnxt_rx_queue *rxq;
2113                 int i;
2114
2115                 /*
2116                  * The first active receive ring is used as the VNIC
2117                  * default receive ring. If there are no active receive
2118                  * rings (all corresponding receive queues are stopped),
2119                  * the first receive ring is used.
2120                  */
2121                 for (i = vnic->start_grp_id; i < vnic->end_grp_id; i++) {
2122                         rxq = bp->eth_dev->data->rx_queues[i];
2123                         if (rxq->rx_started) {
2124                                 dflt_rxq = i;
2125                                 break;
2126                         }
2127                 }
2128
2129                 rxq = bp->eth_dev->data->rx_queues[dflt_rxq];
2130                 rxr = rxq->rx_ring;
2131                 cpr = rxq->cp_ring;
2132
2133                 req.default_rx_ring_id =
2134                         rte_cpu_to_le_16(rxr->rx_ring_struct->fw_ring_id);
2135                 req.default_cmpl_ring_id =
2136                         rte_cpu_to_le_16(cpr->cp_ring_struct->fw_ring_id);
2137                 enables = HWRM_VNIC_CFG_INPUT_ENABLES_DEFAULT_RX_RING_ID |
2138                           HWRM_VNIC_CFG_INPUT_ENABLES_DEFAULT_CMPL_RING_ID;
2139                 if (bp->vnic_cap_flags & BNXT_VNIC_CAP_RX_CMPL_V2) {
2140                         enables |= HWRM_VNIC_CFG_INPUT_ENABLES_RX_CSUM_V2_MODE;
2141                         req.rx_csum_v2_mode =
2142                                 HWRM_VNIC_CFG_INPUT_RX_CSUM_V2_MODE_ALL_OK;
2143                 }
2144                 goto config_mru;
2145         }
2146
2147         /* Only RSS is supported for now; CoS and LB are TBD. */
2148         enables = HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP;
2149         if (vnic->lb_rule != 0xffff)
2150                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE;
2151         if (vnic->cos_rule != 0xffff)
2152                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE;
2153         if (vnic->rss_rule != (uint16_t)HWRM_NA_SIGNATURE) {
2154                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_MRU;
2155                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
2156         }
2157         if (bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY) {
2158                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_QUEUE_ID;
2159                 req.queue_id = rte_cpu_to_le_16(vnic->cos_queue_id);
2160         }
2161
2162         enables |= ctx_enable_flag;
2163         req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);
2164         req.rss_rule = rte_cpu_to_le_16(vnic->rss_rule);
2165         req.cos_rule = rte_cpu_to_le_16(vnic->cos_rule);
2166         req.lb_rule = rte_cpu_to_le_16(vnic->lb_rule);
2167
2168 config_mru:
2169         req.enables = rte_cpu_to_le_32(enables);
2170         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2171         req.mru = rte_cpu_to_le_16(vnic->mru);
2172         /* Configure default VNIC only once. */
2173         if (vnic->func_default && !(bp->flags & BNXT_FLAG_DFLT_VNIC_SET)) {
2174                 req.flags |=
2175                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
2176                 bp->flags |= BNXT_FLAG_DFLT_VNIC_SET;
2177         }
2178         if (vnic->vlan_strip)
2179                 req.flags |=
2180                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
2181         if (vnic->bd_stall)
2182                 req.flags |=
2183                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
2184         if (vnic->rss_dflt_cr)
2185                 req.flags |= rte_cpu_to_le_32(
2186                         HWRM_VNIC_CFG_INPUT_FLAGS_RSS_DFLT_CR_MODE);
2187
2188         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2189
2190         HWRM_CHECK_RESULT();
2191         HWRM_UNLOCK();
2192
2193         rc = bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes);
2194
2195         return rc;
2196 }
2197
2198 int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
2199                 int16_t fw_vf_id)
2200 {
2201         int rc = 0;
2202         struct hwrm_vnic_qcfg_input req = {.req_type = 0 };
2203         struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2204
2205         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
2206                 PMD_DRV_LOG(DEBUG, "VNIC QCFG ID %d\n", vnic->fw_vnic_id);
2207                 return rc;
2208         }
2209         HWRM_PREP(&req, HWRM_VNIC_QCFG, BNXT_USE_CHIMP_MB);
2210
2211         req.enables =
2212                 rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID);
2213         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2214         req.vf_id = rte_cpu_to_le_16(fw_vf_id);
2215
2216         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2217
2218         HWRM_CHECK_RESULT();
2219
2220         vnic->dflt_ring_grp = rte_le_to_cpu_16(resp->dflt_ring_grp);
2221         vnic->rss_rule = rte_le_to_cpu_16(resp->rss_rule);
2222         vnic->cos_rule = rte_le_to_cpu_16(resp->cos_rule);
2223         vnic->lb_rule = rte_le_to_cpu_16(resp->lb_rule);
2224         vnic->mru = rte_le_to_cpu_16(resp->mru);
2225         vnic->func_default = rte_le_to_cpu_32(
2226                         resp->flags) & HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT;
2227         vnic->vlan_strip = rte_le_to_cpu_32(resp->flags) &
2228                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE;
2229         vnic->bd_stall = rte_le_to_cpu_32(resp->flags) &
2230                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE;
2231         vnic->rss_dflt_cr = rte_le_to_cpu_32(resp->flags) &
2232                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE;
2233
2234         HWRM_UNLOCK();
2235
2236         return rc;
2237 }
2238
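/* Allocate an RSS/CoS/LB context for the VNIC and remember its id. */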
2239 int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp,
2240                              struct bnxt_vnic_info *vnic, uint16_t ctx_idx)
2241 {
2242         int rc = 0;
2243         uint16_t ctx_id;
2244         struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
2245         struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
2246                                                 bp->hwrm_cmd_resp_addr;
2247
2248         HWRM_PREP(&req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, BNXT_USE_CHIMP_MB);
2249
2250         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2251         HWRM_CHECK_RESULT();
2252
2253         ctx_id = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
2254         if (!BNXT_HAS_RING_GRPS(bp))
2255                 vnic->fw_grp_ids[ctx_idx] = ctx_id;
2256         else if (ctx_idx == 0)
2257                 vnic->rss_rule = ctx_id;
2258
2259         HWRM_UNLOCK();
2260
2261         return rc;
2262 }
2263
2264 static
2265 int _bnxt_hwrm_vnic_ctx_free(struct bnxt *bp,
2266                              struct bnxt_vnic_info *vnic, uint16_t ctx_idx)
2267 {
2268         int rc = 0;
2269         struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
2270         struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
2271                                                 bp->hwrm_cmd_resp_addr;
2272
2273         if (ctx_idx == (uint16_t)HWRM_NA_SIGNATURE) {
2274                 PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
2275                 return rc;
2276         }
2277         HWRM_PREP(&req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, BNXT_USE_CHIMP_MB);
2278
2279         req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(ctx_idx);
2280
2281         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2282
2283         HWRM_CHECK_RESULT();
2284         HWRM_UNLOCK();
2285
2286         return rc;
2287 }
2288
2289 int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2290 {
2291         int rc = 0;
2292
2293         if (BNXT_CHIP_P5(bp)) {
2294                 int j;
2295
2296                 for (j = 0; j < vnic->num_lb_ctxts; j++) {
2297                         rc = _bnxt_hwrm_vnic_ctx_free(bp,
2298                                                       vnic,
2299                                                       vnic->fw_grp_ids[j]);
2300                         vnic->fw_grp_ids[j] = INVALID_HW_RING_ID;
2301                 }
2302                 vnic->num_lb_ctxts = 0;
2303         } else {
2304                 rc = _bnxt_hwrm_vnic_ctx_free(bp, vnic, vnic->rss_rule);
2305                 vnic->rss_rule = INVALID_HW_RING_ID;
2306         }
2307
2308         return rc;
2309 }
2310
2311 int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2312 {
2313         int rc = 0;
2314         struct hwrm_vnic_free_input req = {.req_type = 0 };
2315         struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;
2316
2317         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
2318                 PMD_DRV_LOG(DEBUG, "VNIC FREE ID %x\n", vnic->fw_vnic_id);
2319                 return rc;
2320         }
2321
2322         HWRM_PREP(&req, HWRM_VNIC_FREE, BNXT_USE_CHIMP_MB);
2323
2324         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2325
2326         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2327
2328         HWRM_CHECK_RESULT();
2329         HWRM_UNLOCK();
2330
2331         vnic->fw_vnic_id = INVALID_HW_RING_ID;
2332         /* Configure default VNIC again if necessary. */
2333         if (vnic->func_default && (bp->flags & BNXT_FLAG_DFLT_VNIC_SET))
2334                 bp->flags &= ~BNXT_FLAG_DFLT_VNIC_SET;
2335
2336         return rc;
2337 }
2338
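/* P5 chips take one HWRM_VNIC_RSS_CFG request per ring-table pair, so the
 * command is issued once for each of the VNIC's contexts.
 */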
2339 static int
2340 bnxt_hwrm_vnic_rss_cfg_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2341 {
2342         int i;
2343         int rc = 0;
2344         int nr_ctxs = vnic->num_lb_ctxts;
2345         struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
2346         struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2347
2348         for (i = 0; i < nr_ctxs; i++) {
2349                 HWRM_PREP(&req, HWRM_VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
2350
2351                 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2352                 req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
2353                 req.hash_mode_flags = vnic->hash_mode;
2354
2355                 req.hash_key_tbl_addr =
2356                         rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
2357
2358                 req.ring_grp_tbl_addr =
2359                         rte_cpu_to_le_64(vnic->rss_table_dma_addr +
2360                                          i * HW_HASH_INDEX_SIZE);
2361                 req.ring_table_pair_index = i;
2362                 req.rss_ctx_idx = rte_cpu_to_le_16(vnic->fw_grp_ids[i]);
2363
2364                 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req),
2365                                             BNXT_USE_CHIMP_MB);
2366
2367                 HWRM_CHECK_RESULT();
2368                 HWRM_UNLOCK();
2369         }
2370
2371         return rc;
2372 }
2373
2374 int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
2375                            struct bnxt_vnic_info *vnic)
2376 {
2377         int rc = 0;
2378         struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
2379         struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2380
2381         if (!vnic->rss_table)
2382                 return 0;
2383
2384         if (BNXT_CHIP_P5(bp))
2385                 return bnxt_hwrm_vnic_rss_cfg_p5(bp, vnic);
2386
2387         HWRM_PREP(&req, HWRM_VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
2388
2389         req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
2390         req.hash_mode_flags = vnic->hash_mode;
2391
2392         req.ring_grp_tbl_addr =
2393             rte_cpu_to_le_64(vnic->rss_table_dma_addr);
2394         req.hash_key_tbl_addr =
2395             rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
2396         req.rss_ctx_idx = rte_cpu_to_le_16(vnic->rss_rule);
2397         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2398
2399         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2400
2401         HWRM_CHECK_RESULT();
2402         HWRM_UNLOCK();
2403
2404         return rc;
2405 }
2406
2407 int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
2408                         struct bnxt_vnic_info *vnic)
2409 {
2410         int rc = 0;
2411         struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
2412         struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2413         uint16_t size;
2414
2415         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
2416                 PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
2417                 return rc;
2418         }
2419
2420         HWRM_PREP(&req, HWRM_VNIC_PLCMODES_CFG, BNXT_USE_CHIMP_MB);
2421
2422         req.flags = rte_cpu_to_le_32(
2423                         HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);
2424
2425         req.enables = rte_cpu_to_le_32(
2426                 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID);
2427
2428         size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
2429         size -= RTE_PKTMBUF_HEADROOM;
2430         size = RTE_MIN(BNXT_MAX_PKT_LEN, size);
2431
2432         req.jumbo_thresh = rte_cpu_to_le_16(size);
2433         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2434
2435         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2436
2437         HWRM_CHECK_RESULT();
2438         HWRM_UNLOCK();
2439
2440         return rc;
2441 }
2442
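/* Enable or disable TPA (LRO) on a VNIC. Returns -ENOTSUP on P5 chips that
 * lack TPA v2 support.
 */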
2443 int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
2444                         struct bnxt_vnic_info *vnic, bool enable)
2445 {
2446         int rc = 0;
2447         struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
2448         struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2449
2450         if (BNXT_CHIP_P5(bp) && !bp->max_tpa_v2) {
2451                 if (enable)
2452                         PMD_DRV_LOG(ERR, "No HW support for LRO\n");
2453                 return -ENOTSUP;
2454         }
2455
2456         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
2457                 PMD_DRV_LOG(DEBUG, "Invalid vNIC ID\n");
2458                 return 0;
2459         }
2460
2461         HWRM_PREP(&req, HWRM_VNIC_TPA_CFG, BNXT_USE_CHIMP_MB);
2462
2463         if (enable) {
2464                 req.enables = rte_cpu_to_le_32(
2465                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
2466                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS |
2467                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN);
2468                 req.flags = rte_cpu_to_le_32(
2469                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
2470                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
2471                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE |
2472                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO |
2473                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
2474                         HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ);
2475                 req.max_aggs = rte_cpu_to_le_16(BNXT_TPA_MAX_AGGS(bp));
2476                 req.max_agg_segs = rte_cpu_to_le_16(BNXT_TPA_MAX_SEGS(bp));
2477                 req.min_agg_len = rte_cpu_to_le_32(512);
2478         }
2479         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2480
2481         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2482
2483         HWRM_CHECK_RESULT();
2484         HWRM_UNLOCK();
2485
2486         return rc;
2487 }
2488
2489 int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
2490 {
2491         struct hwrm_func_cfg_input req = {0};
2492         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2493         int rc;
2494
2495         req.flags = rte_cpu_to_le_32(bp->pf->vf_info[vf].func_cfg_flags);
2496         req.enables = rte_cpu_to_le_32(
2497                         HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2498         memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
2499         req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
2500
2501         HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
2502
2503         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2504         HWRM_CHECK_RESULT();
2505         HWRM_UNLOCK();
2506
2507         bp->pf->vf_info[vf].random_mac = false;
2508
2509         return rc;
2510 }
2511
2512 int bnxt_hwrm_func_qstats_tx_drop(struct bnxt *bp, uint16_t fid,
2513                                   uint64_t *dropped)
2514 {
2515         int rc = 0;
2516         struct hwrm_func_qstats_input req = {.req_type = 0};
2517         struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
2518
2519         HWRM_PREP(&req, HWRM_FUNC_QSTATS, BNXT_USE_CHIMP_MB);
2520
2521         req.fid = rte_cpu_to_le_16(fid);
2522
2523         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2524
2525         HWRM_CHECK_RESULT();
2526
2527         if (dropped)
2528                 *dropped = rte_le_to_cpu_64(resp->tx_drop_pkts);
2529
2530         HWRM_UNLOCK();
2531
2532         return rc;
2533 }
2534
2535 int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
2536                           struct rte_eth_stats *stats,
2537                           struct hwrm_func_qstats_output *func_qstats)
2538 {
2539         int rc = 0;
2540         struct hwrm_func_qstats_input req = {.req_type = 0};
2541         struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
2542
2543         HWRM_PREP(&req, HWRM_FUNC_QSTATS, BNXT_USE_CHIMP_MB);
2544
2545         req.fid = rte_cpu_to_le_16(fid);
2546
2547         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2548
2549         HWRM_CHECK_RESULT();
2550         if (func_qstats)
2551                 memcpy(func_qstats, resp,
2552                        sizeof(struct hwrm_func_qstats_output));
2553
2554         if (!stats)
2555                 goto exit;
2556
2557         stats->ipackets = rte_le_to_cpu_64(resp->rx_ucast_pkts);
2558         stats->ipackets += rte_le_to_cpu_64(resp->rx_mcast_pkts);
2559         stats->ipackets += rte_le_to_cpu_64(resp->rx_bcast_pkts);
2560         stats->ibytes = rte_le_to_cpu_64(resp->rx_ucast_bytes);
2561         stats->ibytes += rte_le_to_cpu_64(resp->rx_mcast_bytes);
2562         stats->ibytes += rte_le_to_cpu_64(resp->rx_bcast_bytes);
2563
2564         stats->opackets = rte_le_to_cpu_64(resp->tx_ucast_pkts);
2565         stats->opackets += rte_le_to_cpu_64(resp->tx_mcast_pkts);
2566         stats->opackets += rte_le_to_cpu_64(resp->tx_bcast_pkts);
2567         stats->obytes = rte_le_to_cpu_64(resp->tx_ucast_bytes);
2568         stats->obytes += rte_le_to_cpu_64(resp->tx_mcast_bytes);
2569         stats->obytes += rte_le_to_cpu_64(resp->tx_bcast_bytes);
2570
2571         stats->imissed = rte_le_to_cpu_64(resp->rx_discard_pkts);
2572         stats->ierrors = rte_le_to_cpu_64(resp->rx_drop_pkts);
2573         stats->oerrors = rte_le_to_cpu_64(resp->tx_discard_pkts);
2574
2575 exit:
2576         HWRM_UNLOCK();
2577
2578         return rc;
2579 }
2580
2581 int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid)
2582 {
2583         int rc = 0;
2584         struct hwrm_func_clr_stats_input req = {.req_type = 0};
2585         struct hwrm_func_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
2586
2587         HWRM_PREP(&req, HWRM_FUNC_CLR_STATS, BNXT_USE_CHIMP_MB);
2588
2589         req.fid = rte_cpu_to_le_16(fid);
2590
2591         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2592
2593         HWRM_CHECK_RESULT();
2594         HWRM_UNLOCK();
2595
2596         return rc;
2597 }
2598
2599 int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
2600 {
2601         unsigned int i;
2602         int rc = 0;
2603
2604         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
2605                 struct bnxt_tx_queue *txq;
2606                 struct bnxt_rx_queue *rxq;
2607                 struct bnxt_cp_ring_info *cpr;
2608
2609                 if (i >= bp->rx_cp_nr_rings) {
2610                         txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
2611                         cpr = txq->cp_ring;
2612                 } else {
2613                         rxq = bp->rx_queues[i];
2614                         cpr = rxq->cp_ring;
2615                 }
2616
2617                 rc = bnxt_hwrm_stat_clear(bp, cpr);
2618                 if (rc)
2619                         return rc;
2620         }
2621         return 0;
2622 }
2623
2624 static int
2625 bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
2626 {
2627         int rc;
2628         unsigned int i;
2629         struct bnxt_cp_ring_info *cpr;
2630
2631         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
2633                 cpr = bp->rx_queues[i]->cp_ring;
2634                 if (BNXT_HAS_RING_GRPS(bp))
2635                         bp->grp_info[i].fw_stats_ctx = -1;
2636                 rc = bnxt_hwrm_stat_ctx_free(bp, cpr);
2637                 if (rc)
2638                         return rc;
2639         }
2640
2641         for (i = 0; i < bp->tx_cp_nr_rings; i++) {
2642                 cpr = bp->tx_queues[i]->cp_ring;
2643                 rc = bnxt_hwrm_stat_ctx_free(bp, cpr);
2644                 if (rc)
2645                         return rc;
2646         }
2647
2648         return 0;
2649 }
2650
2651 static int
2652 bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
2653 {
2654         uint16_t idx;
2655         uint32_t rc = 0;
2656
2657         if (!BNXT_HAS_RING_GRPS(bp))
2658                 return 0;
2659
2660         for (idx = 0; idx < bp->rx_cp_nr_rings; idx++) {
2662                 if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID)
2663                         continue;
2664
2665                 rc = bnxt_hwrm_ring_grp_free(bp, idx);
2666
2667                 if (rc)
2668                         return rc;
2669         }
2670         return rc;
2671 }
2672
2673 void bnxt_free_nq_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
2674 {
2675         struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
2676
2677         bnxt_hwrm_ring_free(bp, cp_ring,
2678                             HWRM_RING_FREE_INPUT_RING_TYPE_NQ,
2679                             INVALID_HW_RING_ID);
2680         memset(cpr->cp_desc_ring, 0,
2681                cpr->cp_ring_struct->ring_size * sizeof(*cpr->cp_desc_ring));
2682         cpr->cp_raw_cons = 0;
2683 }
2684
2685 void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
2686 {
2687         struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
2688
2689         bnxt_hwrm_ring_free(bp, cp_ring,
2690                             HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL,
2691                             INVALID_HW_RING_ID);
2692         memset(cpr->cp_desc_ring, 0,
2693                cpr->cp_ring_struct->ring_size * sizeof(*cpr->cp_desc_ring));
2694         cpr->cp_raw_cons = 0;
2695 }
2696
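/* Release all firmware resources of one Rx queue: ring group, Rx and
 * aggregation rings, statistics context and completion ring.
 */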
2697 void bnxt_free_hwrm_rx_ring(struct bnxt *bp, int queue_index)
2698 {
2699         struct bnxt_rx_queue *rxq = bp->rx_queues[queue_index];
2700         struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
2701         struct bnxt_ring *ring = rxr->rx_ring_struct;
2702         struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
2703
2704         if (BNXT_HAS_RING_GRPS(bp))
2705                 bnxt_hwrm_ring_grp_free(bp, queue_index);
2706
2707         bnxt_hwrm_ring_free(bp, ring,
2708                             HWRM_RING_FREE_INPUT_RING_TYPE_RX,
2709                             cpr->cp_ring_struct->fw_ring_id);
2710         if (BNXT_HAS_RING_GRPS(bp))
2711                 bp->grp_info[queue_index].rx_fw_ring_id = INVALID_HW_RING_ID;
2712
2713         /* Check agg ring struct explicitly.
2714          * bnxt_need_agg_ring() returns the current state of offload flags,
2715          * but we may have to deal with agg ring struct before the offload
2716          * flags are updated.
2717          */
2718         if (!bnxt_need_agg_ring(bp->eth_dev) || rxr->ag_ring_struct == NULL)
2719                 goto no_agg;
2720
2721         ring = rxr->ag_ring_struct;
2722         bnxt_hwrm_ring_free(bp, ring,
2723                             BNXT_CHIP_P5(bp) ?
2724                             HWRM_RING_FREE_INPUT_RING_TYPE_RX_AGG :
2725                             HWRM_RING_FREE_INPUT_RING_TYPE_RX,
2726                             cpr->cp_ring_struct->fw_ring_id);
2727         if (BNXT_HAS_RING_GRPS(bp))
2728                 bp->grp_info[queue_index].ag_fw_ring_id = INVALID_HW_RING_ID;
2729
2730 no_agg:
2731         bnxt_hwrm_stat_ctx_free(bp, cpr);
2732
2733         bnxt_free_cp_ring(bp, cpr);
2734
2735         if (BNXT_HAS_RING_GRPS(bp))
2736                 bp->grp_info[queue_index].cp_fw_ring_id = INVALID_HW_RING_ID;
2737 }
2738
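/*
 * Request a firmware reset of the RX ring group backing the given queue
 * (ring_type RX_RING_GRP, ring_id taken from grp_info[queue_index]).
 */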
2739 int bnxt_hwrm_rx_ring_reset(struct bnxt *bp, int queue_index)
2740 {
2741         int rc;
2742         struct hwrm_ring_reset_input req = {.req_type = 0 };
2743         struct hwrm_ring_reset_output *resp = bp->hwrm_cmd_resp_addr;
2744
2745         HWRM_PREP(&req, HWRM_RING_RESET, BNXT_USE_CHIMP_MB);
2746
2747         req.ring_type = HWRM_RING_RESET_INPUT_RING_TYPE_RX_RING_GRP;
2748         req.ring_id = rte_cpu_to_le_16(bp->grp_info[queue_index].fw_grp_id);
2749         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2750
2751         HWRM_CHECK_RESULT();
2752
2753         HWRM_UNLOCK();
2754
2755         return rc;
2756 }
2757
2758 static int
2759 bnxt_free_all_hwrm_rings(struct bnxt *bp)
2760 {
2761         unsigned int i;
2762
2763         for (i = 0; i < bp->tx_cp_nr_rings; i++)
2764                 bnxt_free_hwrm_tx_ring(bp, i);
2765
2766         for (i = 0; i < bp->rx_cp_nr_rings; i++)
2767                 bnxt_free_hwrm_rx_ring(bp, i);
2768
2769         return 0;
2770 }
2771
2772 int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
2773 {
2774         uint16_t i;
2775         int rc = 0;
2776
2777         if (!BNXT_HAS_RING_GRPS(bp))
2778                 return 0;
2779
2780         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
2781                 rc = bnxt_hwrm_ring_grp_alloc(bp, i);
2782                 if (rc)
2783                         return rc;
2784         }
2785         return rc;
2786 }
2787
2788 /*
2789  * HWRM utility functions
2790  */
2791
2792 void bnxt_free_hwrm_resources(struct bnxt *bp)
2793 {
2794         /* Free the HWRM command response and short command request buffers */
2795         rte_free(bp->hwrm_cmd_resp_addr);
2796         rte_free(bp->hwrm_short_cmd_req_addr);
2797         bp->hwrm_cmd_resp_addr = NULL;
2798         bp->hwrm_short_cmd_req_addr = NULL;
2799         bp->hwrm_cmd_resp_dma_addr = 0;
2800         bp->hwrm_short_cmd_req_dma_addr = 0;
2801 }
2802
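/*
 * Allocate the DMA-able buffer used for HWRM command responses (one
 * BNXT_PAGE_SIZE page) and initialize the lock that serializes HWRM
 * commands. The buffer's IOVA is passed to firmware with each request
 * so it can DMA the response back.
 */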
2803 int bnxt_alloc_hwrm_resources(struct bnxt *bp)
2804 {
2805         struct rte_pci_device *pdev = bp->pdev;
2806         char type[RTE_MEMZONE_NAMESIZE];
2807
2808         sprintf(type, "bnxt_hwrm_" PCI_PRI_FMT, pdev->addr.domain,
2809                 pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
2810         bp->max_resp_len = BNXT_PAGE_SIZE;
2811         bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
2812         if (bp->hwrm_cmd_resp_addr == NULL)
2813                 return -ENOMEM;
2814         bp->hwrm_cmd_resp_dma_addr =
2815                 rte_malloc_virt2iova(bp->hwrm_cmd_resp_addr);
2816         if (bp->hwrm_cmd_resp_dma_addr == RTE_BAD_IOVA) {
2817                 PMD_DRV_LOG(ERR,
2818                         "unable to map response address to physical memory\n");
2819                 return -ENOMEM;
2820         }
2821         rte_spinlock_init(&bp->hwrm_lock);
2822
2823         return 0;
2824 }
2825
2826 int
2827 bnxt_clear_one_vnic_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
2828 {
2829         int rc = 0;
2830
2831         if (filter->filter_type == HWRM_CFA_EM_FILTER) {
2832                 rc = bnxt_hwrm_clear_em_filter(bp, filter);
2833                 if (rc)
2834                         return rc;
2835         } else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) {
2836                 rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
2837                 if (rc)
2838                         return rc;
2839         }
2840
2841         rc = bnxt_hwrm_clear_l2_filter(bp, filter);
2842         return rc;
2843 }
2844
2845 static int
2846 bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2847 {
2848         struct bnxt_filter_info *filter;
2849         int rc = 0;
2850
2851         while ((filter = STAILQ_FIRST(&vnic->filter)) != NULL) {
2852                 rc = bnxt_clear_one_vnic_filter(bp, filter);
2853                 STAILQ_REMOVE_HEAD(&vnic->filter, next);
2854                 bnxt_free_filter(bp, filter);
2855         }
2856         return rc;
2857 }
2858
2859 static int
2860 bnxt_clear_hwrm_vnic_flows(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2861 {
2862         struct bnxt_filter_info *filter;
2863         struct rte_flow *flow;
2864         int rc = 0;
2865
2866         while (!STAILQ_EMPTY(&vnic->flow_list)) {
2867                 flow = STAILQ_FIRST(&vnic->flow_list);
2868                 filter = flow->filter;
2869                 PMD_DRV_LOG(DEBUG, "filter type %d\n", filter->filter_type);
2870                 rc = bnxt_clear_one_vnic_filter(bp, filter);
2871
2872                 STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
2873                 rte_free(flow);
2874         }
2875         return rc;
2876 }
2877
2878 int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2879 {
2880         struct bnxt_filter_info *filter;
2881         int rc = 0;
2882
2883         STAILQ_FOREACH(filter, &vnic->filter, next) {
2884                 if (filter->filter_type == HWRM_CFA_EM_FILTER)
2885                         rc = bnxt_hwrm_set_em_filter(bp, filter->dst_id,
2886                                                      filter);
2887                 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
2888                         rc = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id,
2889                                                          filter);
2890                 else
2891                         rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
2892                                                      filter);
2893                 if (rc)
2894                         break;
2895         }
2896         return rc;
2897 }
2898
2899 static void
2900 bnxt_free_tunnel_ports(struct bnxt *bp)
2901 {
2902         if (bp->vxlan_port_cnt)
2903                 bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id,
2904                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN);
2905
2906         if (bp->geneve_port_cnt)
2907                 bnxt_hwrm_tunnel_dst_port_free(bp, bp->geneve_fw_dst_port_id,
2908                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE);
2909 }
2910
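/*
 * Tear down all HWRM-managed resources for the port: per-VNIC flows,
 * filters, RSS contexts, TPA state and the VNICs themselves, followed by
 * rings, ring groups, stat contexts and tunnel destination ports.
 */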
2911 void bnxt_free_all_hwrm_resources(struct bnxt *bp)
2912 {
2913         int i;
2914
2915         if (bp->vnic_info == NULL)
2916                 return;
2917
2918         /*
2919          * Cleanup VNICs in reverse order, to make sure the L2 filter
2920          * from vnic0 is last to be cleaned up.
2921          */
2922         for (i = bp->max_vnics - 1; i >= 0; i--) {
2923                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
2924
2925                 if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
2926                         continue;
2927
2928                 bnxt_clear_hwrm_vnic_flows(bp, vnic);
2929
2930                 bnxt_clear_hwrm_vnic_filters(bp, vnic);
2931
2932                 bnxt_hwrm_vnic_ctx_free(bp, vnic);
2933
2934                 bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);
2935
2936                 bnxt_hwrm_vnic_free(bp, vnic);
2937
2938                 rte_free(vnic->fw_grp_ids);
2939         }
2940         /* Ring resources */
2941         bnxt_free_all_hwrm_rings(bp);
2942         bnxt_free_all_hwrm_ring_grps(bp);
2943         bnxt_free_all_hwrm_stat_ctxs(bp);
2944         bnxt_free_tunnel_ports(bp);
2945 }
2946
2947 static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
2948 {
2949         uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
2950
2951         if ((conf_link_speed & RTE_ETH_LINK_SPEED_FIXED) == RTE_ETH_LINK_SPEED_AUTONEG)
2952                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
2953
2954         switch (conf_link_speed) {
2955         case RTE_ETH_LINK_SPEED_10M_HD:
2956         case RTE_ETH_LINK_SPEED_100M_HD:
2957                 /* FALLTHROUGH */
2958                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
2959         }
2960         return hw_link_duplex;
2961 }
2962
2963 static uint16_t bnxt_check_eth_link_autoneg(uint32_t conf_link)
2964 {
2965         return !conf_link;
2966 }
2967
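/*
 * Map an RTE_ETH_LINK_SPEED_* value to the HWRM link speed code. For
 * 50G and 100G the PAM4 variant is selected when the pam4_link argument
 * (the current link signal mode) is non-zero.
 */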
2968 static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed,
2969                                           uint16_t pam4_link)
2970 {
2971         uint16_t eth_link_speed = 0;
2972
2973         if (conf_link_speed == RTE_ETH_LINK_SPEED_AUTONEG)
2974                 return RTE_ETH_LINK_SPEED_AUTONEG;
2975
2976         switch (conf_link_speed & ~RTE_ETH_LINK_SPEED_FIXED) {
2977         case RTE_ETH_LINK_SPEED_100M:
2978         case RTE_ETH_LINK_SPEED_100M_HD:
2979                 /* FALLTHROUGH */
2980                 eth_link_speed =
2981                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
2982                 break;
2983         case RTE_ETH_LINK_SPEED_1G:
2984                 eth_link_speed =
2985                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
2986                 break;
2987         case RTE_ETH_LINK_SPEED_2_5G:
2988                 eth_link_speed =
2989                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
2990                 break;
2991         case RTE_ETH_LINK_SPEED_10G:
2992                 eth_link_speed =
2993                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
2994                 break;
2995         case RTE_ETH_LINK_SPEED_20G:
2996                 eth_link_speed =
2997                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
2998                 break;
2999         case RTE_ETH_LINK_SPEED_25G:
3000                 eth_link_speed =
3001                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
3002                 break;
3003         case RTE_ETH_LINK_SPEED_40G:
3004                 eth_link_speed =
3005                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
3006                 break;
3007         case RTE_ETH_LINK_SPEED_50G:
3008                 eth_link_speed = pam4_link ?
3009                         HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_50GB :
3010                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
3011                 break;
3012         case RTE_ETH_LINK_SPEED_100G:
3013                 eth_link_speed = pam4_link ?
3014                         HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_100GB :
3015                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB;
3016                 break;
3017         case RTE_ETH_LINK_SPEED_200G:
3018                 eth_link_speed =
3019                         HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_200GB;
3020                 break;
3021         default:
3022                 PMD_DRV_LOG(ERR,
3023                         "Unsupported link speed %d; default to AUTO\n",
3024                         conf_link_speed);
3025                 break;
3026         }
3027         return eth_link_speed;
3028 }
3029
3030 #define BNXT_SUPPORTED_SPEEDS (RTE_ETH_LINK_SPEED_100M | RTE_ETH_LINK_SPEED_100M_HD | \
3031                 RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_2_5G | \
3032                 RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_20G | RTE_ETH_LINK_SPEED_25G | \
3033                 RTE_ETH_LINK_SPEED_40G | RTE_ETH_LINK_SPEED_50G | \
3034                 RTE_ETH_LINK_SPEED_100G | RTE_ETH_LINK_SPEED_200G)
3035
3036 static int bnxt_validate_link_speed(struct bnxt *bp)
3037 {
3038         uint32_t link_speed = bp->eth_dev->data->dev_conf.link_speeds;
3039         uint16_t port_id = bp->eth_dev->data->port_id;
3040         uint32_t link_speed_capa;
3041         uint32_t one_speed;
3042
3043         if (link_speed == RTE_ETH_LINK_SPEED_AUTONEG)
3044                 return 0;
3045
3046         link_speed_capa = bnxt_get_speed_capabilities(bp);
3047
3048         if (link_speed & RTE_ETH_LINK_SPEED_FIXED) {
3049                 one_speed = link_speed & ~RTE_ETH_LINK_SPEED_FIXED;
3050
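		/*
		 * With the FIXED flag exactly one speed bit may be set;
		 * one_speed & (one_speed - 1) is non-zero whenever more than
		 * one bit is set (e.g. 10G | 25G), so such masks are rejected.
		 */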
3051                 if (one_speed & (one_speed - 1)) {
3052                         PMD_DRV_LOG(ERR,
3053                                 "Invalid advertised speeds (%u) for port %u\n",
3054                                 link_speed, port_id);
3055                         return -EINVAL;
3056                 }
3057                 if ((one_speed & link_speed_capa) != one_speed) {
3058                         PMD_DRV_LOG(ERR,
3059                                 "Unsupported advertised speed (%u) for port %u\n",
3060                                 link_speed, port_id);
3061                         return -EINVAL;
3062                 }
3063         } else {
3064                 if (!(link_speed & link_speed_capa)) {
3065                         PMD_DRV_LOG(ERR,
3066                                 "Unsupported advertised speeds (%u) for port %u\n",
3067                                 link_speed, port_id);
3068                         return -EINVAL;
3069                 }
3070         }
3071         return 0;
3072 }
3073
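/*
 * Convert RTE_ETH_LINK_SPEED_* advertisement flags into the HWRM
 * auto_link_speed_mask. For plain autoneg the firmware-reported
 * support_speeds mask is returned when available, otherwise all speeds
 * supported by the driver are advertised.
 */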
3074 static uint16_t
3075 bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed)
3076 {
3077         uint16_t ret = 0;
3078
3079         if (link_speed == RTE_ETH_LINK_SPEED_AUTONEG) {
3080                 if (bp->link_info->support_speeds)
3081                         return bp->link_info->support_speeds;
3082                 link_speed = BNXT_SUPPORTED_SPEEDS;
3083         }
3084
3085         if (link_speed & RTE_ETH_LINK_SPEED_100M)
3086                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
3087         if (link_speed & RTE_ETH_LINK_SPEED_100M_HD)
3088                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
3089         if (link_speed & RTE_ETH_LINK_SPEED_1G)
3090                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
3091         if (link_speed & RTE_ETH_LINK_SPEED_2_5G)
3092                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
3093         if (link_speed & RTE_ETH_LINK_SPEED_10G)
3094                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
3095         if (link_speed & RTE_ETH_LINK_SPEED_20G)
3096                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
3097         if (link_speed & RTE_ETH_LINK_SPEED_25G)
3098                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
3099         if (link_speed & RTE_ETH_LINK_SPEED_40G)
3100                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
3101         if (link_speed & RTE_ETH_LINK_SPEED_50G)
3102                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
3103         if (link_speed & RTE_ETH_LINK_SPEED_100G)
3104                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100GB;
3105         if (link_speed & RTE_ETH_LINK_SPEED_200G)
3106                 ret |= HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_200GB;
3107         return ret;
3108 }
3109
3110 static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
3111 {
3112         uint32_t eth_link_speed = RTE_ETH_SPEED_NUM_NONE;
3113
3114         switch (hw_link_speed) {
3115         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
3116                 eth_link_speed = RTE_ETH_SPEED_NUM_100M;
3117                 break;
3118         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
3119                 eth_link_speed = RTE_ETH_SPEED_NUM_1G;
3120                 break;
3121         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
3122                 eth_link_speed = RTE_ETH_SPEED_NUM_2_5G;
3123                 break;
3124         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
3125                 eth_link_speed = RTE_ETH_SPEED_NUM_10G;
3126                 break;
3127         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
3128                 eth_link_speed = RTE_ETH_SPEED_NUM_20G;
3129                 break;
3130         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
3131                 eth_link_speed = RTE_ETH_SPEED_NUM_25G;
3132                 break;
3133         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
3134                 eth_link_speed = RTE_ETH_SPEED_NUM_40G;
3135                 break;
3136         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
3137                 eth_link_speed = RTE_ETH_SPEED_NUM_50G;
3138                 break;
3139         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB:
3140                 eth_link_speed = RTE_ETH_SPEED_NUM_100G;
3141                 break;
3142         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_200GB:
3143                 eth_link_speed = RTE_ETH_SPEED_NUM_200G;
3144                 break;
3145         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
3146         default:
3147                 PMD_DRV_LOG(ERR, "HWRM link speed %d not defined\n",
3148                         hw_link_speed);
3149                 break;
3150         }
3151         return eth_link_speed;
3152 }
3153
3154 static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
3155 {
3156         uint16_t eth_link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
3157
3158         switch (hw_link_duplex) {
3159         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
3160         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
3161                 /* FALLTHROUGH */
3162                 eth_link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
3163                 break;
3164         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
3165                 eth_link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
3166                 break;
3167         default:
3168                 PMD_DRV_LOG(ERR, "HWRM link duplex %d not defined\n",
3169                         hw_link_duplex);
3170                 break;
3171         }
3172         return eth_link_duplex;
3173 }
3174
3175 int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
3176 {
3177         int rc = 0;
3178         struct bnxt_link_info *link_info = bp->link_info;
3179
3180         rc = bnxt_hwrm_port_phy_qcaps(bp);
3181         if (rc)
3182                 PMD_DRV_LOG(ERR, "Get link config failed with rc %d\n", rc);
3183
3184         rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
3185         if (rc) {
3186                 PMD_DRV_LOG(ERR, "Get link config failed with rc %d\n", rc);
3187                 goto exit;
3188         }
3189
3190         if (link_info->link_speed)
3191                 link->link_speed =
3192                         bnxt_parse_hw_link_speed(link_info->link_speed);
3193         else
3194                 link->link_speed = RTE_ETH_SPEED_NUM_NONE;
3195         link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
3196         link->link_status = link_info->link_up;
3197         link->link_autoneg = link_info->auto_mode ==
3198                 HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
3199                 RTE_ETH_LINK_FIXED : RTE_ETH_LINK_AUTONEG;
3200 exit:
3201         return rc;
3202 }
3203
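/*
 * Apply dev_conf->link_speeds to the PHY: restart autoneg with an
 * advertised-speed mask when the firmware reports auto speeds, otherwise
 * force a single NRZ or PAM4 speed. Functions that do not own the port
 * (VFs, multi-function PFs) return early without touching the PHY.
 */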
3204 int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
3205 {
3206         int rc = 0;
3207         struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
3208         struct bnxt_link_info link_req;
3209         uint16_t speed, autoneg;
3210
3211         if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp))
3212                 return 0;
3213
3214         rc = bnxt_validate_link_speed(bp);
3215         if (rc)
3216                 goto error;
3217
3218         memset(&link_req, 0, sizeof(link_req));
3219         link_req.link_up = link_up;
3220         if (!link_up)
3221                 goto port_phy_cfg;
3222
3223         autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
3224         if (BNXT_CHIP_P5(bp) &&
3225             dev_conf->link_speeds == RTE_ETH_LINK_SPEED_40G) {
3226                 /* 40G is not supported as part of media auto detect.
3227                  * The speed should be forced and autoneg disabled
3228                  * to configure 40G speed.
3229                  */
3230                 PMD_DRV_LOG(INFO, "Disabling autoneg for 40G\n");
3231                 autoneg = 0;
3232         }
3233
3234         /* No auto speeds and no auto_pam4_link. Disable autoneg */
3235         if (bp->link_info->auto_link_speed == 0 &&
3236             bp->link_info->link_signal_mode &&
3237             bp->link_info->auto_pam4_link_speeds == 0)
3238                 autoneg = 0;
3239
3240         speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds,
3241                                           bp->link_info->link_signal_mode);
3242         link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
3243         /* Autoneg can be used only when the FW reports supported auto speeds. */
3244         if (autoneg == 1 && bp->link_info->support_auto_speeds) {
3245                 link_req.phy_flags |=
3246                                 HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
3247                 link_req.auto_link_speed_mask =
3248                         bnxt_parse_eth_link_speed_mask(bp,
3249                                                        dev_conf->link_speeds);
3250                 link_req.auto_pam4_link_speeds =
3251                         bp->link_info->auto_pam4_link_speeds;
3252         } else {
3253                 if (bp->link_info->phy_type ==
3254                     HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET ||
3255                     bp->link_info->phy_type ==
3256                     HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE ||
3257                     bp->link_info->media_type ==
3258                     HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP) {
3259                         PMD_DRV_LOG(ERR, "10GBase-T devices must autoneg\n");
3260                         return -EINVAL;
3261                 }
3262
3263                 link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
3264                 /* If user wants a particular speed try that first. */
3265                 if (speed)
3266                         link_req.link_speed = speed;
3267                 else if (bp->link_info->force_pam4_link_speed)
3268                         link_req.link_speed =
3269                                 bp->link_info->force_pam4_link_speed;
3270                 else if (bp->link_info->auto_pam4_link_speeds)
3271                         link_req.link_speed =
3272                                 bp->link_info->auto_pam4_link_speeds;
3273                 else if (bp->link_info->support_pam4_speeds)
3274                         link_req.link_speed =
3275                                 bp->link_info->support_pam4_speeds;
3276                 else if (bp->link_info->force_link_speed)
3277                         link_req.link_speed = bp->link_info->force_link_speed;
3278                 else
3279                         link_req.link_speed = bp->link_info->auto_link_speed;
3280                 /* Auto PAM4 link speed is zero, but auto_link_speed is not
3281                  * zero. Use the auto_link_speed.
3282                  */
3283                 if (bp->link_info->auto_link_speed != 0 &&
3284                     bp->link_info->auto_pam4_link_speeds == 0)
3285                         link_req.link_speed = bp->link_info->auto_link_speed;
3286         }
3287         link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
3288         link_req.auto_pause = bp->link_info->auto_pause;
3289         link_req.force_pause = bp->link_info->force_pause;
3290
3291 port_phy_cfg:
3292         rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
3293         if (rc) {
3294                 PMD_DRV_LOG(ERR,
3295                         "Set link config failed with rc %d\n", rc);
3296         }
3297
3298 error:
3299         return rc;
3300 }
3301
3302 int bnxt_hwrm_func_qcfg(struct bnxt *bp, uint16_t *mtu)
3303 {
3304         struct hwrm_func_qcfg_input req = {0};
3305         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3306         uint16_t flags;
3307         uint16_t svif_info;
3308         int rc = 0;
3309         bp->func_svif = BNXT_SVIF_INVALID;
3310
3311         HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
3312         req.fid = rte_cpu_to_le_16(0xffff);
3313
3314         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3315
3316         HWRM_CHECK_RESULT();
3317
3318         bp->vlan = rte_le_to_cpu_16(resp->vlan) & RTE_ETH_VLAN_ID_MAX;
3319
3320         svif_info = rte_le_to_cpu_16(resp->svif_info);
3321         if (svif_info & HWRM_FUNC_QCFG_OUTPUT_SVIF_INFO_SVIF_VALID)
3322                 bp->func_svif = svif_info &
3323                                      HWRM_FUNC_QCFG_OUTPUT_SVIF_INFO_SVIF_MASK;
3324
3325         flags = rte_le_to_cpu_16(resp->flags);
3326         if (BNXT_PF(bp) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_HOST))
3327                 bp->flags |= BNXT_FLAG_MULTI_HOST;
3328
3329         if (BNXT_VF(bp) &&
3330             !BNXT_VF_IS_TRUSTED(bp) &&
3331             (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_TRUSTED_VF)) {
3332                 bp->flags |= BNXT_FLAG_TRUSTED_VF_EN;
3333                 PMD_DRV_LOG(INFO, "Trusted VF cap enabled\n");
3334         } else if (BNXT_VF(bp) &&
3335                    BNXT_VF_IS_TRUSTED(bp) &&
3336                    !(flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_TRUSTED_VF)) {
3337                 bp->flags &= ~BNXT_FLAG_TRUSTED_VF_EN;
3338                 PMD_DRV_LOG(INFO, "Trusted VF cap disabled\n");
3339         }
3340
3341         if (mtu)
3342                 *mtu = rte_le_to_cpu_16(resp->admin_mtu);
3343
3344         switch (resp->port_partition_type) {
3345         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
3346         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
3347         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
3348                 /* FALLTHROUGH */
3349                 bp->flags |= BNXT_FLAG_NPAR_PF;
3350                 break;
3351         default:
3352                 bp->flags &= ~BNXT_FLAG_NPAR_PF;
3353                 break;
3354         }
3355
3356         bp->legacy_db_size =
3357                 rte_le_to_cpu_16(resp->legacy_l2_db_size_kb) * 1024;
3358
3359         HWRM_UNLOCK();
3360
3361         return rc;
3362 }
3363
3364 int bnxt_hwrm_parent_pf_qcfg(struct bnxt *bp)
3365 {
3366         struct hwrm_func_qcfg_input req = {0};
3367         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3368         uint16_t flags;
3369         int rc;
3370
3371         if (!BNXT_VF_IS_TRUSTED(bp))
3372                 return 0;
3373
3374         if (!bp->parent)
3375                 return -EINVAL;
3376
3377         bp->parent->fid = BNXT_PF_FID_INVALID;
3378
3379         HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
3380
3381         req.fid = rte_cpu_to_le_16(0xfffe); /* Request parent PF information. */
3382
3383         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3384
3385         HWRM_CHECK_RESULT_SILENT();
3386
3387         memcpy(bp->parent->mac_addr, resp->mac_address, RTE_ETHER_ADDR_LEN);
3388         bp->parent->vnic = rte_le_to_cpu_16(resp->dflt_vnic_id);
3389         bp->parent->fid = rte_le_to_cpu_16(resp->fid);
3390         bp->parent->port_id = rte_le_to_cpu_16(resp->port_id);
3391
3392         flags = rte_le_to_cpu_16(resp->flags);
3393         /* check for the multi-root support */
3394         if (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_ROOT) {
3395                 bp->flags2 |= BNXT_FLAGS2_MULTIROOT_EN;
3396                 PMD_DRV_LOG(DEBUG, "PF enabled with multi root capability\n");
3397         }
3398
3399         HWRM_UNLOCK();
3400
3401         return 0;
3402 }
3403
3404 int bnxt_hwrm_get_dflt_vnic_svif(struct bnxt *bp, uint16_t fid,
3405                                  uint16_t *vnic_id, uint16_t *svif)
3406 {
3407         struct hwrm_func_qcfg_input req = {0};
3408         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3409         uint16_t svif_info;
3410         int rc = 0;
3411
3412         HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
3413         req.fid = rte_cpu_to_le_16(fid);
3414
3415         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3416
3417         HWRM_CHECK_RESULT();
3418
3419         if (vnic_id)
3420                 *vnic_id = rte_le_to_cpu_16(resp->dflt_vnic_id);
3421
3422         svif_info = rte_le_to_cpu_16(resp->svif_info);
3423         if (svif && (svif_info & HWRM_FUNC_QCFG_OUTPUT_SVIF_INFO_SVIF_VALID))
3424                 *svif = svif_info & HWRM_FUNC_QCFG_OUTPUT_SVIF_INFO_SVIF_MASK;
3425
3426         HWRM_UNLOCK();
3427
3428         return rc;
3429 }
3430
3431 int bnxt_hwrm_port_mac_qcfg(struct bnxt *bp)
3432 {
3433         struct hwrm_port_mac_qcfg_input req = {0};
3434         struct hwrm_port_mac_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3435         uint16_t port_svif_info;
3436         int rc;
3437
3438         bp->port_svif = BNXT_SVIF_INVALID;
3439
3440         if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp))
3441                 return 0;
3442
3443         HWRM_PREP(&req, HWRM_PORT_MAC_QCFG, BNXT_USE_CHIMP_MB);
3444
3445         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3446
3447         HWRM_CHECK_RESULT_SILENT();
3448
3449         port_svif_info = rte_le_to_cpu_16(resp->port_svif_info);
3450         if (port_svif_info &
3451             HWRM_PORT_MAC_QCFG_OUTPUT_PORT_SVIF_INFO_PORT_SVIF_VALID)
3452                 bp->port_svif = port_svif_info &
3453                         HWRM_PORT_MAC_QCFG_OUTPUT_PORT_SVIF_INFO_PORT_SVIF_MASK;
3454
3455         HWRM_UNLOCK();
3456
3457         return 0;
3458 }
3459
3460 static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp,
3461                                  struct bnxt_pf_resource_info *pf_resc)
3462 {
3463         struct hwrm_func_cfg_input req = {0};
3464         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3465         uint32_t enables;
3466         int rc;
3467
3468         enables = HWRM_FUNC_CFG_INPUT_ENABLES_ADMIN_MTU |
3469                   HWRM_FUNC_CFG_INPUT_ENABLES_HOST_MTU |
3470                   HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
3471                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
3472                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
3473                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
3474                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
3475                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
3476                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
3477                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS;
3478
3479         if (BNXT_HAS_RING_GRPS(bp)) {
3480                 enables |= HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS;
3481                 req.num_hw_ring_grps =
3482                         rte_cpu_to_le_16(pf_resc->num_hw_ring_grps);
3483         } else if (BNXT_HAS_NQ(bp)) {
3484                 enables |= HWRM_FUNC_CFG_INPUT_ENABLES_NUM_MSIX;
3485                 req.num_msix = rte_cpu_to_le_16(bp->max_nq_rings);
3486         }
3487
3488         req.flags = rte_cpu_to_le_32(bp->pf->func_cfg_flags);
3489         req.admin_mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);
3490         req.host_mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu);
3491         req.mru = rte_cpu_to_le_16(BNXT_VNIC_MRU(bp->eth_dev->data->mtu));
3492         req.num_rsscos_ctxs = rte_cpu_to_le_16(pf_resc->num_rsscos_ctxs);
3493         req.num_stat_ctxs = rte_cpu_to_le_16(pf_resc->num_stat_ctxs);
3494         req.num_cmpl_rings = rte_cpu_to_le_16(pf_resc->num_cp_rings);
3495         req.num_tx_rings = rte_cpu_to_le_16(pf_resc->num_tx_rings);
3496         req.num_rx_rings = rte_cpu_to_le_16(pf_resc->num_rx_rings);
3497         req.num_l2_ctxs = rte_cpu_to_le_16(pf_resc->num_l2_ctxs);
3498         req.num_vnics = rte_cpu_to_le_16(bp->max_vnics);
3499         req.fid = rte_cpu_to_le_16(0xffff);
3500         req.enables = rte_cpu_to_le_32(enables);
3501
3502         HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
3503
3504         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3505
3506         HWRM_CHECK_RESULT();
3507         HWRM_UNLOCK();
3508
3509         return rc;
3510 }
3511
3512 /* min values are the guaranteed resources and max values are subject
3513  * to availability. The strategy for now is to keep both min & max
3514  * values the same.
3515  */
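/*
 * For example, with bp->max_tx_rings = 128 and num_vfs = 7, each VF is
 * offered 128 / (7 + 1) = 16 TX rings, requested as both the minimum
 * and the maximum.
 */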
3516 static void
3517 bnxt_fill_vf_func_cfg_req_new(struct bnxt *bp,
3518                               struct hwrm_func_vf_resource_cfg_input *req,
3519                               int num_vfs)
3520 {
3521         req->max_rsscos_ctx = rte_cpu_to_le_16(bp->max_rsscos_ctx /
3522                                                (num_vfs + 1));
3523         req->min_rsscos_ctx = req->max_rsscos_ctx;
3524         req->max_stat_ctx = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
3525         req->min_stat_ctx = req->max_stat_ctx;
3526         req->max_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings /
3527                                                (num_vfs + 1));
3528         req->min_cmpl_rings = req->max_cmpl_rings;
3529         req->max_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
3530         req->min_tx_rings = req->max_tx_rings;
3531         req->max_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
3532         req->min_rx_rings = req->max_rx_rings;
3533         req->max_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
3534         req->min_l2_ctxs = req->max_l2_ctxs;
3535         /* TODO: For now, do not support VMDq/RFS on VFs. */
3536         req->max_vnics = rte_cpu_to_le_16(1);
3537         req->min_vnics = req->max_vnics;
3538         req->max_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
3539                                                  (num_vfs + 1));
3540         req->min_hw_ring_grps = req->max_hw_ring_grps;
3541         req->flags =
3542          rte_cpu_to_le_16(HWRM_FUNC_VF_RESOURCE_CFG_INPUT_FLAGS_MIN_GUARANTEED);
3543 }
3544
3545 static void
3546 bnxt_fill_vf_func_cfg_req_old(struct bnxt *bp,
3547                               struct hwrm_func_cfg_input *req,
3548                               int num_vfs)
3549 {
3550         req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_ADMIN_MTU |
3551                         HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
3552                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
3553                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
3554                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
3555                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
3556                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
3557                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
3558                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
3559                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
3560
3561         req->admin_mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
3562                                           RTE_ETHER_CRC_LEN + RTE_VLAN_HLEN *
3563                                           BNXT_NUM_VLANS);
3564         req->mru = rte_cpu_to_le_16(BNXT_VNIC_MRU(bp->eth_dev->data->mtu));
3565         req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx /
3566                                                 (num_vfs + 1));
3567         req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
3568         req->num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings /
3569                                                (num_vfs + 1));
3570         req->num_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
3571         req->num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
3572         req->num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
3573         /* TODO: For now, do not support VMDq/RFS on VFs. */
3574         req->num_vnics = rte_cpu_to_le_16(1);
3575         req->num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
3576                                                  (num_vfs + 1));
3577 }
3578
3579 /* Update the port wide resource values based on how many resources
3580  * got allocated to the VF.
3581  */
3582 static int bnxt_update_max_resources(struct bnxt *bp,
3583                                      int vf)
3584 {
3585         struct hwrm_func_qcfg_input req = {0};
3586         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3587         int rc;
3588
3589         /* Get the actual allocated values now */
3590         HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
3591         req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
3592         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3593         HWRM_CHECK_RESULT();
3594
3595         bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->alloc_rsscos_ctx);
3596         bp->max_stat_ctx -= rte_le_to_cpu_16(resp->alloc_stat_ctx);
3597         bp->max_cp_rings -= rte_le_to_cpu_16(resp->alloc_cmpl_rings);
3598         bp->max_tx_rings -= rte_le_to_cpu_16(resp->alloc_tx_rings);
3599         bp->max_rx_rings -= rte_le_to_cpu_16(resp->alloc_rx_rings);
3600         bp->max_l2_ctx -= rte_le_to_cpu_16(resp->alloc_l2_ctx);
3601         bp->max_ring_grps -= rte_le_to_cpu_16(resp->alloc_hw_ring_grps);
3602
3603         HWRM_UNLOCK();
3604
3605         return 0;
3606 }
3607
3608 /* Update the PF resource values based on how many resources
3609  * got allocated to it.
3610  */
3611 static int bnxt_update_max_resources_pf_only(struct bnxt *bp)
3612 {
3613         struct hwrm_func_qcfg_input req = {0};
3614         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3615         int rc;
3616
3617         /* Get the actual allocated values now */
3618         HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
3619         req.fid = rte_cpu_to_le_16(0xffff);
3620         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3621         HWRM_CHECK_RESULT();
3622
3623         bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->alloc_rsscos_ctx);
3624         bp->max_stat_ctx = rte_le_to_cpu_16(resp->alloc_stat_ctx);
3625         bp->max_cp_rings = rte_le_to_cpu_16(resp->alloc_cmpl_rings);
3626         bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
3627         bp->max_rx_rings = rte_le_to_cpu_16(resp->alloc_rx_rings);
3628         bp->max_l2_ctx = rte_le_to_cpu_16(resp->alloc_l2_ctx);
3629         bp->max_ring_grps = rte_le_to_cpu_16(resp->alloc_hw_ring_grps);
3630         bp->max_vnics = rte_le_to_cpu_16(resp->alloc_vnics);
3631
3632         HWRM_UNLOCK();
3633
3634         return 0;
3635 }
3636
3637 int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
3638 {
3639         struct hwrm_func_qcfg_input req = {0};
3640         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3641         int rc;
3642
3643         /* Query the VF's currently configured default VLAN */
3644         HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
3645         req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
3646         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3647         HWRM_CHECK_RESULT();
3648         rc = rte_le_to_cpu_16(resp->vlan);
3649
3650         HWRM_UNLOCK();
3651
3652         return rc;
3653 }
3654
3655 static int bnxt_query_pf_resources(struct bnxt *bp,
3656                                    struct bnxt_pf_resource_info *pf_resc)
3657 {
3658         struct hwrm_func_qcfg_input req = {0};
3659         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3660         int rc;
3661
3662         /* Query the allocated resource counts and copy them into pf_resc */
3663         HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
3664         req.fid = rte_cpu_to_le_16(0xffff);
3665         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3666         HWRM_CHECK_RESULT();
3667
3668         pf_resc->num_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
3669         pf_resc->num_rsscos_ctxs = rte_le_to_cpu_16(resp->alloc_rsscos_ctx);
3670         pf_resc->num_stat_ctxs = rte_le_to_cpu_16(resp->alloc_stat_ctx);
3671         pf_resc->num_cp_rings = rte_le_to_cpu_16(resp->alloc_cmpl_rings);
3672         pf_resc->num_rx_rings = rte_le_to_cpu_16(resp->alloc_rx_rings);
3673         pf_resc->num_l2_ctxs = rte_le_to_cpu_16(resp->alloc_l2_ctx);
3674         pf_resc->num_hw_ring_grps = rte_le_to_cpu_32(resp->alloc_hw_ring_grps);
3675         bp->pf->evb_mode = resp->evb_mode;
3676
3677         HWRM_UNLOCK();
3678
3679         return rc;
3680 }
3681
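/*
 * Compute the PF's share of each resource pool. With no VFs the PF keeps
 * everything; otherwise it gets an equal share plus the remainder. For
 * example, max_cp_rings = 100 and num_vfs = 7 leaves the PF
 * 100 / 8 + 100 % 8 = 12 + 4 = 16 completion rings.
 */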
3682 static void
3683 bnxt_calculate_pf_resources(struct bnxt *bp,
3684                             struct bnxt_pf_resource_info *pf_resc,
3685                             int num_vfs)
3686 {
3687         if (!num_vfs) {
3688                 pf_resc->num_rsscos_ctxs = bp->max_rsscos_ctx;
3689                 pf_resc->num_stat_ctxs = bp->max_stat_ctx;
3690                 pf_resc->num_cp_rings = bp->max_cp_rings;
3691                 pf_resc->num_tx_rings = bp->max_tx_rings;
3692                 pf_resc->num_rx_rings = bp->max_rx_rings;
3693                 pf_resc->num_l2_ctxs = bp->max_l2_ctx;
3694                 pf_resc->num_hw_ring_grps = bp->max_ring_grps;
3695
3696                 return;
3697         }
3698
3699         pf_resc->num_rsscos_ctxs = bp->max_rsscos_ctx / (num_vfs + 1) +
3700                                    bp->max_rsscos_ctx % (num_vfs + 1);
3701         pf_resc->num_stat_ctxs = bp->max_stat_ctx / (num_vfs + 1) +
3702                                  bp->max_stat_ctx % (num_vfs + 1);
3703         pf_resc->num_cp_rings = bp->max_cp_rings / (num_vfs + 1) +
3704                                 bp->max_cp_rings % (num_vfs + 1);
3705         pf_resc->num_tx_rings = bp->max_tx_rings / (num_vfs + 1) +
3706                                 bp->max_tx_rings % (num_vfs + 1);
3707         pf_resc->num_rx_rings = bp->max_rx_rings / (num_vfs + 1) +
3708                                 bp->max_rx_rings % (num_vfs + 1);
3709         pf_resc->num_l2_ctxs = bp->max_l2_ctx / (num_vfs + 1) +
3710                                bp->max_l2_ctx % (num_vfs + 1);
3711         pf_resc->num_hw_ring_grps = bp->max_ring_grps / (num_vfs + 1) +
3712                                     bp->max_ring_grps % (num_vfs + 1);
3713 }
3714
3715 int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
3716 {
3717         struct bnxt_pf_resource_info pf_resc = { 0 };
3718         int rc;
3719
3720         if (!BNXT_PF(bp)) {
3721                 PMD_DRV_LOG(ERR, "Attempt to allocate VFs on a VF!\n");
3722                 return -EINVAL;
3723         }
3724
3725         rc = bnxt_hwrm_func_qcaps(bp);
3726         if (rc)
3727                 return rc;
3728
3729         bnxt_calculate_pf_resources(bp, &pf_resc, 0);
3730
3731         bp->pf->func_cfg_flags &=
3732                 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
3733                   HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
3734         bp->pf->func_cfg_flags |=
3735                 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
3736
3737         rc = bnxt_hwrm_pf_func_cfg(bp, &pf_resc);
3738         if (rc)
3739                 return rc;
3740
3741         rc = bnxt_update_max_resources_pf_only(bp);
3742
3743         return rc;
3744 }
3745
3746 static int
3747 bnxt_configure_vf_req_buf(struct bnxt *bp, int num_vfs)
3748 {
3749         size_t req_buf_sz, sz;
3750         int i, rc;
3751
3752         req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;
3753         bp->pf->vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
3754                 page_roundup(num_vfs * HWRM_MAX_REQ_LEN));
3755         if (bp->pf->vf_req_buf == NULL) {
3756                 return -ENOMEM;
3757         }
3758
3759         for (sz = 0; sz < req_buf_sz; sz += getpagesize())
3760                 rte_mem_lock_page(((char *)bp->pf->vf_req_buf) + sz);
3761
3762         for (i = 0; i < num_vfs; i++)
3763                 bp->pf->vf_info[i].req_buf = ((char *)bp->pf->vf_req_buf) +
3764                                              (i * HWRM_MAX_REQ_LEN);
3765
3766         rc = bnxt_hwrm_func_buf_rgtr(bp, num_vfs);
3767         if (rc)
3768                 rte_free(bp->pf->vf_req_buf);
3769
3770         return rc;
3771 }
3772
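/*
 * Configure each VF through HWRM_FUNC_VF_RESOURCE_CFG (the "new"
 * resource-manager flow). Provisioning stops at the first failure; if
 * the very first VF fails, the forwarded-request buffer is unregistered.
 * VFs configured before a failure remain counted in bp->pf->active_vfs.
 */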
3773 static int
3774 bnxt_process_vf_resc_config_new(struct bnxt *bp, int num_vfs)
3775 {
3776         struct hwrm_func_vf_resource_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3777         struct hwrm_func_vf_resource_cfg_input req = {0};
3778         int i, rc = 0;
3779
3780         bnxt_fill_vf_func_cfg_req_new(bp, &req, num_vfs);
3781         bp->pf->active_vfs = 0;
3782         for (i = 0; i < num_vfs; i++) {
3783                 HWRM_PREP(&req, HWRM_FUNC_VF_RESOURCE_CFG, BNXT_USE_CHIMP_MB);
3784                 req.vf_id = rte_cpu_to_le_16(bp->pf->vf_info[i].fid);
3785                 rc = bnxt_hwrm_send_message(bp,
3786                                             &req,
3787                                             sizeof(req),
3788                                             BNXT_USE_CHIMP_MB);
3789                 if (rc || resp->error_code) {
3790                         PMD_DRV_LOG(ERR,
3791                                 "Failed to initialize VF %d\n", i);
3792                         PMD_DRV_LOG(ERR,
3793                                 "Not all VFs available. (%d, %d)\n",
3794                                 rc, resp->error_code);
3795                         HWRM_UNLOCK();
3796
3797                         /* If the first VF configuration itself fails,
3798                          * unregister the vf_fwd_request buffer.
3799                          */
3800                         if (i == 0)
3801                                 bnxt_hwrm_func_buf_unrgtr(bp);
3802                         break;
3803                 }
3804                 HWRM_UNLOCK();
3805
3806                 /* Update the max resource values based on the resource values
3807                  * allocated to the VF.
3808                  */
3809                 bnxt_update_max_resources(bp, i);
3810                 bp->pf->active_vfs++;
3811                 bnxt_hwrm_func_clr_stats(bp, bp->pf->vf_info[i].fid);
3812         }
3813
3814         return 0;
3815 }
3816
3817 static int
3818 bnxt_process_vf_resc_config_old(struct bnxt *bp, int num_vfs)
3819 {
3820         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3821         struct hwrm_func_cfg_input req = {0};
3822         int i, rc;
3823
3824         bnxt_fill_vf_func_cfg_req_old(bp, &req, num_vfs);
3825
3826         bp->pf->active_vfs = 0;
3827         for (i = 0; i < num_vfs; i++) {
3828                 HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
3829                 req.flags = rte_cpu_to_le_32(bp->pf->vf_info[i].func_cfg_flags);
3830                 req.fid = rte_cpu_to_le_16(bp->pf->vf_info[i].fid);
3831                 rc = bnxt_hwrm_send_message(bp,
3832                                             &req,
3833                                             sizeof(req),
3834                                             BNXT_USE_CHIMP_MB);
3835
3836                 /* Clear enable flag for next pass */
3837                 req.enables &= ~rte_cpu_to_le_32(
3838                                 HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
3839
3840                 if (rc || resp->error_code) {
3841                         PMD_DRV_LOG(ERR,
3842                                 "Failed to initialize VF %d\n", i);
3843                         PMD_DRV_LOG(ERR,
3844                                 "Not all VFs available. (%d, %d)\n",
3845                                 rc, resp->error_code);
3846                         HWRM_UNLOCK();
3847
3848                         /* If the first VF configuration itself fails,
3849                          * unregister the vf_fwd_request buffer.
3850                          */
3851                         if (i == 0)
3852                                 bnxt_hwrm_func_buf_unrgtr(bp);
3853                         break;
3854                 }
3855
3856                 HWRM_UNLOCK();
3857
3858                 /* Update the max resource values based on the resource values
3859                  * allocated to the VF.
3860                  */
3861                 bnxt_update_max_resources(bp, i);
3862                 bp->pf->active_vfs++;
3863                 bnxt_hwrm_func_clr_stats(bp, bp->pf->vf_info[i].fid);
3864         }
3865
3866         return 0;
3867 }
3868
3869 static void
3870 bnxt_configure_vf_resources(struct bnxt *bp, int num_vfs)
3871 {
3872         if (bp->flags & BNXT_FLAG_NEW_RM)
3873                 bnxt_process_vf_resc_config_new(bp, num_vfs);
3874         else
3875                 bnxt_process_vf_resc_config_old(bp, num_vfs);
3876 }
3877
3878 static void
3879 bnxt_update_pf_resources(struct bnxt *bp,
3880                          struct bnxt_pf_resource_info *pf_resc)
3881 {
3882         bp->max_rsscos_ctx = pf_resc->num_rsscos_ctxs;
3883         bp->max_stat_ctx = pf_resc->num_stat_ctxs;
3884         bp->max_cp_rings = pf_resc->num_cp_rings;
3885         bp->max_tx_rings = pf_resc->num_tx_rings;
3886         bp->max_rx_rings = pf_resc->num_rx_rings;
3887         bp->max_ring_grps = pf_resc->num_hw_ring_grps;
3888 }
3889
3890 static int32_t
3891 bnxt_configure_pf_resources(struct bnxt *bp,
3892                             struct bnxt_pf_resource_info *pf_resc)
3893 {
3894         /*
3895          * We're using STD_TX_RING_MODE here which will limit the TX
3896          * rings. This will allow QoS to function properly. Not setting this
3897          * will cause PF rings to break bandwidth settings.
3898          */
3899         bp->pf->func_cfg_flags &=
3900                 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
3901                   HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
3902         bp->pf->func_cfg_flags |=
3903                 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
3904         return bnxt_hwrm_pf_func_cfg(bp, pf_resc);
3905 }
3906
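/*
 * Provision num_vfs VFs: re-query capabilities, reserve the PF's share of
 * resources, register the buffer used to forward VF HWRM requests, then
 * configure each VF and finally record the PF's remaining maxima in bp.
 */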
3907 int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
3908 {
3909         struct bnxt_pf_resource_info pf_resc = { 0 };
3910         int rc;
3911
3912         if (!BNXT_PF(bp)) {
3913                 PMD_DRV_LOG(ERR, "Attempt to allocate VFs on a VF!\n");
3914                 return -EINVAL;
3915         }
3916
3917         rc = bnxt_hwrm_func_qcaps(bp);
3918         if (rc)
3919                 return rc;
3920
3921         bnxt_calculate_pf_resources(bp, &pf_resc, num_vfs);
3922
3923         rc = bnxt_configure_pf_resources(bp, &pf_resc);
3924         if (rc)
3925                 return rc;
3926
3927         rc = bnxt_query_pf_resources(bp, &pf_resc);
3928         if (rc)
3929                 return rc;
3930
3931         /*
3932          * Now, create and register a buffer to hold forwarded VF requests
3933          */
3934         rc = bnxt_configure_vf_req_buf(bp, num_vfs);
3935         if (rc)
3936                 return rc;
3937
3938         bnxt_configure_vf_resources(bp, num_vfs);
3939
3940         bnxt_update_pf_resources(bp, &pf_resc);
3941
3942         return 0;
3943 }
3944
3945 int bnxt_hwrm_pf_evb_mode(struct bnxt *bp)
3946 {
3947         struct hwrm_func_cfg_input req = {0};
3948         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3949         int rc;
3950
3951         HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
3952
3953         req.fid = rte_cpu_to_le_16(0xffff);
3954         req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE);
3955         req.evb_mode = bp->pf->evb_mode;
3956
3957         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3958         HWRM_CHECK_RESULT();
3959         HWRM_UNLOCK();
3960
3961         return rc;
3962 }
3963
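/*
 * Allocate a VXLAN or GENEVE destination UDP port in firmware and cache
 * both the firmware-assigned port id and the UDP port number so the port
 * can be freed later.
 */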
3964 int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
3965                                 uint8_t tunnel_type)
3966 {
3967         struct hwrm_tunnel_dst_port_alloc_input req = {0};
3968         struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3969         int rc = 0;
3970
3971         HWRM_PREP(&req, HWRM_TUNNEL_DST_PORT_ALLOC, BNXT_USE_CHIMP_MB);
3972         req.tunnel_type = tunnel_type;
3973         req.tunnel_dst_port_val = rte_cpu_to_be_16(port);
3974         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3975         HWRM_CHECK_RESULT();
3976
3977         switch (tunnel_type) {
3978         case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN:
3979                 bp->vxlan_fw_dst_port_id =
3980                         rte_le_to_cpu_16(resp->tunnel_dst_port_id);
3981                 bp->vxlan_port = port;
3982                 break;
3983         case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE:
3984                 bp->geneve_fw_dst_port_id =
3985                         rte_le_to_cpu_16(resp->tunnel_dst_port_id);
3986                 bp->geneve_port = port;
3987                 break;
3988         default:
3989                 break;
3990         }
3991
3992         HWRM_UNLOCK();
3993
3994         return rc;
3995 }
3996
3997 int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
3998                                 uint8_t tunnel_type)
3999 {
4000         struct hwrm_tunnel_dst_port_free_input req = {0};
4001         struct hwrm_tunnel_dst_port_free_output *resp = bp->hwrm_cmd_resp_addr;
4002         int rc = 0;
4003
4004         HWRM_PREP(&req, HWRM_TUNNEL_DST_PORT_FREE, BNXT_USE_CHIMP_MB);
4005
4006         req.tunnel_type = tunnel_type;
4007         req.tunnel_dst_port_id = rte_cpu_to_be_16(port);
4008         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4009
4010         HWRM_CHECK_RESULT();
4011         HWRM_UNLOCK();
4012
4013         if (tunnel_type ==
4014             HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN) {
4015                 bp->vxlan_port = 0;
4016                 bp->vxlan_port_cnt = 0;
4017         }
4018
4019         if (tunnel_type ==
4020             HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE) {
4021                 bp->geneve_port = 0;
4022                 bp->geneve_port_cnt = 0;
4023         }
4024
4025         return rc;
4026 }
4027
4028 int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf,
4029                                         uint32_t flags)
4030 {
4031         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
4032         struct hwrm_func_cfg_input req = {0};
4033         int rc;
4034
4035         HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
4036
4037         req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
4038         req.flags = rte_cpu_to_le_32(flags);
4039         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4040
4041         HWRM_CHECK_RESULT();
4042         HWRM_UNLOCK();
4043
4044         return rc;
4045 }
4046
4047 void vf_vnic_set_rxmask_cb(struct bnxt_vnic_info *vnic, void *flagp)
4048 {
4049         uint32_t *flag = flagp;
4050
4051         vnic->flags = *flag;
4052 }
4053
4054 int bnxt_set_rx_mask_no_vlan(struct bnxt *bp, struct bnxt_vnic_info *vnic)
4055 {
4056         return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
4057 }
4058
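/*
 * Register the VF request-forwarding buffer with firmware. The buffer
 * holds one HWRM_MAX_REQ_LEN slot per VF and is registered as a single
 * physically contiguous region; page_getenum() encodes its total size as
 * a power of two for req_buf_page_size.
 */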
4059 int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp, int num_vfs)
4060 {
4061         struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
4062         struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
4063         int rc;
4064
4065         HWRM_PREP(&req, HWRM_FUNC_BUF_RGTR, BNXT_USE_CHIMP_MB);
4066
4067         req.req_buf_num_pages = rte_cpu_to_le_16(1);
4068         req.req_buf_page_size =
4069                 rte_cpu_to_le_16(page_getenum(num_vfs * HWRM_MAX_REQ_LEN));
4070         req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
4071         req.req_buf_page_addr0 =
4072                 rte_cpu_to_le_64(rte_malloc_virt2iova(bp->pf->vf_req_buf));
4073         if (req.req_buf_page_addr0 == RTE_BAD_IOVA) {
4074                 PMD_DRV_LOG(ERR,
4075                         "unable to map buffer address to physical memory\n");
4076                 HWRM_UNLOCK();
4077                 return -ENOMEM;
4078         }
4079
4080         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4081
4082         HWRM_CHECK_RESULT();
4083         HWRM_UNLOCK();
4084
4085         return rc;
4086 }
4087
4088 int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp)
4089 {
4090         int rc = 0;
4091         struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 };
4092         struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
4093
4094         if (!(BNXT_PF(bp) && bp->pdev->max_vfs))
4095                 return 0;
4096
4097         HWRM_PREP(&req, HWRM_FUNC_BUF_UNRGTR, BNXT_USE_CHIMP_MB);
4098
4099         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4100
4101         HWRM_CHECK_RESULT();
4102         HWRM_UNLOCK();
4103
4104         return rc;
4105 }
4106
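/*
 * Point firmware asynchronous event notifications at the driver's async
 * completion ring (PF variant, via HWRM_FUNC_CFG).
 */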
4107 int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
4108 {
4109         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
4110         struct hwrm_func_cfg_input req = {0};
4111         int rc;
4112
4113         HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
4114
4115         req.fid = rte_cpu_to_le_16(0xffff);
4116         req.flags = rte_cpu_to_le_32(bp->pf->func_cfg_flags);
4117         req.enables = rte_cpu_to_le_32(
4118                         HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
4119         req.async_event_cr = rte_cpu_to_le_16(
4120                         bp->async_cp_ring->cp_ring_struct->fw_ring_id);
4121         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4122
4123         HWRM_CHECK_RESULT();
4124         HWRM_UNLOCK();
4125
4126         return rc;
4127 }
4128
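/* VF variant of bnxt_hwrm_func_cfg_def_cp(), using HWRM_FUNC_VF_CFG. */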
4129 int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
4130 {
4131         struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
4132         struct hwrm_func_vf_cfg_input req = {0};
4133         int rc;
4134
4135         HWRM_PREP(&req, HWRM_FUNC_VF_CFG, BNXT_USE_CHIMP_MB);
4136
4137         req.enables = rte_cpu_to_le_32(
4138                         HWRM_FUNC_VF_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
4139         req.async_event_cr = rte_cpu_to_le_16(
4140                         bp->async_cp_ring->cp_ring_struct->fw_ring_id);
4141         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4142
4143         HWRM_CHECK_RESULT();
4144         HWRM_UNLOCK();
4145
4146         return rc;
4147 }
4148
4149 int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf)
4150 {
4151         struct hwrm_func_cfg_input req = {0};
4152         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
4153         uint16_t dflt_vlan, fid;
4154         uint32_t func_cfg_flags;
4155         int rc = 0;
4156
4157         HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
4158
4159         if (is_vf) {
4160                 dflt_vlan = bp->pf->vf_info[vf].dflt_vlan;
4161                 fid = bp->pf->vf_info[vf].fid;
4162                 func_cfg_flags = bp->pf->vf_info[vf].func_cfg_flags;
4163         } else {
4164                 fid = rte_cpu_to_le_16(0xffff);
4165                 func_cfg_flags = bp->pf->func_cfg_flags;
4166                 dflt_vlan = bp->vlan;
4167         }
4168
4169         req.flags = rte_cpu_to_le_32(func_cfg_flags);
4170         req.fid = rte_cpu_to_le_16(fid);
4171         req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
4172         req.dflt_vlan = rte_cpu_to_le_16(dflt_vlan);
4173
4174         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4175
4176         HWRM_CHECK_RESULT();
4177         HWRM_UNLOCK();
4178
4179         return rc;
4180 }
4181
4182 int bnxt_hwrm_func_bw_cfg(struct bnxt *bp, uint16_t vf,
4183                         uint16_t max_bw, uint16_t enables)
4184 {
4185         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
4186         struct hwrm_func_cfg_input req = {0};
4187         int rc;
4188
4189         HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
4190
4191         req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
4192         req.enables |= rte_cpu_to_le_32(enables);
4193         req.flags = rte_cpu_to_le_32(bp->pf->vf_info[vf].func_cfg_flags);
4194         req.max_bw = rte_cpu_to_le_32(max_bw);
4195         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4196
4197         HWRM_CHECK_RESULT();
4198         HWRM_UNLOCK();
4199
4200         return rc;
4201 }
4202
4203 int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf)
4204 {
4205         struct hwrm_func_cfg_input req = {0};
4206         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
4207         int rc = 0;
4208
4209         HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
4210
4211         req.flags = rte_cpu_to_le_32(bp->pf->vf_info[vf].func_cfg_flags);
4212         req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
4213         req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
4214         req.dflt_vlan = rte_cpu_to_le_16(bp->pf->vf_info[vf].dflt_vlan);
4215
4216         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4217
4218         HWRM_CHECK_RESULT();
4219         HWRM_UNLOCK();
4220
4221         return rc;
4222 }
4223
4224 int bnxt_hwrm_set_async_event_cr(struct bnxt *bp)
4225 {
4226         int rc;
4227
4228         if (BNXT_PF(bp))
4229                 rc = bnxt_hwrm_func_cfg_def_cp(bp);
4230         else
4231                 rc = bnxt_hwrm_vf_func_cfg_def_cp(bp);
4232
4233         return rc;
4234 }
4235
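/*
 * Ask the firmware to reject a forwarded (VF) HWRM request. The encapsulated
 * request must fit within the command's encap_request field.
 */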
4236 int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
4237                               void *encaped, size_t ec_size)
4238 {
4239         int rc = 0;
4240         struct hwrm_reject_fwd_resp_input req = {.req_type = 0};
4241         struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
4242
4243         if (ec_size > sizeof(req.encap_request))
4244                 return -1;
4245
4246         HWRM_PREP(&req, HWRM_REJECT_FWD_RESP, BNXT_USE_CHIMP_MB);
4247
4248         req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
4249         memcpy(req.encap_request, encaped, ec_size);
4250
4251         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4252
4253         HWRM_CHECK_RESULT();
4254         HWRM_UNLOCK();
4255
4256         return rc;
4257 }
4258
4259 int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
4260                                        struct rte_ether_addr *mac)
4261 {
4262         struct hwrm_func_qcfg_input req = {0};
4263         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
4264         int rc;
4265
4266         HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
4267
4268         req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
4269         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4270
4271         HWRM_CHECK_RESULT();
4272
4273         memcpy(mac->addr_bytes, resp->mac_address, RTE_ETHER_ADDR_LEN);
4274
4275         HWRM_UNLOCK();
4276
4277         return rc;
4278 }
4279
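/*
 * Ask the firmware to execute a forwarded (VF) HWRM request on behalf of the
 * originating function.
 */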
4280 int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
4281                             void *encaped, size_t ec_size)
4282 {
4283         int rc = 0;
4284         struct hwrm_exec_fwd_resp_input req = {.req_type = 0};
4285         struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
4286
4287         if (ec_size > sizeof(req.encap_request))
4288                 return -1;
4289
4290         HWRM_PREP(&req, HWRM_EXEC_FWD_RESP, BNXT_USE_CHIMP_MB);
4291
4292         req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
4293         memcpy(req.encap_request, encaped, ec_size);
4294
4295         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4296
4297         HWRM_CHECK_RESULT();
4298         HWRM_UNLOCK();
4299
4300         return rc;
4301 }
4302
4303 static void bnxt_update_prev_stat(uint64_t *cntr, uint64_t *prev_cntr)
4304 {
4305         /* One of the HW stat values that make up this counter was zero as
4306          * returned by HW in this iteration, so use the previous
4307          * iteration's counter value
4308          */
4309         if (*prev_cntr && *cntr == 0)
4310                 *cntr = *prev_cntr;
4311         else
4312                 *prev_cntr = *cntr;
4313 }
4314
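/*
 * Query per-ring statistics for the given stats context via
 * HWRM_STAT_CTX_QUERY. A counter that reads back as zero while its previous
 * reading was non-zero keeps the previous value (see bnxt_update_prev_stat).
 */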
4315 int bnxt_hwrm_ring_stats(struct bnxt *bp, uint32_t cid, int idx,
4316                          struct bnxt_ring_stats *ring_stats, bool rx)
4317 {
4318         int rc = 0;
4319         struct hwrm_stat_ctx_query_input req = {.req_type = 0};
4320         struct hwrm_stat_ctx_query_output *resp = bp->hwrm_cmd_resp_addr;
4321
4322         HWRM_PREP(&req, HWRM_STAT_CTX_QUERY, BNXT_USE_CHIMP_MB);
4323
4324         req.stat_ctx_id = rte_cpu_to_le_32(cid);
4325
4326         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4327
4328         HWRM_CHECK_RESULT();
4329
4330         if (rx) {
4331                 struct bnxt_ring_stats *prev_stats = &bp->prev_rx_ring_stats[idx];
4332
4333                 ring_stats->rx_ucast_pkts = rte_le_to_cpu_64(resp->rx_ucast_pkts);
4334                 bnxt_update_prev_stat(&ring_stats->rx_ucast_pkts,
4335                                       &prev_stats->rx_ucast_pkts);
4336
4337                 ring_stats->rx_mcast_pkts = rte_le_to_cpu_64(resp->rx_mcast_pkts);
4338                 bnxt_update_prev_stat(&ring_stats->rx_mcast_pkts,
4339                                       &prev_stats->rx_mcast_pkts);
4340
4341                 ring_stats->rx_bcast_pkts = rte_le_to_cpu_64(resp->rx_bcast_pkts);
4342                 bnxt_update_prev_stat(&ring_stats->rx_bcast_pkts,
4343                                       &prev_stats->rx_bcast_pkts);
4344
4345                 ring_stats->rx_ucast_bytes = rte_le_to_cpu_64(resp->rx_ucast_bytes);
4346                 bnxt_update_prev_stat(&ring_stats->rx_ucast_bytes,
4347                                       &prev_stats->rx_ucast_bytes);
4348
4349                 ring_stats->rx_mcast_bytes = rte_le_to_cpu_64(resp->rx_mcast_bytes);
4350                 bnxt_update_prev_stat(&ring_stats->rx_mcast_bytes,
4351                                       &prev_stats->rx_mcast_bytes);
4352
4353                 ring_stats->rx_bcast_bytes = rte_le_to_cpu_64(resp->rx_bcast_bytes);
4354                 bnxt_update_prev_stat(&ring_stats->rx_bcast_bytes,
4355                                       &prev_stats->rx_bcast_bytes);
4356
4357                 ring_stats->rx_discard_pkts = rte_le_to_cpu_64(resp->rx_discard_pkts);
4358                 bnxt_update_prev_stat(&ring_stats->rx_discard_pkts,
4359                                       &prev_stats->rx_discard_pkts);
4360
4361                 ring_stats->rx_error_pkts = rte_le_to_cpu_64(resp->rx_error_pkts);
4362                 bnxt_update_prev_stat(&ring_stats->rx_error_pkts,
4363                                       &prev_stats->rx_error_pkts);
4364
4365                 ring_stats->rx_agg_pkts = rte_le_to_cpu_64(resp->rx_agg_pkts);
4366                 bnxt_update_prev_stat(&ring_stats->rx_agg_pkts,
4367                                       &prev_stats->rx_agg_pkts);
4368
4369                 ring_stats->rx_agg_bytes = rte_le_to_cpu_64(resp->rx_agg_bytes);
4370                 bnxt_update_prev_stat(&ring_stats->rx_agg_bytes,
4371                                       &prev_stats->rx_agg_bytes);
4372
4373                 ring_stats->rx_agg_events = rte_le_to_cpu_64(resp->rx_agg_events);
4374                 bnxt_update_prev_stat(&ring_stats->rx_agg_events,
4375                                       &prev_stats->rx_agg_events);
4376
4377                 ring_stats->rx_agg_aborts = rte_le_to_cpu_64(resp->rx_agg_aborts);
4378                 bnxt_update_prev_stat(&ring_stats->rx_agg_aborts,
4379                                       &prev_stats->rx_agg_aborts);
4380         } else {
4381                 struct bnxt_ring_stats *prev_stats = &bp->prev_tx_ring_stats[idx];
4382
4383                 ring_stats->tx_ucast_pkts = rte_le_to_cpu_64(resp->tx_ucast_pkts);
4384                 bnxt_update_prev_stat(&ring_stats->tx_ucast_pkts,
4385                                       &prev_stats->tx_ucast_pkts);
4386
4387                 ring_stats->tx_mcast_pkts = rte_le_to_cpu_64(resp->tx_mcast_pkts);
4388                 bnxt_update_prev_stat(&ring_stats->tx_mcast_pkts,
4389                                       &prev_stats->tx_mcast_pkts);
4390
4391                 ring_stats->tx_bcast_pkts = rte_le_to_cpu_64(resp->tx_bcast_pkts);
4392                 bnxt_update_prev_stat(&ring_stats->tx_bcast_pkts,
4393                                       &prev_stats->tx_bcast_pkts);
4394
4395                 ring_stats->tx_ucast_bytes = rte_le_to_cpu_64(resp->tx_ucast_bytes);
4396                 bnxt_update_prev_stat(&ring_stats->tx_ucast_bytes,
4397                                       &prev_stats->tx_ucast_bytes);
4398
4399                 ring_stats->tx_mcast_bytes = rte_le_to_cpu_64(resp->tx_mcast_bytes);
4400                 bnxt_update_prev_stat(&ring_stats->tx_mcast_bytes,
4401                                       &prev_stats->tx_mcast_bytes);
4402
4403                 ring_stats->tx_bcast_bytes = rte_le_to_cpu_64(resp->tx_bcast_bytes);
4404                 bnxt_update_prev_stat(&ring_stats->tx_bcast_bytes,
4405                                       &prev_stats->tx_bcast_bytes);
4406
4407                 ring_stats->tx_discard_pkts = rte_le_to_cpu_64(resp->tx_discard_pkts);
4408                 bnxt_update_prev_stat(&ring_stats->tx_discard_pkts,
4409                                       &prev_stats->tx_discard_pkts);
4410         }
4411
4412         HWRM_UNLOCK();
4413
4414         return rc;
4415 }
4416
4417 int bnxt_hwrm_port_qstats(struct bnxt *bp)
4418 {
4419         struct hwrm_port_qstats_input req = {0};
4420         struct hwrm_port_qstats_output *resp = bp->hwrm_cmd_resp_addr;
4421         struct bnxt_pf_info *pf = bp->pf;
4422         int rc;
4423
4424         HWRM_PREP(&req, HWRM_PORT_QSTATS, BNXT_USE_CHIMP_MB);
4425
4426         req.port_id = rte_cpu_to_le_16(pf->port_id);
4427         req.tx_stat_host_addr = rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
4428         req.rx_stat_host_addr = rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
4429         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4430
4431         HWRM_CHECK_RESULT();
4432         HWRM_UNLOCK();
4433
4434         return rc;
4435 }
4436
4437 int bnxt_hwrm_port_clr_stats(struct bnxt *bp)
4438 {
4439         struct hwrm_port_clr_stats_input req = {0};
4440         struct hwrm_port_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
4441         struct bnxt_pf_info *pf = bp->pf;
4442         int rc;
4443
4444         /* Not allowed on NS2 device, NPAR, MultiHost, VF, or when VFs are configured */
4445         if (!(bp->flags & BNXT_FLAG_PORT_STATS) || BNXT_VF(bp) ||
4446             BNXT_NPAR(bp) || BNXT_MH(bp) || BNXT_TOTAL_VFS(bp))
4447                 return 0;
4448
4449         HWRM_PREP(&req, HWRM_PORT_CLR_STATS, BNXT_USE_CHIMP_MB);
4450
4451         req.port_id = rte_cpu_to_le_16(pf->port_id);
4452         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4453
4454         HWRM_CHECK_RESULT();
4455         HWRM_UNLOCK();
4456
4457         return rc;
4458 }
4459
4460 int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
4461 {
4462         struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
4463         struct hwrm_port_led_qcaps_input req = {0};
4464         int rc;
4465
4466         if (BNXT_VF(bp))
4467                 return 0;
4468
4469         HWRM_PREP(&req, HWRM_PORT_LED_QCAPS, BNXT_USE_CHIMP_MB);
4470         req.port_id = bp->pf->port_id;
4471         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4472
4473         HWRM_CHECK_RESULT_SILENT();
4474
4475         if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
4476                 unsigned int i;
4477
4478                 bp->leds->num_leds = resp->num_leds;
4479                 memcpy(bp->leds, &resp->led0_id,
4480                         sizeof(bp->leds[0]) * bp->leds->num_leds);
4481                 for (i = 0; i < bp->leds->num_leds; i++) {
4482                         struct bnxt_led_info *led = &bp->leds[i];
4483
4484                         uint16_t caps = led->led_state_caps;
4485
4486                         if (!led->led_group_id ||
4487                                 !BNXT_LED_ALT_BLINK_CAP(caps)) {
4488                                 bp->leds->num_leds = 0;
4489                                 break;
4490                         }
4491                 }
4492         }
4493
4494         HWRM_UNLOCK();
4495
4496         return rc;
4497 }
4498
4499 int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on)
4500 {
4501         struct hwrm_port_led_cfg_output *resp = bp->hwrm_cmd_resp_addr;
4502         struct hwrm_port_led_cfg_input req = {0};
4503         struct bnxt_led_cfg *led_cfg;
4504         uint8_t led_state = HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_DEFAULT;
4505         uint16_t duration = 0;
4506         int rc, i;
4507
4508         if (!bp->leds->num_leds || BNXT_VF(bp))
4509                 return -EOPNOTSUPP;
4510
4511         HWRM_PREP(&req, HWRM_PORT_LED_CFG, BNXT_USE_CHIMP_MB);
4512
4513         if (led_on) {
4514                 led_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT;
4515                 duration = rte_cpu_to_le_16(500);
4516         }
4517         req.port_id = bp->pf->port_id;
4518         req.num_leds = bp->leds->num_leds;
4519         led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
4520         for (i = 0; i < bp->leds->num_leds; i++, led_cfg++) {
4521                 req.enables |= BNXT_LED_DFLT_ENABLES(i);
4522                 led_cfg->led_id = bp->leds[i].led_id;
4523                 led_cfg->led_state = led_state;
4524                 led_cfg->led_blink_on = duration;
4525                 led_cfg->led_blink_off = duration;
4526                 led_cfg->led_group_id = bp->leds[i].led_group_id;
4527         }
4528
4529         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4530
4531         HWRM_CHECK_RESULT();
4532         HWRM_UNLOCK();
4533
4534         return rc;
4535 }
4536
4537 int bnxt_hwrm_nvm_get_dir_info(struct bnxt *bp, uint32_t *entries,
4538                                uint32_t *length)
4539 {
4540         int rc;
4541         struct hwrm_nvm_get_dir_info_input req = {0};
4542         struct hwrm_nvm_get_dir_info_output *resp = bp->hwrm_cmd_resp_addr;
4543
4544         HWRM_PREP(&req, HWRM_NVM_GET_DIR_INFO, BNXT_USE_CHIMP_MB);
4545
4546         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4547
4548         HWRM_CHECK_RESULT();
4549
4550         *entries = rte_le_to_cpu_32(resp->entries);
4551         *length = rte_le_to_cpu_32(resp->entry_length);
4552
4553         HWRM_UNLOCK();
4554         return rc;
4555 }
4556
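/*
 * Read the NVM directory into 'data'. The first two bytes receive the
 * directory entry count and entry length (each truncated to one byte); the
 * remainder is copied from a DMA-able bounce buffer filled by the firmware.
 */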
4557 int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data)
4558 {
4559         int rc;
4560         uint32_t dir_entries;
4561         uint32_t entry_length;
4562         uint8_t *buf;
4563         size_t buflen;
4564         rte_iova_t dma_handle;
4565         struct hwrm_nvm_get_dir_entries_input req = {0};
4566         struct hwrm_nvm_get_dir_entries_output *resp = bp->hwrm_cmd_resp_addr;
4567
4568         rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length);
4569         if (rc != 0)
4570                 return rc;
4571
4572         *data++ = dir_entries;
4573         *data++ = entry_length;
4574         len -= 2;
4575         memset(data, 0xff, len);
4576
4577         buflen = dir_entries * entry_length;
4578         buf = rte_malloc("nvm_dir", buflen, 0);
4579         if (buf == NULL)
4580                 return -ENOMEM;
4581         dma_handle = rte_malloc_virt2iova(buf);
4582         if (dma_handle == RTE_BAD_IOVA) {
4583                 rte_free(buf);
4584                 PMD_DRV_LOG(ERR,
4585                         "unable to map response address to physical memory\n");
4586                 return -ENOMEM;
4587         }
4588         HWRM_PREP(&req, HWRM_NVM_GET_DIR_ENTRIES, BNXT_USE_CHIMP_MB);
4589         req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
4590         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4591
4592         if (rc == 0)
4593                 memcpy(data, buf, len > buflen ? buflen : len);
4594
4595         rte_free(buf);
4596         HWRM_CHECK_RESULT();
4597         HWRM_UNLOCK();
4598
4599         return rc;
4600 }
4601
4602 int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index,
4603                              uint32_t offset, uint32_t length,
4604                              uint8_t *data)
4605 {
4606         int rc;
4607         uint8_t *buf;
4608         rte_iova_t dma_handle;
4609         struct hwrm_nvm_read_input req = {0};
4610         struct hwrm_nvm_read_output *resp = bp->hwrm_cmd_resp_addr;
4611
4612         buf = rte_malloc("nvm_item", length, 0);
4613         if (!buf)
4614                 return -ENOMEM;
4615
4616         dma_handle = rte_malloc_virt2iova(buf);
4617         if (dma_handle == RTE_BAD_IOVA) {
4618                 rte_free(buf);
4619                 PMD_DRV_LOG(ERR,
4620                         "unable to map response address to physical memory\n");
4621                 return -ENOMEM;
4622         }
4623         HWRM_PREP(&req, HWRM_NVM_READ, BNXT_USE_CHIMP_MB);
4624         req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
4625         req.dir_idx = rte_cpu_to_le_16(index);
4626         req.offset = rte_cpu_to_le_32(offset);
4627         req.len = rte_cpu_to_le_32(length);
4628         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4629         if (rc == 0)
4630                 memcpy(data, buf, length);
4631
4632         rte_free(buf);
4633         HWRM_CHECK_RESULT();
4634         HWRM_UNLOCK();
4635
4636         return rc;
4637 }
4638
4639 int bnxt_hwrm_erase_nvram_directory(struct bnxt *bp, uint8_t index)
4640 {
4641         int rc;
4642         struct hwrm_nvm_erase_dir_entry_input req = {0};
4643         struct hwrm_nvm_erase_dir_entry_output *resp = bp->hwrm_cmd_resp_addr;
4644
4645         HWRM_PREP(&req, HWRM_NVM_ERASE_DIR_ENTRY, BNXT_USE_CHIMP_MB);
4646         req.dir_idx = rte_cpu_to_le_16(index);
4647         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4648         HWRM_CHECK_RESULT();
4649         HWRM_UNLOCK();
4650
4651         return rc;
4652 }
4653
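/*
 * Write an NVM item: stage the caller's data in a DMA-able buffer and pass
 * its IOVA to the firmware via HWRM_NVM_WRITE.
 */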
4654 int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
4655                           uint16_t dir_ordinal, uint16_t dir_ext,
4656                           uint16_t dir_attr, const uint8_t *data,
4657                           size_t data_len)
4658 {
4659         int rc;
4660         struct hwrm_nvm_write_input req = {0};
4661         struct hwrm_nvm_write_output *resp = bp->hwrm_cmd_resp_addr;
4662         rte_iova_t dma_handle;
4663         uint8_t *buf;
4664
4665         buf = rte_malloc("nvm_write", data_len, 0);
4666         if (!buf)
4667                 return -ENOMEM;
4668
4669         dma_handle = rte_malloc_virt2iova(buf);
4670         if (dma_handle == RTE_BAD_IOVA) {
4671                 rte_free(buf);
4672                 PMD_DRV_LOG(ERR,
4673                         "unable to map response address to physical memory\n");
4674                 return -ENOMEM;
4675         }
4676         memcpy(buf, data, data_len);
4677
4678         HWRM_PREP(&req, HWRM_NVM_WRITE, BNXT_USE_CHIMP_MB);
4679
4680         req.dir_type = rte_cpu_to_le_16(dir_type);
4681         req.dir_ordinal = rte_cpu_to_le_16(dir_ordinal);
4682         req.dir_ext = rte_cpu_to_le_16(dir_ext);
4683         req.dir_attr = rte_cpu_to_le_16(dir_attr);
4684         req.dir_data_length = rte_cpu_to_le_32(data_len);
4685         req.host_src_addr = rte_cpu_to_le_64(dma_handle);
4686
4687         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4688
4689         rte_free(buf);
4690         HWRM_CHECK_RESULT();
4691         HWRM_UNLOCK();
4692
4693         return rc;
4694 }
4695
4696 static void
4697 bnxt_vnic_count(struct bnxt_vnic_info *vnic __rte_unused, void *cbdata)
4698 {
4699         uint32_t *count = cbdata;
4700
4701         *count = *count + 1;
4702 }
4703
4704 static int bnxt_vnic_count_hwrm_stub(struct bnxt *bp __rte_unused,
4705                                      struct bnxt_vnic_info *vnic __rte_unused)
4706 {
4707         return 0;
4708 }
4709
4710 int bnxt_vf_vnic_count(struct bnxt *bp, uint16_t vf)
4711 {
4712         uint32_t count = 0;
4713
4714         bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf, bnxt_vnic_count,
4715             &count, bnxt_vnic_count_hwrm_stub);
4716
4717         return count;
4718 }
4719
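/*
 * Query the VNIC IDs owned by a VF. 'vnic_ids' must be a DMA-able array
 * large enough for bp->pf->total_vnics entries; returns the number of IDs
 * reported by the firmware or a negative error code.
 */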
4720 static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
4721                                         uint16_t *vnic_ids)
4722 {
4723         struct hwrm_func_vf_vnic_ids_query_input req = {0};
4724         struct hwrm_func_vf_vnic_ids_query_output *resp =
4725                                                 bp->hwrm_cmd_resp_addr;
4726         int rc;
4727
4728         /* First query all VNIC ids */
4729         HWRM_PREP(&req, HWRM_FUNC_VF_VNIC_IDS_QUERY, BNXT_USE_CHIMP_MB);
4730
4731         req.vf_id = rte_cpu_to_le_16(bp->pf->first_vf_id + vf);
4732         req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf->total_vnics);
4733         req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_malloc_virt2iova(vnic_ids));
4734
4735         if (req.vnic_id_tbl_addr == RTE_BAD_IOVA) {
4736                 HWRM_UNLOCK();
4737                 PMD_DRV_LOG(ERR,
4738                 "unable to map VNIC ID table address to physical memory\n");
4739                 return -ENOMEM;
4740         }
4741         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4742         HWRM_CHECK_RESULT();
4743         rc = rte_le_to_cpu_32(resp->vnic_id_cnt);
4744
4745         HWRM_UNLOCK();
4746
4747         return rc;
4748 }
4749
4750 /*
4751  * This function queries the VNIC IDs for a specified VF. It then calls
4752  * vnic_cb to update the necessary field in vnic_info with cbdata, and
4753  * finally calls hwrm_cb to program the new VNIC configuration.
4754  */
4755 int bnxt_hwrm_func_vf_vnic_query_and_config(struct bnxt *bp, uint16_t vf,
4756         void (*vnic_cb)(struct bnxt_vnic_info *, void *), void *cbdata,
4757         int (*hwrm_cb)(struct bnxt *bp, struct bnxt_vnic_info *vnic))
4758 {
4759         struct bnxt_vnic_info vnic;
4760         int rc = 0;
4761         int i, num_vnic_ids;
4762         uint16_t *vnic_ids;
4763         size_t vnic_id_sz;
4764         size_t sz;
4765
4766         /* First query all VNIC ids */
4767         vnic_id_sz = bp->pf->total_vnics * sizeof(*vnic_ids);
4768         vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
4769                         RTE_CACHE_LINE_SIZE);
4770         if (vnic_ids == NULL)
4771                 return -ENOMEM;
4772
4773         for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
4774                 rte_mem_lock_page(((char *)vnic_ids) + sz);
4775
4776         num_vnic_ids = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
4777
        if (num_vnic_ids < 0) {
                rte_free(vnic_ids);     /* avoid leaking the VNIC ID table */
                return num_vnic_ids;
        }
4780
4781         /* Retrieve each VNIC, let vnic_cb update it, then reprogram it via hwrm_cb */
4782
4783         for (i = 0; i < num_vnic_ids; i++) {
4784                 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
4785                 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
4786                 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, bp->pf->first_vf_id + vf);
4787                 if (rc)
4788                         break;
4789                 if (vnic.mru <= 4)      /* Indicates unallocated */
4790                         continue;
4791
4792                 vnic_cb(&vnic, cbdata);
4793
4794                 rc = hwrm_cb(bp, &vnic);
4795                 if (rc)
4796                         break;
4797         }
4798
4799         rte_free(vnic_ids);
4800
4801         return rc;
4802 }
4803
4804 int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf,
4805                                               bool on)
4806 {
4807         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
4808         struct hwrm_func_cfg_input req = {0};
4809         int rc;
4810
4811         HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
4812
4813         req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
4814         req.enables |= rte_cpu_to_le_32(
4815                         HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE);
4816         req.vlan_antispoof_mode = on ?
4817                 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN :
4818                 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK;
4819         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4820
4821         HWRM_CHECK_RESULT();
4822         HWRM_UNLOCK();
4823
4824         return rc;
4825 }
4826
4827 int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf)
4828 {
4829         struct bnxt_vnic_info vnic;
4830         uint16_t *vnic_ids;
4831         size_t vnic_id_sz;
4832         int num_vnic_ids, i;
4833         size_t sz;
4834         int rc;
4835
4836         vnic_id_sz = bp->pf->total_vnics * sizeof(*vnic_ids);
4837         vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
4838                         RTE_CACHE_LINE_SIZE);
4839         if (vnic_ids == NULL)
4840                 return -ENOMEM;
4841
4842         for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
4843                 rte_mem_lock_page(((char *)vnic_ids) + sz);
4844
4845         rc = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
4846         if (rc <= 0)
4847                 goto exit;
4848         num_vnic_ids = rc;
4849
4850         /*
4851          * Loop through to find the default VNIC ID.
4852          * TODO: The easier way would be to obtain the resp->dflt_vnic_id
4853          * by sending the hwrm_func_qcfg command to the firmware.
4854          */
4855         for (i = 0; i < num_vnic_ids; i++) {
4856                 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
4857                 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
4858                 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic,
4859                                         bp->pf->first_vf_id + vf);
4860                 if (rc)
4861                         goto exit;
4862                 if (vnic.func_default) {
4863                         rte_free(vnic_ids);
4864                         return vnic.fw_vnic_id;
4865                 }
4866         }
4867         /* Could not find a default VNIC. */
4868         PMD_DRV_LOG(ERR, "No default VNIC\n");
4869 exit:
4870         rte_free(vnic_ids);
4871         return rc;
4872 }
4873
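/*
 * Allocate an exact match (EM) flow in the CFA, freeing any EM filter that
 * was previously allocated for this entry. Only the match fields selected
 * by filter->enables are copied into the request.
 */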
4874 int bnxt_hwrm_set_em_filter(struct bnxt *bp,
4875                          uint16_t dst_id,
4876                          struct bnxt_filter_info *filter)
4877 {
4878         int rc = 0;
4879         struct hwrm_cfa_em_flow_alloc_input req = {.req_type = 0 };
4880         struct hwrm_cfa_em_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4881         uint32_t enables = 0;
4882
4883         if (filter->fw_em_filter_id != UINT64_MAX)
4884                 bnxt_hwrm_clear_em_filter(bp, filter);
4885
4886         HWRM_PREP(&req, HWRM_CFA_EM_FLOW_ALLOC, BNXT_USE_KONG(bp));
4887
4888         req.flags = rte_cpu_to_le_32(filter->flags);
4889
4890         enables = filter->enables |
4891               HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_ID;
4892         req.dst_id = rte_cpu_to_le_16(dst_id);
4893
4894         if (filter->ip_addr_type) {
4895                 req.ip_addr_type = filter->ip_addr_type;
4896                 enables |= HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
4897         }
4898         if (enables &
4899             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
4900                 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
4901         if (enables &
4902             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_MACADDR)
4903                 memcpy(req.src_macaddr, filter->src_macaddr,
4904                        RTE_ETHER_ADDR_LEN);
4905         if (enables &
4906             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_MACADDR)
4907                 memcpy(req.dst_macaddr, filter->dst_macaddr,
4908                        RTE_ETHER_ADDR_LEN);
4909         if (enables &
4910             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_OVLAN_VID)
4911                 req.ovlan_vid = filter->l2_ovlan;
4912         if (enables &
4913             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IVLAN_VID)
4914                 req.ivlan_vid = filter->l2_ivlan;
4915         if (enables &
4916             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ETHERTYPE)
4917                 req.ethertype = rte_cpu_to_be_16(filter->ethertype);
4918         if (enables &
4919             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
4920                 req.ip_protocol = filter->ip_protocol;
4921         if (enables &
4922             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_IPADDR)
4923                 req.src_ipaddr[0] = rte_cpu_to_be_32(filter->src_ipaddr[0]);
4924         if (enables &
4925             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_IPADDR)
4926                 req.dst_ipaddr[0] = rte_cpu_to_be_32(filter->dst_ipaddr[0]);
4927         if (enables &
4928             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_PORT)
4929                 req.src_port = rte_cpu_to_be_16(filter->src_port);
4930         if (enables &
4931             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_PORT)
4932                 req.dst_port = rte_cpu_to_be_16(filter->dst_port);
4933         if (enables &
4934             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
4935                 req.mirror_vnic_id = filter->mirror_vnic_id;
4936
4937         req.enables = rte_cpu_to_le_32(enables);
4938
4939         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
4940
4941         HWRM_CHECK_RESULT();
4942
4943         filter->fw_em_filter_id = rte_le_to_cpu_64(resp->em_filter_id);
4944         HWRM_UNLOCK();
4945
4946         return rc;
4947 }
4948
4949 int bnxt_hwrm_clear_em_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
4950 {
4951         int rc = 0;
4952         struct hwrm_cfa_em_flow_free_input req = {.req_type = 0 };
4953         struct hwrm_cfa_em_flow_free_output *resp = bp->hwrm_cmd_resp_addr;
4954
4955         if (filter->fw_em_filter_id == UINT64_MAX)
4956                 return 0;
4957
4958         HWRM_PREP(&req, HWRM_CFA_EM_FLOW_FREE, BNXT_USE_KONG(bp));
4959
4960         req.em_filter_id = rte_cpu_to_le_64(filter->fw_em_filter_id);
4961
4962         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
4963
4964         HWRM_CHECK_RESULT();
4965         HWRM_UNLOCK();
4966
4967         filter->fw_em_filter_id = UINT64_MAX;
4968         filter->fw_l2_filter_id = UINT64_MAX;
4969
4970         return 0;
4971 }
4972
4973 int bnxt_hwrm_set_ntuple_filter(struct bnxt *bp,
4974                          uint16_t dst_id,
4975                          struct bnxt_filter_info *filter)
4976 {
4977         int rc = 0;
4978         struct hwrm_cfa_ntuple_filter_alloc_input req = {.req_type = 0 };
4979         struct hwrm_cfa_ntuple_filter_alloc_output *resp =
4980                                                 bp->hwrm_cmd_resp_addr;
4981         uint32_t enables = 0;
4982
4983         if (filter->fw_ntuple_filter_id != UINT64_MAX)
4984                 bnxt_hwrm_clear_ntuple_filter(bp, filter);
4985
4986         HWRM_PREP(&req, HWRM_CFA_NTUPLE_FILTER_ALLOC, BNXT_USE_CHIMP_MB);
4987
4988         req.flags = rte_cpu_to_le_32(filter->flags);
4989
4990         enables = filter->enables |
4991               HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
4992         req.dst_id = rte_cpu_to_le_16(dst_id);
4993
4994         if (filter->ip_addr_type) {
4995                 req.ip_addr_type = filter->ip_addr_type;
4996                 enables |=
4997                         HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
4998         }
4999         if (enables &
5000             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
5001                 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
5002         if (enables &
5003             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR)
5004                 memcpy(req.src_macaddr, filter->src_macaddr,
5005                        RTE_ETHER_ADDR_LEN);
5006         if (enables &
5007             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE)
5008                 req.ethertype = rte_cpu_to_be_16(filter->ethertype);
5009         if (enables &
5010             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
5011                 req.ip_protocol = filter->ip_protocol;
5012         if (enables &
5013             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR)
5014                 req.src_ipaddr[0] = rte_cpu_to_le_32(filter->src_ipaddr[0]);
5015         if (enables &
5016             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR_MASK)
5017                 req.src_ipaddr_mask[0] =
5018                         rte_cpu_to_le_32(filter->src_ipaddr_mask[0]);
5019         if (enables &
5020             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR)
5021                 req.dst_ipaddr[0] = rte_cpu_to_le_32(filter->dst_ipaddr[0]);
5022         if (enables &
5023             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR_MASK)
5024                 req.dst_ipaddr_mask[0] =
5025                         rte_cpu_to_be_32(filter->dst_ipaddr_mask[0]);
5026         if (enables &
5027             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT)
5028                 req.src_port = rte_cpu_to_le_16(filter->src_port);
5029         if (enables &
5030             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT_MASK)
5031                 req.src_port_mask = rte_cpu_to_le_16(filter->src_port_mask);
5032         if (enables &
5033             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT)
5034                 req.dst_port = rte_cpu_to_le_16(filter->dst_port);
5035         if (enables &
5036             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT_MASK)
5037                 req.dst_port_mask = rte_cpu_to_le_16(filter->dst_port_mask);
5038         if (enables &
5039             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
5040                 req.mirror_vnic_id = filter->mirror_vnic_id;
5041
5042         req.enables = rte_cpu_to_le_32(enables);
5043
5044         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5045
5046         HWRM_CHECK_RESULT();
5047
5048         filter->fw_ntuple_filter_id = rte_le_to_cpu_64(resp->ntuple_filter_id);
5049         filter->flow_id = rte_le_to_cpu_32(resp->flow_id);
5050         HWRM_UNLOCK();
5051
5052         return rc;
5053 }
5054
5055 int bnxt_hwrm_clear_ntuple_filter(struct bnxt *bp,
5056                                 struct bnxt_filter_info *filter)
5057 {
5058         int rc = 0;
5059         struct hwrm_cfa_ntuple_filter_free_input req = {.req_type = 0 };
5060         struct hwrm_cfa_ntuple_filter_free_output *resp =
5061                                                 bp->hwrm_cmd_resp_addr;
5062
5063         if (filter->fw_ntuple_filter_id == UINT64_MAX)
5064                 return 0;
5065
5066         HWRM_PREP(&req, HWRM_CFA_NTUPLE_FILTER_FREE, BNXT_USE_CHIMP_MB);
5067
5068         req.ntuple_filter_id = rte_cpu_to_le_64(filter->fw_ntuple_filter_id);
5069
5070         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5071
5072         HWRM_CHECK_RESULT();
5073         HWRM_UNLOCK();
5074
5075         filter->fw_ntuple_filter_id = UINT64_MAX;
5076
5077         return 0;
5078 }
5079
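/*
 * Program the RSS indirection table of a VNIC on P5 (Thor) chips. Each RSS
 * context holds 64 entries and every entry is an Rx ring/completion ring ID
 * pair; Rx queues in the stopped state are skipped.
 */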
5080 static int
5081 bnxt_vnic_rss_configure_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic)
5082 {
5083         struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
5084         uint8_t *rxq_state = bp->eth_dev->data->rx_queue_state;
5085         struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
5086         struct bnxt_rx_queue **rxqs = bp->rx_queues;
5087         uint16_t *ring_tbl = vnic->rss_table;
5088         int nr_ctxs = vnic->num_lb_ctxts;
5089         int max_rings = bp->rx_nr_rings;
5090         int i, j, k, cnt;
5091         int rc = 0;
5092
5093         for (i = 0, k = 0; i < nr_ctxs; i++) {
5094                 struct bnxt_rx_ring_info *rxr;
5095                 struct bnxt_cp_ring_info *cpr;
5096
5097                 HWRM_PREP(&req, HWRM_VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
5098
5099                 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
5100                 req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
5101                 req.hash_mode_flags = vnic->hash_mode;
5102
5103                 req.ring_grp_tbl_addr =
5104                     rte_cpu_to_le_64(vnic->rss_table_dma_addr +
5105                                      i * BNXT_RSS_ENTRIES_PER_CTX_P5 *
5106                                      2 * sizeof(*ring_tbl));
5107                 req.hash_key_tbl_addr =
5108                     rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
5109
5110                 req.ring_table_pair_index = i;
5111                 req.rss_ctx_idx = rte_cpu_to_le_16(vnic->fw_grp_ids[i]);
5112
5113                 for (j = 0; j < 64; j++) {
5114                         uint16_t ring_id;
5115
5116                         /* Find next active ring. */
5117                         for (cnt = 0; cnt < max_rings; cnt++) {
5118                                 if (rxq_state[k] != RTE_ETH_QUEUE_STATE_STOPPED)
5119                                         break;
5120                                 if (++k == max_rings)
5121                                         k = 0;
5122                         }
5123
5124                         /* Return if no rings are active. */
5125                         if (cnt == max_rings) {
5126                                 HWRM_UNLOCK();
5127                                 return 0;
5128                         }
5129
5130                         /* Add rx/cp ring pair to RSS table. */
5131                         rxr = rxqs[k]->rx_ring;
5132                         cpr = rxqs[k]->cp_ring;
5133
5134                         ring_id = rxr->rx_ring_struct->fw_ring_id;
5135                         *ring_tbl++ = rte_cpu_to_le_16(ring_id);
5136                         ring_id = cpr->cp_ring_struct->fw_ring_id;
5137                         *ring_tbl++ = rte_cpu_to_le_16(ring_id);
5138
5139                         if (++k == max_rings)
5140                                 k = 0;
5141                 }
5142                 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req),
5143                                             BNXT_USE_CHIMP_MB);
5144
5145                 HWRM_CHECK_RESULT();
5146                 HWRM_UNLOCK();
5147         }
5148
5149         return rc;
5150 }
5151
5152 int bnxt_vnic_rss_configure(struct bnxt *bp, struct bnxt_vnic_info *vnic)
5153 {
5154         unsigned int rss_idx, fw_idx, i;
5155
5156         if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
5157                 return 0;
5158
5159         if (!(vnic->rss_table && vnic->hash_type))
5160                 return 0;
5161
5162         if (BNXT_CHIP_P5(bp))
5163                 return bnxt_vnic_rss_configure_p5(bp, vnic);
5164
5165         /*
5166          * Fill the RSS hash & redirection table with
5167          * ring group ids for all VNICs
5168          */
5169         for (rss_idx = 0, fw_idx = 0; rss_idx < HW_HASH_INDEX_SIZE;
5170              rss_idx++, fw_idx++) {
5171                 for (i = 0; i < bp->rx_cp_nr_rings; i++) {
5172                         fw_idx %= bp->rx_cp_nr_rings;
5173                         if (vnic->fw_grp_ids[fw_idx] != INVALID_HW_RING_ID)
5174                                 break;
5175                         fw_idx++;
5176                 }
5177
5178                 if (i == bp->rx_cp_nr_rings)
5179                         return 0;
5180
5181                 vnic->rss_table[rss_idx] = vnic->fw_grp_ids[fw_idx];
5182         }
5183
5184         return bnxt_hwrm_vnic_rss_cfg(bp, vnic);
5185 }
5186
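/*
 * Fill a RING_CMPL_RING_CFG_AGGINT_PARAMS request from the driver's software
 * interrupt coalescing settings (non-P5 path).
 */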
5187 static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal,
5188         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
5189 {
5190         uint16_t flags;
5191
5192         req->num_cmpl_aggr_int = rte_cpu_to_le_16(hw_coal->num_cmpl_aggr_int);
5193
5194         /* This is a 6-bit value and must not be 0, or we'll get a non-stop IRQ */
5195         req->num_cmpl_dma_aggr = rte_cpu_to_le_16(hw_coal->num_cmpl_dma_aggr);
5196
5197         /* This is a 6-bit value and must not be 0, or we'll get a non-stop IRQ */
5198         req->num_cmpl_dma_aggr_during_int =
5199                 rte_cpu_to_le_16(hw_coal->num_cmpl_dma_aggr_during_int);
5200
5201         req->int_lat_tmr_max = rte_cpu_to_le_16(hw_coal->int_lat_tmr_max);
5202
5203         /* min timer set to 1/2 of interrupt timer */
5204         req->int_lat_tmr_min = rte_cpu_to_le_16(hw_coal->int_lat_tmr_min);
5205
5206         /* buf timer set to 1/4 of interrupt timer */
5207         req->cmpl_aggr_dma_tmr = rte_cpu_to_le_16(hw_coal->cmpl_aggr_dma_tmr);
5208
5209         req->cmpl_aggr_dma_tmr_during_int =
5210                 rte_cpu_to_le_16(hw_coal->cmpl_aggr_dma_tmr_during_int);
5211
5212         flags = HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET |
5213                 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_RING_IDLE;
5214         req->flags = rte_cpu_to_le_16(flags);
5215 }
5216
5217 static int bnxt_hwrm_set_coal_params_p5(struct bnxt *bp,
5218                 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *agg_req)
5219 {
5220         struct hwrm_ring_aggint_qcaps_input req = {0};
5221         struct hwrm_ring_aggint_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
5222         uint32_t enables;
5223         uint16_t flags;
5224         int rc;
5225
5226         HWRM_PREP(&req, HWRM_RING_AGGINT_QCAPS, BNXT_USE_CHIMP_MB);
5227         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5228         HWRM_CHECK_RESULT();
5229
5230         agg_req->num_cmpl_dma_aggr = resp->num_cmpl_dma_aggr_max;
5231         agg_req->cmpl_aggr_dma_tmr = resp->cmpl_aggr_dma_tmr_min;
5232
5233         flags = HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET |
5234                 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_RING_IDLE;
5235         agg_req->flags = rte_cpu_to_le_16(flags);
5236         enables =
5237          HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_CMPL_AGGR_DMA_TMR |
5238          HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_NUM_CMPL_DMA_AGGR;
5239         agg_req->enables = rte_cpu_to_le_32(enables);
5240
5241         HWRM_UNLOCK();
5242         return rc;
5243 }
5244
5245 int bnxt_hwrm_set_ring_coal(struct bnxt *bp,
5246                         struct bnxt_coal *coal, uint16_t ring_id)
5247 {
5248         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
5249         struct hwrm_ring_cmpl_ring_cfg_aggint_params_output *resp =
5250                                                 bp->hwrm_cmd_resp_addr;
5251         int rc;
5252
5253         /* Set ring coalesce parameters only for P5 (Thor) chips and Stratus devices */
5254         if (BNXT_CHIP_P5(bp)) {
5255                 if (bnxt_hwrm_set_coal_params_p5(bp, &req))
5256                         return -1;
5257         } else if (bnxt_stratus_device(bp)) {
5258                 bnxt_hwrm_set_coal_params(coal, &req);
5259         } else {
5260                 return 0;
5261         }
5262
5263         HWRM_PREP(&req,
5264                   HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS,
5265                   BNXT_USE_CHIMP_MB);
5266         req.ring_id = rte_cpu_to_le_16(ring_id);
5267         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5268         HWRM_CHECK_RESULT();
5269         HWRM_UNLOCK();
5270         return 0;
5271 }
5272
5273 #define BNXT_RTE_MEMZONE_FLAG  (RTE_MEMZONE_1GB | RTE_MEMZONE_IOVA_CONTIG)
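/*
 * Query the firmware's backing store (context memory) requirements on P5
 * (Thor) chips and cache them in bp->ctx for later allocation and
 * configuration.
 */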
5274 int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
5275 {
5276         struct hwrm_func_backing_store_qcaps_input req = {0};
5277         struct hwrm_func_backing_store_qcaps_output *resp =
5278                 bp->hwrm_cmd_resp_addr;
5279         struct bnxt_ctx_pg_info *ctx_pg;
5280         struct bnxt_ctx_mem_info *ctx;
5281         int total_alloc_len;
5282         int rc, i, tqm_rings;
5283
5284         if (!BNXT_CHIP_P5(bp) ||
5285             bp->hwrm_spec_code < HWRM_VERSION_1_9_2 ||
5286             BNXT_VF(bp) ||
5287             bp->ctx)
5288                 return 0;
5289
5290         HWRM_PREP(&req, HWRM_FUNC_BACKING_STORE_QCAPS, BNXT_USE_CHIMP_MB);
5291         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5292         HWRM_CHECK_RESULT_SILENT();
5293
5294         total_alloc_len = sizeof(*ctx);
5295         ctx = rte_zmalloc("bnxt_ctx_mem", total_alloc_len,
5296                           RTE_CACHE_LINE_SIZE);
5297         if (!ctx) {
5298                 rc = -ENOMEM;
5299                 goto ctx_err;
5300         }
5301
5302         ctx->qp_max_entries = rte_le_to_cpu_32(resp->qp_max_entries);
5303         ctx->qp_min_qp1_entries =
5304                 rte_le_to_cpu_16(resp->qp_min_qp1_entries);
5305         ctx->qp_max_l2_entries =
5306                 rte_le_to_cpu_16(resp->qp_max_l2_entries);
5307         ctx->qp_entry_size = rte_le_to_cpu_16(resp->qp_entry_size);
5308         ctx->srq_max_l2_entries =
5309                 rte_le_to_cpu_16(resp->srq_max_l2_entries);
5310         ctx->srq_max_entries = rte_le_to_cpu_32(resp->srq_max_entries);
5311         ctx->srq_entry_size = rte_le_to_cpu_16(resp->srq_entry_size);
5312         ctx->cq_max_l2_entries =
5313                 rte_le_to_cpu_16(resp->cq_max_l2_entries);
5314         ctx->cq_max_entries = rte_le_to_cpu_32(resp->cq_max_entries);
5315         ctx->cq_entry_size = rte_le_to_cpu_16(resp->cq_entry_size);
5316         ctx->vnic_max_vnic_entries =
5317                 rte_le_to_cpu_16(resp->vnic_max_vnic_entries);
5318         ctx->vnic_max_ring_table_entries =
5319                 rte_le_to_cpu_16(resp->vnic_max_ring_table_entries);
5320         ctx->vnic_entry_size = rte_le_to_cpu_16(resp->vnic_entry_size);
5321         ctx->stat_max_entries =
5322                 rte_le_to_cpu_32(resp->stat_max_entries);
5323         ctx->stat_entry_size = rte_le_to_cpu_16(resp->stat_entry_size);
5324         ctx->tqm_entry_size = rte_le_to_cpu_16(resp->tqm_entry_size);
5325         ctx->tqm_min_entries_per_ring =
5326                 rte_le_to_cpu_32(resp->tqm_min_entries_per_ring);
5327         ctx->tqm_max_entries_per_ring =
5328                 rte_le_to_cpu_32(resp->tqm_max_entries_per_ring);
5329         ctx->tqm_entries_multiple = resp->tqm_entries_multiple;
5330         if (!ctx->tqm_entries_multiple)
5331                 ctx->tqm_entries_multiple = 1;
5332         ctx->mrav_max_entries =
5333                 rte_le_to_cpu_32(resp->mrav_max_entries);
5334         ctx->mrav_entry_size = rte_le_to_cpu_16(resp->mrav_entry_size);
5335         ctx->tim_entry_size = rte_le_to_cpu_16(resp->tim_entry_size);
5336         ctx->tim_max_entries = rte_le_to_cpu_32(resp->tim_max_entries);
5337         ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
5338
5339         ctx->tqm_fp_rings_count = ctx->tqm_fp_rings_count ?
5340                                   RTE_MIN(ctx->tqm_fp_rings_count,
5341                                           BNXT_MAX_TQM_FP_LEGACY_RINGS) :
5342                                   bp->max_q;
5343
5344         /* Check if the ext ring count needs to be counted.
5345          * Ext ring count is available only with new FW so we should not
5346          * look at the field on older FW.
5347          */
5348         if (ctx->tqm_fp_rings_count == BNXT_MAX_TQM_FP_LEGACY_RINGS &&
5349             bp->hwrm_max_ext_req_len >= BNXT_BACKING_STORE_CFG_LEN) {
5350                 ctx->tqm_fp_rings_count += resp->tqm_fp_rings_count_ext;
5351                 ctx->tqm_fp_rings_count = RTE_MIN(BNXT_MAX_TQM_FP_RINGS,
5352                                                   ctx->tqm_fp_rings_count);
5353         }
5354
5355         tqm_rings = ctx->tqm_fp_rings_count + 1;
5356
5357         ctx_pg = rte_malloc("bnxt_ctx_pg_mem",
5358                             sizeof(*ctx_pg) * tqm_rings,
5359                             RTE_CACHE_LINE_SIZE);
5360         if (!ctx_pg) {
5361                 rc = -ENOMEM;
5362                 goto ctx_err;
5363         }
5364         for (i = 0; i < tqm_rings; i++, ctx_pg++)
5365                 ctx->tqm_mem[i] = ctx_pg;
5366
5367         bp->ctx = ctx;
5368 ctx_err:
5369         HWRM_UNLOCK();
5370         return rc;
5371 }
5372
5373 int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, uint32_t enables)
5374 {
5375         struct hwrm_func_backing_store_cfg_input req = {0};
5376         struct hwrm_func_backing_store_cfg_output *resp =
5377                 bp->hwrm_cmd_resp_addr;
5378         struct bnxt_ctx_mem_info *ctx = bp->ctx;
5379         struct bnxt_ctx_pg_info *ctx_pg;
5380         uint32_t *num_entries;
5381         uint64_t *pg_dir;
5382         uint8_t *pg_attr;
5383         uint32_t ena;
5384         int i, rc;
5385
5386         if (!ctx)
5387                 return 0;
5388
5389         HWRM_PREP(&req, HWRM_FUNC_BACKING_STORE_CFG, BNXT_USE_CHIMP_MB);
5390         req.enables = rte_cpu_to_le_32(enables);
5391
5392         if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_QP) {
5393                 ctx_pg = &ctx->qp_mem;
5394                 req.qp_num_entries = rte_cpu_to_le_32(ctx_pg->entries);
5395                 req.qp_num_qp1_entries =
5396                         rte_cpu_to_le_16(ctx->qp_min_qp1_entries);
5397                 req.qp_num_l2_entries =
5398                         rte_cpu_to_le_16(ctx->qp_max_l2_entries);
5399                 req.qp_entry_size = rte_cpu_to_le_16(ctx->qp_entry_size);
5400                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
5401                                       &req.qpc_pg_size_qpc_lvl,
5402                                       &req.qpc_page_dir);
5403         }
5404
5405         if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_SRQ) {
5406                 ctx_pg = &ctx->srq_mem;
5407                 req.srq_num_entries = rte_cpu_to_le_32(ctx_pg->entries);
5408                 req.srq_num_l2_entries =
5409                                  rte_cpu_to_le_16(ctx->srq_max_l2_entries);
5410                 req.srq_entry_size = rte_cpu_to_le_16(ctx->srq_entry_size);
5411                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
5412                                       &req.srq_pg_size_srq_lvl,
5413                                       &req.srq_page_dir);
5414         }
5415
5416         if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_CQ) {
5417                 ctx_pg = &ctx->cq_mem;
5418                 req.cq_num_entries = rte_cpu_to_le_32(ctx_pg->entries);
5419                 req.cq_num_l2_entries =
5420                                 rte_cpu_to_le_16(ctx->cq_max_l2_entries);
5421                 req.cq_entry_size = rte_cpu_to_le_16(ctx->cq_entry_size);
5422                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
5423                                       &req.cq_pg_size_cq_lvl,
5424                                       &req.cq_page_dir);
5425         }
5426
5427         if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_VNIC) {
5428                 ctx_pg = &ctx->vnic_mem;
5429                 req.vnic_num_vnic_entries =
5430                         rte_cpu_to_le_16(ctx->vnic_max_vnic_entries);
5431                 req.vnic_num_ring_table_entries =
5432                         rte_cpu_to_le_16(ctx->vnic_max_ring_table_entries);
5433                 req.vnic_entry_size = rte_cpu_to_le_16(ctx->vnic_entry_size);
5434                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
5435                                       &req.vnic_pg_size_vnic_lvl,
5436                                       &req.vnic_page_dir);
5437         }
5438
5439         if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_STAT) {
5440                 ctx_pg = &ctx->stat_mem;
5441                 req.stat_num_entries = rte_cpu_to_le_16(ctx->stat_max_entries);
5442                 req.stat_entry_size = rte_cpu_to_le_16(ctx->stat_entry_size);
5443                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
5444                                       &req.stat_pg_size_stat_lvl,
5445                                       &req.stat_page_dir);
5446         }
5447
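        /*
         * The loop below programs the TQM backing stores. In the HSI
         * definitions used by this driver the enable bits for TQM_SP and
         * TQM_RING0..TQM_RING7 are contiguous, so nine iterations with a
         * left-shifting 'ena' cover them all; the MRAV and TIM bits come
         * next in the bitmap, which is why TQM_RING8 is handled separately
         * after the loop.
         */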
5448         req.tqm_entry_size = rte_cpu_to_le_16(ctx->tqm_entry_size);
5449         num_entries = &req.tqm_sp_num_entries;
5450         pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl;
5451         pg_dir = &req.tqm_sp_page_dir;
5452         ena = HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP;
5453         for (i = 0; i < 9; i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
5454                 if (!(enables & ena))
5455                         continue;
5456
5457                 req.tqm_entry_size = rte_cpu_to_le_16(ctx->tqm_entry_size);
5458
5459                 ctx_pg = ctx->tqm_mem[i];
5460                 *num_entries = rte_cpu_to_le_16(ctx_pg->entries);
5461                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
5462         }
5463
5464         if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_RING8) {
5465                 /* DPDK does not need to configure the MRAV and TIM backing
5466                  * stores, so those enable bits are skipped and the driver
5467                  * goes straight to configuring TQM_RING8 here.
5468                  */
5469                 ctx_pg = ctx->tqm_mem[BNXT_MAX_TQM_LEGACY_RINGS];
5470                 req.tqm_ring8_num_entries = rte_cpu_to_le_16(ctx_pg->entries);
5471                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
5472                                       &req.tqm_ring8_pg_size_tqm_ring_lvl,
5473                                       &req.tqm_ring8_page_dir);
5474         }
5475
5476         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5477         HWRM_CHECK_RESULT();
5478         HWRM_UNLOCK();
5479
5480         return rc;
5481 }
5482
5483 int bnxt_hwrm_ext_port_qstats(struct bnxt *bp)
5484 {
5485         struct hwrm_port_qstats_ext_input req = {0};
5486         struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
5487         struct bnxt_pf_info *pf = bp->pf;
5488         int rc;
5489
5490         if (!(bp->flags & BNXT_FLAG_EXT_RX_PORT_STATS ||
5491               bp->flags & BNXT_FLAG_EXT_TX_PORT_STATS))
5492                 return 0;
5493
5494         HWRM_PREP(&req, HWRM_PORT_QSTATS_EXT, BNXT_USE_CHIMP_MB);
5495
5496         req.port_id = rte_cpu_to_le_16(pf->port_id);
5497         if (bp->flags & BNXT_FLAG_EXT_TX_PORT_STATS) {
5498                 req.tx_stat_host_addr =
5499                         rte_cpu_to_le_64(bp->hw_tx_port_stats_ext_map);
5500                 req.tx_stat_size =
5501                         rte_cpu_to_le_16(sizeof(struct tx_port_stats_ext));
5502         }
5503         if (bp->flags & BNXT_FLAG_EXT_RX_PORT_STATS) {
5504                 req.rx_stat_host_addr =
5505                         rte_cpu_to_le_64(bp->hw_rx_port_stats_ext_map);
5506                 req.rx_stat_size =
5507                         rte_cpu_to_le_16(sizeof(struct rx_port_stats_ext));
5508         }
5509         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5510
5511         if (rc) {
5512                 bp->fw_rx_port_stats_ext_size = 0;
5513                 bp->fw_tx_port_stats_ext_size = 0;
5514         } else {
5515                 bp->fw_rx_port_stats_ext_size =
5516                         rte_le_to_cpu_16(resp->rx_stat_size);
5517                 bp->fw_tx_port_stats_ext_size =
5518                         rte_le_to_cpu_16(resp->tx_stat_size);
5519         }
5520
5521         HWRM_CHECK_RESULT();
5522         HWRM_UNLOCK();
5523
5524         return rc;
5525 }
5526
5527 int
5528 bnxt_hwrm_tunnel_redirect(struct bnxt *bp, uint8_t type)
5529 {
5530         struct hwrm_cfa_redirect_tunnel_type_alloc_input req = {0};
5531         struct hwrm_cfa_redirect_tunnel_type_alloc_output *resp =
5532                 bp->hwrm_cmd_resp_addr;
5533         int rc = 0;
5534
5535         HWRM_PREP(&req, HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC, BNXT_USE_CHIMP_MB);
5536         req.tunnel_type = type;
5537         req.dest_fid = bp->fw_fid;
5538         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5539         HWRM_CHECK_RESULT();
5540
5541         HWRM_UNLOCK();
5542
5543         return rc;
5544 }
5545
5546 int
5547 bnxt_hwrm_tunnel_redirect_free(struct bnxt *bp, uint8_t type)
5548 {
5549         struct hwrm_cfa_redirect_tunnel_type_free_input req = {0};
5550         struct hwrm_cfa_redirect_tunnel_type_free_output *resp =
5551                 bp->hwrm_cmd_resp_addr;
5552         int rc = 0;
5553
5554         HWRM_PREP(&req, HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE, BNXT_USE_CHIMP_MB);
5555         req.tunnel_type = type;
5556         req.dest_fid = bp->fw_fid;
5557         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5558         HWRM_CHECK_RESULT();
5559
5560         HWRM_UNLOCK();
5561
5562         return rc;
5563 }
5564
5565 int bnxt_hwrm_tunnel_redirect_query(struct bnxt *bp, uint32_t *type)
5566 {
5567         struct hwrm_cfa_redirect_query_tunnel_type_input req = {0};
5568         struct hwrm_cfa_redirect_query_tunnel_type_output *resp =
5569                 bp->hwrm_cmd_resp_addr;
5570         int rc = 0;
5571
5572         HWRM_PREP(&req, HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE, BNXT_USE_CHIMP_MB);
5573         req.src_fid = bp->fw_fid;
5574         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5575         HWRM_CHECK_RESULT();
5576
5577         if (type)
5578                 *type = rte_le_to_cpu_32(resp->tunnel_mask);
5579
5580         HWRM_UNLOCK();
5581
5582         return rc;
5583 }
5584
5585 int bnxt_hwrm_tunnel_redirect_info(struct bnxt *bp, uint8_t tun_type,
5586                                    uint16_t *dst_fid)
5587 {
5588         struct hwrm_cfa_redirect_tunnel_type_info_input req = {0};
5589         struct hwrm_cfa_redirect_tunnel_type_info_output *resp =
5590                 bp->hwrm_cmd_resp_addr;
5591         int rc = 0;
5592
5593         HWRM_PREP(&req, HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO, BNXT_USE_CHIMP_MB);
5594         req.src_fid = bp->fw_fid;
5595         req.tunnel_type = tun_type;
5596         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5597         HWRM_CHECK_RESULT();
5598
5599         if (dst_fid)
5600                 *dst_fid = rte_le_to_cpu_16(resp->dest_fid);
5601
5602         PMD_DRV_LOG(DEBUG, "dst_fid: %x\n", rte_le_to_cpu_16(resp->dest_fid));
5603
5604         HWRM_UNLOCK();
5605
5606         return rc;
5607 }
5608
5609 int bnxt_hwrm_set_mac(struct bnxt *bp)
5610 {
5611         struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
5612         struct hwrm_func_vf_cfg_input req = {0};
5613         int rc = 0;
5614
5615         if (!BNXT_VF(bp))
5616                 return 0;
5617
5618         HWRM_PREP(&req, HWRM_FUNC_VF_CFG, BNXT_USE_CHIMP_MB);
5619
5620         req.enables =
5621                 rte_cpu_to_le_32(HWRM_FUNC_VF_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
5622         memcpy(req.dflt_mac_addr, bp->mac_addr, RTE_ETHER_ADDR_LEN);
5623
5624         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5625
5626         HWRM_CHECK_RESULT();
5627
5628         HWRM_UNLOCK();
5629
5630         return rc;
5631 }
5632
5633 int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
5634 {
5635         struct hwrm_func_drv_if_change_output *resp = bp->hwrm_cmd_resp_addr;
5636         struct hwrm_func_drv_if_change_input req = {0};
5637         uint32_t flags;
5638         int rc;
5639
5640         if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
5641                 return 0;
5642
5643         /* Do not issue FUNC_DRV_IF_CHANGE during reset recovery.
5644          * If FUNC_DRV_IF_CHANGE is sent with the "down" flag before
5645          * FUNC_DRV_UNRGTR, the FW resets before FUNC_DRV_UNRGTR completes.
5646          */
5647         if (!up && (bp->flags & BNXT_FLAG_FW_RESET))
5648                 return 0;
5649
5650         HWRM_PREP(&req, HWRM_FUNC_DRV_IF_CHANGE, BNXT_USE_CHIMP_MB);
5651
5652         if (up)
5653                 req.flags =
5654                 rte_cpu_to_le_32(HWRM_FUNC_DRV_IF_CHANGE_INPUT_FLAGS_UP);
5655
5656         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5657
5658         HWRM_CHECK_RESULT();
5659         flags = rte_le_to_cpu_32(resp->flags);
5660         HWRM_UNLOCK();
5661
5662         if (!up)
5663                 return 0;
5664
5665         if (flags & HWRM_FUNC_DRV_IF_CHANGE_OUTPUT_FLAGS_HOT_FW_RESET_DONE) {
5666                 PMD_DRV_LOG(INFO, "FW reset happened while port was down\n");
5667                 bp->flags |= BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE;
5668         }
5669
5670         return 0;
5671 }
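
/*
 * Illustrative call pattern (a sketch, not code taken from elsewhere in this
 * driver): bnxt_hwrm_if_change() is expected to be issued with up == true
 * before the port is started and with up == false once it has been stopped,
 * for example:
 *
 *	rc = bnxt_hwrm_if_change(bp, true);
 *	if (!rc && (bp->flags & BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE))
 *		... re-initialize HWRM resources before continuing with start ...
 */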
5672
5673 int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
5674 {
5675         struct hwrm_error_recovery_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
5676         struct bnxt_error_recovery_info *info = bp->recovery_info;
5677         struct hwrm_error_recovery_qcfg_input req = {0};
5678         uint32_t flags = 0;
5679         unsigned int i;
5680         int rc;
5681
5682         /* Older FW does not have error recovery support */
5683         if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
5684                 return 0;
5685
5686         HWRM_PREP(&req, HWRM_ERROR_RECOVERY_QCFG, BNXT_USE_CHIMP_MB);
5687
5688         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5689
5690         HWRM_CHECK_RESULT();
5691
5692         flags = rte_le_to_cpu_32(resp->flags);
5693         if (flags & HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FLAGS_HOST)
5694                 info->flags |= BNXT_FLAG_ERROR_RECOVERY_HOST;
5695         else if (flags & HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FLAGS_CO_CPU)
5696                 info->flags |= BNXT_FLAG_ERROR_RECOVERY_CO_CPU;
5697
5698         if ((info->flags & BNXT_FLAG_ERROR_RECOVERY_CO_CPU) &&
5699             !(bp->flags & BNXT_FLAG_KONG_MB_EN)) {
5700                 rc = -EINVAL;
5701                 goto err;
5702         }
5703
5704         /* FW-returned values are in units of 100 msec; convert to msec here */
5705         info->driver_polling_freq =
5706                 rte_le_to_cpu_32(resp->driver_polling_freq) * 100;
5707         info->primary_func_wait_period =
5708                 rte_le_to_cpu_32(resp->master_func_wait_period) * 100;
5709         info->normal_func_wait_period =
5710                 rte_le_to_cpu_32(resp->normal_func_wait_period) * 100;
5711         info->primary_func_wait_period_after_reset =
5712                 rte_le_to_cpu_32(resp->master_func_wait_period_after_reset) * 100;
5713         info->max_bailout_time_after_reset =
5714                 rte_le_to_cpu_32(resp->max_bailout_time_after_reset) * 100;
5715         info->status_regs[BNXT_FW_STATUS_REG] =
5716                 rte_le_to_cpu_32(resp->fw_health_status_reg);
5717         info->status_regs[BNXT_FW_HEARTBEAT_CNT_REG] =
5718                 rte_le_to_cpu_32(resp->fw_heartbeat_reg);
5719         info->status_regs[BNXT_FW_RECOVERY_CNT_REG] =
5720                 rte_le_to_cpu_32(resp->fw_reset_cnt_reg);
5721         info->status_regs[BNXT_FW_RESET_INPROG_REG] =
5722                 rte_le_to_cpu_32(resp->reset_inprogress_reg);
5723         info->reg_array_cnt =
5724                 rte_le_to_cpu_32(resp->reg_array_cnt);
5725
5726         if (info->reg_array_cnt >= BNXT_NUM_RESET_REG) {
5727                 rc = -EINVAL;
5728                 goto err;
5729         }
5730
5731         for (i = 0; i < info->reg_array_cnt; i++) {
5732                 info->reset_reg[i] =
5733                         rte_le_to_cpu_32(resp->reset_reg[i]);
5734                 info->reset_reg_val[i] =
5735                         rte_le_to_cpu_32(resp->reset_reg_val[i]);
5736                 info->delay_after_reset[i] =
5737                         resp->delay_after_reset[i];
5738         }
5739 err:
5740         HWRM_UNLOCK();
5741
5742         /* Map the FW status registers */
5743         if (!rc)
5744                 rc = bnxt_map_fw_health_status_regs(bp);
5745
5746         if (rc) {
5747                 rte_free(bp->recovery_info);
5748                 bp->recovery_info = NULL;
5749         }
5750         return rc;
5751 }
5752
5753 int bnxt_hwrm_fw_reset(struct bnxt *bp)
5754 {
5755         struct hwrm_fw_reset_output *resp = bp->hwrm_cmd_resp_addr;
5756         struct hwrm_fw_reset_input req = {0};
5757         int rc;
5758
5759         if (!BNXT_PF(bp))
5760                 return -EOPNOTSUPP;
5761
5762         HWRM_PREP(&req, HWRM_FW_RESET, BNXT_USE_KONG(bp));
5763
5764         req.embedded_proc_type =
5765                 HWRM_FW_RESET_INPUT_EMBEDDED_PROC_TYPE_CHIP;
5766         req.selfrst_status =
5767                 HWRM_FW_RESET_INPUT_SELFRST_STATUS_SELFRSTASAP;
5768         req.flags = HWRM_FW_RESET_INPUT_FLAGS_RESET_GRACEFUL;
5769
5770         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req),
5771                                     BNXT_USE_KONG(bp));
5772
5773         HWRM_CHECK_RESULT();
5774         HWRM_UNLOCK();
5775
5776         return rc;
5777 }
5778
5779 int bnxt_hwrm_port_ts_query(struct bnxt *bp, uint8_t path, uint64_t *timestamp)
5780 {
5781         struct hwrm_port_ts_query_output *resp = bp->hwrm_cmd_resp_addr;
5782         struct hwrm_port_ts_query_input req = {0};
5783         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
5784         uint32_t flags = 0;
5785         int rc;
5786
5787         if (!ptp)
5788                 return 0;
5789
5790         HWRM_PREP(&req, HWRM_PORT_TS_QUERY, BNXT_USE_CHIMP_MB);
5791
5792         switch (path) {
5793         case BNXT_PTP_FLAGS_PATH_TX:
5794                 flags |= HWRM_PORT_TS_QUERY_INPUT_FLAGS_PATH_TX;
5795                 break;
5796         case BNXT_PTP_FLAGS_PATH_RX:
5797                 flags |= HWRM_PORT_TS_QUERY_INPUT_FLAGS_PATH_RX;
5798                 break;
5799         case BNXT_PTP_FLAGS_CURRENT_TIME:
5800                 flags |= HWRM_PORT_TS_QUERY_INPUT_FLAGS_CURRENT_TIME;
5801                 break;
5802         }
5803
5804         req.flags = rte_cpu_to_le_32(flags);
5805         req.port_id = rte_cpu_to_le_16(bp->pf->port_id);
5806
5807         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5808
5809         HWRM_CHECK_RESULT();
5810
5811         if (timestamp) {
5812                 *timestamp = rte_le_to_cpu_32(resp->ptp_msg_ts[0]);
5813                 *timestamp |=
5814                         (uint64_t)(rte_le_to_cpu_32(resp->ptp_msg_ts[1])) << 32;
5815         }
5816         HWRM_UNLOCK();
5817
5818         return rc;
5819 }
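
/*
 * Example usage (sketch only): reading the current PHC time, assuming PTP
 * support has been initialized so that bp->ptp_cfg is non-NULL:
 *
 *	uint64_t ns = 0;
 *
 *	rc = bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_CURRENT_TIME, &ns);
 *
 * On success 'ns' holds the 64-bit timestamp assembled from the two 32-bit
 * words returned by the FW.
 */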
5820
5821 int bnxt_hwrm_cfa_counter_qcaps(struct bnxt *bp, uint16_t *max_fc)
5822 {
5823         int rc = 0;
5824
5825         struct hwrm_cfa_counter_qcaps_input req = {0};
5826         struct hwrm_cfa_counter_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
5827
5828         if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
5829                 PMD_DRV_LOG(DEBUG,
5830                             "Not a PF or trusted VF. Command not supported\n");
5831                 return 0;
5832         }
5833
5834         HWRM_PREP(&req, HWRM_CFA_COUNTER_QCAPS, BNXT_USE_KONG(bp));
5835         req.target_id = rte_cpu_to_le_16(bp->fw_fid);
5836         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
5837
5838         HWRM_CHECK_RESULT();
5839         if (max_fc)
5840                 *max_fc = rte_le_to_cpu_16(resp->max_rx_fc);
5841         HWRM_UNLOCK();
5842
5843         return 0;
5844 }
5845
5846 int bnxt_hwrm_ctx_rgtr(struct bnxt *bp, rte_iova_t dma_addr, uint16_t *ctx_id)
5847 {
5848         int rc = 0;
5849         struct hwrm_cfa_ctx_mem_rgtr_input req = {.req_type = 0 };
5850         struct hwrm_cfa_ctx_mem_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
5851
5852         if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
5853                 PMD_DRV_LOG(DEBUG,
5854                             "Not a PF or trusted VF. Command not supported\n");
5855                 return 0;
5856         }
5857
5858         HWRM_PREP(&req, HWRM_CFA_CTX_MEM_RGTR, BNXT_USE_KONG(bp));
5859
5860         req.page_level = HWRM_CFA_CTX_MEM_RGTR_INPUT_PAGE_LEVEL_LVL_0;
5861         req.page_size = HWRM_CFA_CTX_MEM_RGTR_INPUT_PAGE_SIZE_2M;
5862         req.page_dir = rte_cpu_to_le_64(dma_addr);
5863
5864         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
5865
5866         HWRM_CHECK_RESULT();
5867         if (ctx_id) {
5868                 *ctx_id  = rte_le_to_cpu_16(resp->ctx_id);
5869                 PMD_DRV_LOG(DEBUG, "ctx_id = %d\n", *ctx_id);
5870         }
5871         HWRM_UNLOCK();
5872
5873         return 0;
5874 }
5875
5876 int bnxt_hwrm_ctx_unrgtr(struct bnxt *bp, uint16_t ctx_id)
5877 {
5878         int rc = 0;
5879         struct hwrm_cfa_ctx_mem_unrgtr_input req = {.req_type = 0 };
5880         struct hwrm_cfa_ctx_mem_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
5881
5882         if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
5883                 PMD_DRV_LOG(DEBUG,
5884                             "Not a PF or trusted VF. Command not supported\n");
5885                 return 0;
5886         }
5887
5888         HWRM_PREP(&req, HWRM_CFA_CTX_MEM_UNRGTR, BNXT_USE_KONG(bp));
5889
5890         req.ctx_id = rte_cpu_to_le_16(ctx_id);
5891
5892         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
5893
5894         HWRM_CHECK_RESULT();
5895         HWRM_UNLOCK();
5896
5897         return rc;
5898 }
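
/*
 * Sketch of the expected pairing (illustrative only): the caller allocates a
 * DMA-able area that fits in one 2MB page (matching the PAGE_LEVEL_LVL_0 and
 * PAGE_SIZE_2M values programmed in bnxt_hwrm_ctx_rgtr()), registers it to
 * obtain a context id and unregisters that id on teardown:
 *
 *	uint16_t ctx_id;
 *
 *	rc = bnxt_hwrm_ctx_rgtr(bp, dma_iova, &ctx_id);
 *	...
 *	rc = bnxt_hwrm_ctx_unrgtr(bp, ctx_id);
 *
 * Here 'dma_iova' stands for the IOVA of the caller-allocated memory and is
 * not a name used elsewhere in this file.
 */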
5899
5900 int bnxt_hwrm_cfa_counter_cfg(struct bnxt *bp, enum bnxt_flow_dir dir,
5901                               uint16_t cntr, uint16_t ctx_id,
5902                               uint32_t num_entries, bool enable)
5903 {
5904         struct hwrm_cfa_counter_cfg_input req = {0};
5905         struct hwrm_cfa_counter_cfg_output *resp = bp->hwrm_cmd_resp_addr;
5906         uint16_t flags = 0;
5907         int rc;
5908
5909         if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
5910                 PMD_DRV_LOG(DEBUG,
5911                             "Not a PF or trusted VF. Command not supported\n");
5912                 return 0;
5913         }
5914
5915         HWRM_PREP(&req, HWRM_CFA_COUNTER_CFG, BNXT_USE_KONG(bp));
5916
5917         req.target_id = rte_cpu_to_le_16(bp->fw_fid);
5918         req.counter_type = rte_cpu_to_le_16(cntr);
5919         flags = enable ? HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_CFG_MODE_ENABLE :
5920                 HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_CFG_MODE_DISABLE;
5921         flags |= HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_DATA_TRANSFER_MODE_PULL;
5922         if (dir == BNXT_DIR_RX)
5923                 flags |=  HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_PATH_RX;
5924         else if (dir == BNXT_DIR_TX)
5925                 flags |=  HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_PATH_TX;
5926         req.flags = rte_cpu_to_le_16(flags);
5927         req.ctx_id =  rte_cpu_to_le_16(ctx_id);
5928         req.num_entries = rte_cpu_to_le_32(num_entries);
5929
5930         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
5931         HWRM_CHECK_RESULT();
5932         HWRM_UNLOCK();
5933
5934         return 0;
5935 }
5936
5937 int bnxt_hwrm_cfa_counter_qstats(struct bnxt *bp,
5938                                  enum bnxt_flow_dir dir,
5939                                  uint16_t cntr,
5940                                  uint16_t num_entries)
5941 {
5942         struct hwrm_cfa_counter_qstats_output *resp = bp->hwrm_cmd_resp_addr;
5943         struct hwrm_cfa_counter_qstats_input req = {0};
5944         uint16_t flow_ctx_id = 0;
5945         uint16_t flags = 0;
5946         int rc = 0;
5947
5948         if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
5949                 PMD_DRV_LOG(DEBUG,
5950                             "Not a PF or trusted VF. Command not supported\n");
5951                 return 0;
5952         }
5953
5954         if (dir == BNXT_DIR_RX) {
5955                 flow_ctx_id = bp->flow_stat->rx_fc_in_tbl.ctx_id;
5956                 flags = HWRM_CFA_COUNTER_QSTATS_INPUT_FLAGS_PATH_RX;
5957         } else if (dir == BNXT_DIR_TX) {
5958                 flow_ctx_id = bp->flow_stat->tx_fc_in_tbl.ctx_id;
5959                 flags = HWRM_CFA_COUNTER_QSTATS_INPUT_FLAGS_PATH_TX;
5960         }
5961
5962         HWRM_PREP(&req, HWRM_CFA_COUNTER_QSTATS, BNXT_USE_KONG(bp));
5963         req.target_id = rte_cpu_to_le_16(bp->fw_fid);
5964         req.counter_type = rte_cpu_to_le_16(cntr);
5965         req.input_flow_ctx_id = rte_cpu_to_le_16(flow_ctx_id);
5966         req.num_entries = rte_cpu_to_le_16(num_entries);
5967         req.flags = rte_cpu_to_le_16(flags);
5968         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
5969
5970         HWRM_CHECK_RESULT();
5971         HWRM_UNLOCK();
5972
5973         return 0;
5974 }
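
/*
 * Rough usage of the two routines above (a sketch of how the flow-counter
 * path is expected to consume them): query the limits with
 * bnxt_hwrm_cfa_counter_qcaps(), register the DMA table that will receive
 * the counters with bnxt_hwrm_ctx_rgtr(), enable the counter type with
 * bnxt_hwrm_cfa_counter_cfg(..., true), and then periodically pull the
 * counters into that table with bnxt_hwrm_cfa_counter_qstats().
 */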
5975
5976 int bnxt_hwrm_first_vf_id_query(struct bnxt *bp, uint16_t fid,
5977                                 uint16_t *first_vf_id)
5978 {
5979         int rc = 0;
5980         struct hwrm_func_qcaps_input req = {.req_type = 0 };
5981         struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
5982
5983         HWRM_PREP(&req, HWRM_FUNC_QCAPS, BNXT_USE_CHIMP_MB);
5984
5985         req.fid = rte_cpu_to_le_16(fid);
5986
5987         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5988
5989         HWRM_CHECK_RESULT();
5990
5991         if (first_vf_id)
5992                 *first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
5993
5994         HWRM_UNLOCK();
5995
5996         return rc;
5997 }
5998
5999 int bnxt_hwrm_cfa_pair_exists(struct bnxt *bp, struct bnxt_representor *rep_bp)
6000 {
6001         struct hwrm_cfa_pair_info_output *resp = bp->hwrm_cmd_resp_addr;
6002         struct hwrm_cfa_pair_info_input req = {0};
6003         int rc = 0;
6004
6005         if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
6006                 PMD_DRV_LOG(DEBUG,
6007                             "Not a PF or trusted VF. Command not supported\n");
6008                 return 0;
6009         }
6010
6011         HWRM_PREP(&req, HWRM_CFA_PAIR_INFO, BNXT_USE_CHIMP_MB);
6012         snprintf(req.pair_name, sizeof(req.pair_name), "%svfr%d",
6013                  bp->eth_dev->data->name, rep_bp->vf_id);
6014         req.flags =
6015                 rte_cpu_to_le_32(HWRM_CFA_PAIR_INFO_INPUT_FLAGS_LOOKUP_TYPE);
6016
6017         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
6018         HWRM_CHECK_RESULT();
6019         if (rc == HWRM_ERR_CODE_SUCCESS && strlen(resp->pair_name)) {
6020                 HWRM_UNLOCK();
6021                 return !rc; /* pair already exists; report non-zero to the caller */
6022         }
6023         HWRM_UNLOCK();
6024         return rc;
6025 }
6026
6027 int bnxt_hwrm_cfa_pair_alloc(struct bnxt *bp, struct bnxt_representor *rep_bp)
6028 {
6029         struct hwrm_cfa_pair_alloc_output *resp = bp->hwrm_cmd_resp_addr;
6030         struct hwrm_cfa_pair_alloc_input req = {0};
6031         int rc;
6032
6033         if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
6034                 PMD_DRV_LOG(DEBUG,
6035                             "Not a PF or trusted VF. Command not supported\n");
6036                 return 0;
6037         }
6038
6039         HWRM_PREP(&req, HWRM_CFA_PAIR_ALLOC, BNXT_USE_CHIMP_MB);
6040         req.pair_mode = HWRM_CFA_PAIR_FREE_INPUT_PAIR_MODE_REP2FN_TRUFLOW;
6041         snprintf(req.pair_name, sizeof(req.pair_name), "%svfr%d",
6042                  bp->eth_dev->data->name, rep_bp->vf_id);
6043
6044         req.pf_b_id = rep_bp->parent_pf_idx;
6045         req.vf_b_id = BNXT_REP_PF(rep_bp) ? rte_cpu_to_le_16(((uint16_t)-1)) :
6046                                                 rte_cpu_to_le_16(rep_bp->vf_id);
6047         req.vf_a_id = rte_cpu_to_le_16(bp->fw_fid);
6048         req.host_b_id = 1; /* TBD - Confirm if this is OK */
6049
6050         req.enables |= rep_bp->flags & BNXT_REP_Q_R2F_VALID ?
6051                         HWRM_CFA_PAIR_ALLOC_INPUT_ENABLES_Q_AB_VALID : 0;
6052         req.enables |= rep_bp->flags & BNXT_REP_Q_F2R_VALID ?
6053                         HWRM_CFA_PAIR_ALLOC_INPUT_ENABLES_Q_BA_VALID : 0;
6054         req.enables |= rep_bp->flags & BNXT_REP_FC_R2F_VALID ?
6055                         HWRM_CFA_PAIR_ALLOC_INPUT_ENABLES_FC_AB_VALID : 0;
6056         req.enables |= rep_bp->flags & BNXT_REP_FC_F2R_VALID ?
6057                         HWRM_CFA_PAIR_ALLOC_INPUT_ENABLES_FC_BA_VALID : 0;
6058
6059         req.q_ab = rep_bp->rep_q_r2f;
6060         req.q_ba = rep_bp->rep_q_f2r;
6061         req.fc_ab = rep_bp->rep_fc_r2f;
6062         req.fc_ba = rep_bp->rep_fc_f2r;
6063
6064         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
6065         HWRM_CHECK_RESULT();
6066
6067         HWRM_UNLOCK();
6068         PMD_DRV_LOG(DEBUG, "%s %d allocated\n",
6069                     BNXT_REP_PF(rep_bp) ? "PFR" : "VFR", rep_bp->vf_id);
6070         return rc;
6071 }
6072
6073 int bnxt_hwrm_cfa_pair_free(struct bnxt *bp, struct bnxt_representor *rep_bp)
6074 {
6075         struct hwrm_cfa_pair_free_output *resp = bp->hwrm_cmd_resp_addr;
6076         struct hwrm_cfa_pair_free_input req = {0};
6077         int rc;
6078
6079         if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
6080                 PMD_DRV_LOG(DEBUG,
6081                             "Not a PF or trusted VF. Command not supported\n");
6082                 return 0;
6083         }
6084
6085         HWRM_PREP(&req, HWRM_CFA_PAIR_FREE, BNXT_USE_CHIMP_MB);
6086         snprintf(req.pair_name, sizeof(req.pair_name), "%svfr%d",
6087                  bp->eth_dev->data->name, rep_bp->vf_id);
6088         req.pf_b_id = rep_bp->parent_pf_idx;
6089         req.pair_mode = HWRM_CFA_PAIR_FREE_INPUT_PAIR_MODE_REP2FN_TRUFLOW;
6090         req.vf_id = BNXT_REP_PF(rep_bp) ? rte_cpu_to_le_16(((uint16_t)-1)) :
6091                                                 rte_cpu_to_le_16(rep_bp->vf_id);
6092         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
6093         HWRM_CHECK_RESULT();
6094         HWRM_UNLOCK();
6095         PMD_DRV_LOG(DEBUG, "%s %d freed\n", BNXT_REP_PF(rep_bp) ? "PFR" : "VFR",
6096                     rep_bp->vf_id);
6097         return rc;
6098 }
6099
6100 int bnxt_hwrm_cfa_adv_flow_mgmt_qcaps(struct bnxt *bp)
6101 {
6102         struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp =
6103                                         bp->hwrm_cmd_resp_addr;
6104         struct hwrm_cfa_adv_flow_mgnt_qcaps_input req = {0};
6105         uint32_t flags = 0;
6106         int rc = 0;
6107
6108         if (!(bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_MGMT))
6109                 return 0;
6110
6111         if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
6112                 PMD_DRV_LOG(DEBUG,
6113                             "Not a PF or trusted VF. Command not supported\n");
6114                 return 0;
6115         }
6116
6117         HWRM_PREP(&req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS, BNXT_USE_CHIMP_MB);
6118         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
6119
6120         HWRM_CHECK_RESULT();
6121         flags = rte_le_to_cpu_32(resp->flags);
6122         HWRM_UNLOCK();
6123
6124         if (flags & HWRM_CFA_ADV_FLOW_MGNT_QCAPS_RFS_RING_TBL_IDX_V2_SUPPORTED)
6125                 bp->flags |= BNXT_FLAG_FLOW_CFA_RFS_RING_TBL_IDX_V2;
6126         else
6127                 bp->flags |= BNXT_FLAG_RFS_NEEDS_VNIC;
6128
6129         return rc;
6130 }
6131
6132 int bnxt_hwrm_fw_echo_reply(struct bnxt *bp, uint32_t echo_req_data1,
6133                             uint32_t echo_req_data2)
6134 {
6135         struct hwrm_func_echo_response_input req = {0};
6136         struct hwrm_func_echo_response_output *resp = bp->hwrm_cmd_resp_addr;
6137         int rc;
6138
6139         HWRM_PREP(&req, HWRM_FUNC_ECHO_RESPONSE, BNXT_USE_CHIMP_MB);
6140         req.event_data1 = rte_cpu_to_le_32(echo_req_data1);
6141         req.event_data2 = rte_cpu_to_le_32(echo_req_data2);
6142
6143         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
6144
6145         HWRM_CHECK_RESULT();
6146         HWRM_UNLOCK();
6147
6148         return rc;
6149 }
6150
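/*
 * Lightweight HWRM_VER_GET used to poll whether the FW has become ready
 * (for example after a reset): failures are reported silently and a
 * DEV_NOT_RDY response is mapped to -EAGAIN so the caller can simply retry.
 */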
6151 int bnxt_hwrm_poll_ver_get(struct bnxt *bp)
6152 {
6153         struct hwrm_ver_get_input req = {.req_type = 0 };
6154         struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
6155         int rc = 0;
6156
6157         bp->max_req_len = HWRM_MAX_REQ_LEN;
6158         bp->max_resp_len = BNXT_PAGE_SIZE;
6159         bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT;
6160
6161         HWRM_PREP(&req, HWRM_VER_GET, BNXT_USE_CHIMP_MB);
6162         req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
6163         req.hwrm_intf_min = HWRM_VERSION_MINOR;
6164         req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
6165
6166         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
6167
6168         HWRM_CHECK_RESULT_SILENT();
6169
6170         if (resp->flags & HWRM_VER_GET_OUTPUT_FLAGS_DEV_NOT_RDY)
6171                 rc = -EAGAIN;
6172
6173         HWRM_UNLOCK();
6174
6175         return rc;
6176 }
6177
6178 int bnxt_hwrm_read_sfp_module_eeprom_info(struct bnxt *bp, uint16_t i2c_addr,
6179                                           uint16_t page_number, uint16_t start_addr,
6180                                           uint16_t data_length, uint8_t *buf)
6181 {
6182         struct hwrm_port_phy_i2c_read_output *resp = bp->hwrm_cmd_resp_addr;
6183         struct hwrm_port_phy_i2c_read_input req = {0};
6184         uint32_t enables = HWRM_PORT_PHY_I2C_READ_INPUT_ENABLES_PAGE_OFFSET;
6185         int rc, byte_offset = 0;
6186
6187         do {
6188                 uint16_t xfer_size;
6189
6190                 HWRM_PREP(&req, HWRM_PORT_PHY_I2C_READ, BNXT_USE_CHIMP_MB);
6191                 req.i2c_slave_addr = i2c_addr;
6192                 req.page_number = rte_cpu_to_le_16(page_number);
6193                 req.port_id = rte_cpu_to_le_16(bp->pf->port_id);
6194
6195                 xfer_size = RTE_MIN(data_length, BNXT_MAX_PHY_I2C_RESP_SIZE);
6196                 req.page_offset = rte_cpu_to_le_16(start_addr + byte_offset);
6197                 req.data_length = xfer_size;
6198                 req.enables = rte_cpu_to_le_32((start_addr + byte_offset) ? enables : 0);
6199                 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
6200                 HWRM_CHECK_RESULT();
6201
6202                 memcpy(buf + byte_offset, resp->data, xfer_size);
6203
6204                 data_length -= xfer_size;
6205                 byte_offset += xfer_size;
6206
6207                 HWRM_UNLOCK();
6208         } while (data_length > 0);
6209
6210         return rc;
6211 }
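
/*
 * Worked example (sketch): a request for 256 bytes starting at offset 0 of
 * page 0 at I2C address 0x50 is issued in chunks of at most
 * BNXT_MAX_PHY_I2C_RESP_SIZE bytes; only chunks with a non-zero effective
 * offset set the PAGE_OFFSET enable, while the first chunk relies on the FW
 * default offset of 0:
 *
 *	uint8_t eeprom[256];
 *
 *	rc = bnxt_hwrm_read_sfp_module_eeprom_info(bp, 0x50, 0, 0,
 *						    sizeof(eeprom), eeprom);
 */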
6212
6213 void bnxt_free_hwrm_tx_ring(struct bnxt *bp, int queue_index)
6214 {
6215         struct bnxt_tx_queue *txq = bp->tx_queues[queue_index];
6216         struct bnxt_tx_ring_info *txr = txq->tx_ring;
6217         struct bnxt_ring *ring = txr->tx_ring_struct;
6218         struct bnxt_cp_ring_info *cpr = txq->cp_ring;
6219
6220         bnxt_hwrm_ring_free(bp, ring,
6221                             HWRM_RING_FREE_INPUT_RING_TYPE_TX,
6222                             cpr->cp_ring_struct->fw_ring_id);
6223         txr->tx_raw_prod = 0;
6224         txr->tx_raw_cons = 0;
6225         memset(txr->tx_desc_ring, 0,
6226                 txr->tx_ring_struct->ring_size * sizeof(*txr->tx_desc_ring));
6227         memset(txr->tx_buf_ring, 0,
6228                 txr->tx_ring_struct->ring_size * sizeof(*txr->tx_buf_ring));
6229
6230         bnxt_hwrm_stat_ctx_free(bp, cpr);
6231
6232         bnxt_free_cp_ring(bp, cpr);
6233 }
6234
6235 int bnxt_hwrm_config_host_mtu(struct bnxt *bp)
6236 {
6237         struct hwrm_func_cfg_input req = {0};
6238         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
6239         int rc;
6240
6241         if (!BNXT_PF(bp))
6242                 return 0;
6243
6244         HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
6245
6246         req.fid = rte_cpu_to_le_16(0xffff);
6247         req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_HOST_MTU);
6248         req.host_mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu);
6249
6250         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
6251         HWRM_CHECK_RESULT();
6252         HWRM_UNLOCK();
6253
6254         return rc;
6255 }