net/bnxt: fix ring teardown
drivers/net/bnxt/bnxt_hwrm.c (dpdk.git)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2014-2021 Broadcom
3  * All rights reserved.
4  */
5
6 #include <unistd.h>
7
8 #include <rte_byteorder.h>
9 #include <rte_common.h>
10 #include <rte_cycles.h>
11 #include <rte_malloc.h>
12 #include <rte_memzone.h>
13 #include <rte_version.h>
14 #include <rte_io.h>
15
16 #include "bnxt.h"
17 #include "bnxt_filter.h"
18 #include "bnxt_hwrm.h"
19 #include "bnxt_rxq.h"
20 #include "bnxt_rxr.h"
21 #include "bnxt_ring.h"
22 #include "bnxt_txq.h"
23 #include "bnxt_txr.h"
24 #include "bnxt_vnic.h"
25 #include "hsi_struct_def_dpdk.h"
26
27 #define HWRM_SPEC_CODE_1_8_3            0x10803
28 #define HWRM_VERSION_1_9_1              0x10901
29 #define HWRM_VERSION_1_9_2              0x10903
30 #define HWRM_VERSION_1_10_2_13          0x10a020d
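
/* The HWRM_SPEC_CODE_* and HWRM_VERSION_1_9_* values are compared against
 * bp->hwrm_spec_code, which packs the interface version as
 * (major << 16) | (minor << 8) | update (see bnxt_hwrm_ver_get()).
 * HWRM_VERSION_1_10_2_13 is instead compared against bp->fw_ver, which packs
 * major.minor.build.rsvd one byte per field, so 0x10a020d is 1.10.2.13.
 */
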
31 struct bnxt_plcmodes_cfg {
32         uint32_t        flags;
33         uint16_t        jumbo_thresh;
34         uint16_t        hds_offset;
35         uint16_t        hds_threshold;
36 };
37
38 static int page_getenum(size_t size)
39 {
40         if (size <= 1 << 4)
41                 return 4;
42         if (size <= 1 << 12)
43                 return 12;
44         if (size <= 1 << 13)
45                 return 13;
46         if (size <= 1 << 16)
47                 return 16;
48         if (size <= 1 << 21)
49                 return 21;
50         if (size <= 1 << 22)
51                 return 22;
52         if (size <= 1 << 30)
53                 return 30;
54         PMD_DRV_LOG(ERR, "Page size %zu out of range\n", size);
55         return sizeof(int) * 8 - 1;
56 }
57
58 static int page_roundup(size_t size)
59 {
60         return 1 << page_getenum(size);
61 }
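
/* For example, page_roundup(3000) returns 4096 (1 << 12) and
 * page_roundup(70000) returns 2097152 (1 << 21).
 */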
62
63 static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem,
64                                   uint8_t *pg_attr,
65                                   uint64_t *pg_dir)
66 {
67         if (rmem->nr_pages == 0)
68                 return;
69
70         if (rmem->nr_pages > 1) {
71                 *pg_attr = 1;
72                 *pg_dir = rte_cpu_to_le_64(rmem->pg_tbl_map);
73         } else {
74                 *pg_dir = rte_cpu_to_le_64(rmem->dma_arr[0]);
75         }
76 }
77
78 static struct bnxt_cp_ring_info*
79 bnxt_get_ring_info_by_id(struct bnxt *bp, uint16_t rid, uint16_t type)
80 {
81         struct bnxt_cp_ring_info *cp_ring = NULL;
82         uint16_t i;
83
84         switch (type) {
85         case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
86                 /* FALLTHROUGH */
87         case HWRM_RING_FREE_INPUT_RING_TYPE_RX_AGG:
88                 for (i = 0; i < bp->rx_cp_nr_rings; i++) {
89                         struct bnxt_rx_queue *rxq = bp->rx_queues[i];
90
91                         if (rxq->cp_ring->cp_ring_struct->fw_ring_id ==
92                             rte_cpu_to_le_16(rid)) {
93                                 return rxq->cp_ring;
94                         }
95                 }
96                 break;
97         case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
98                 for (i = 0; i < bp->tx_cp_nr_rings; i++) {
99                         struct bnxt_tx_queue *txq = bp->tx_queues[i];
100
101                         if (txq->cp_ring->cp_ring_struct->fw_ring_id ==
102                             rte_cpu_to_le_16(rid)) {
103                                 return txq->cp_ring;
104                         }
105                 }
106                 break;
107         default:
108                 return cp_ring;
109         }
110         return cp_ring;
111 }
112
113 /* Complete a sweep of the CQ ring for the corresponding Tx/Rx/AGG ring.
114  * If CMPL_BASE_TYPE_HWRM_DONE has not been encountered by the last pass
115  * before the timeout, force the done bit so the cleanup can proceed.
116  * If cpr is NULL, do nothing: the HWRM command is not for a Tx/Rx/AGG
117  * ring cleanup.
118  */
119 static int
120 bnxt_check_cq_hwrm_done(struct bnxt_cp_ring_info *cpr,
121                         bool tx, bool rx, bool timeout)
122 {
123         int done = 0;
124
125         if (cpr != NULL) {
126                 if (tx)
127                         done = bnxt_flush_tx_cmp(cpr);
128
129                 if (rx)
130                         done = bnxt_flush_rx_cmp(cpr);
131
132                 if (done)
133                         PMD_DRV_LOG(DEBUG, "HWRM DONE for %s ring\n",
134                                     rx ? "Rx" : "Tx");
135
136                 /* We are about to time out and still haven't seen the
137                  * HWRM done for the ring free. Force the cleanup.
138                  */
139                 if (!done && timeout) {
140                         done = 1;
141                         PMD_DRV_LOG(DEBUG, "Timing out for %s ring\n",
142                                     rx ? "Rx" : "Tx");
143                 }
144         } else {
145                 /* This HWRM command is not for a Tx/Rx/AGG ring cleanup.
146                  * Otherwise the cpr would have been valid. So do nothing.
147                  */
148                 done = 1;
149         }
150
151         return done;
152 }
153
154 /*
155  * HWRM Functions (sent to HWRM)
156  * These are named bnxt_hwrm_*() and return 0 on success, -ETIMEDOUT (-110)
157  * if the HWRM command times out, or a negative error code if the HWRM
158  * command is rejected by the FW.
159  */
160
161 static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
162                                   uint32_t msg_len, bool use_kong_mb)
163 {
164         unsigned int i;
165         struct input *req = msg;
166         struct output *resp = bp->hwrm_cmd_resp_addr;
167         uint32_t *data = msg;
168         uint8_t *bar;
169         uint8_t *valid;
170         uint16_t max_req_len = bp->max_req_len;
171         struct hwrm_short_input short_input = { 0 };
172         uint16_t bar_offset = use_kong_mb ?
173                 GRCPF_REG_KONG_CHANNEL_OFFSET : GRCPF_REG_CHIMP_CHANNEL_OFFSET;
174         uint16_t mb_trigger_offset = use_kong_mb ?
175                 GRCPF_REG_KONG_COMM_TRIGGER : GRCPF_REG_CHIMP_COMM_TRIGGER;
176         struct bnxt_cp_ring_info *cpr = NULL;
177         bool is_rx = false;
178         bool is_tx = false;
179         uint32_t timeout;
180
181         /* Do not send HWRM commands to firmware in error state */
182         if (bp->flags & BNXT_FLAG_FATAL_ERROR)
183                 return 0;
184
185         timeout = bp->hwrm_cmd_timeout;
186
187         /* Update the message length for backing store config for new FW. */
188         if (bp->fw_ver >= HWRM_VERSION_1_10_2_13 &&
189             rte_cpu_to_le_16(req->req_type) == HWRM_FUNC_BACKING_STORE_CFG)
190                 msg_len = BNXT_BACKING_STORE_CFG_LEGACY_LEN;
191
192         if (bp->flags & BNXT_FLAG_SHORT_CMD ||
193             msg_len > bp->max_req_len) {
194                 void *short_cmd_req = bp->hwrm_short_cmd_req_addr;
195
196                 memset(short_cmd_req, 0, bp->hwrm_max_ext_req_len);
197                 memcpy(short_cmd_req, req, msg_len);
198
199                 short_input.req_type = rte_cpu_to_le_16(req->req_type);
200                 short_input.signature = rte_cpu_to_le_16(
201                                         HWRM_SHORT_INPUT_SIGNATURE_SHORT_CMD);
202                 short_input.size = rte_cpu_to_le_16(msg_len);
203                 short_input.req_addr =
204                         rte_cpu_to_le_64(bp->hwrm_short_cmd_req_dma_addr);
205
206                 data = (uint32_t *)&short_input;
207                 msg_len = sizeof(short_input);
208
209                 max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
210         }
211
212         /* Write request msg to hwrm channel */
213         for (i = 0; i < msg_len; i += 4) {
214                 bar = (uint8_t *)bp->bar0 + bar_offset + i;
215                 rte_write32(*data, bar);
216                 data++;
217         }
218
219         /* Zero the rest of the request space */
220         for (; i < max_req_len; i += 4) {
221                 bar = (uint8_t *)bp->bar0 + bar_offset + i;
222                 rte_write32(0, bar);
223         }
224
225         /* Ring channel doorbell */
226         bar = (uint8_t *)bp->bar0 + mb_trigger_offset;
227         rte_write32(1, bar);
228         /*
229          * Make sure the channel doorbell write completes before
230          * reading the response, to avoid getting stale or invalid
231          * responses.
232          */
233         rte_io_mb();
234
235         /* Check ring flush is done.
236          * This is valid only for Tx and Rx rings (including AGG rings).
237          * The Tx and Rx rings should be freed once the HW confirms all
238          * the internal buffers and BDs associated with the rings are
239          * consumed and the corresponding DMA is handled.
240          */
241         if (rte_cpu_to_le_16(req->cmpl_ring) != INVALID_HW_RING_ID) {
242                 /* Check if the TxCQ matches; if not, check the RxCQ.
243                  * If neither matches, is_rx and is_tx remain false.
244                  */
245                 cpr = bnxt_get_ring_info_by_id(bp, req->cmpl_ring,
246                                                HWRM_RING_FREE_INPUT_RING_TYPE_TX);
247                 if (cpr == NULL) {
248                         /* Not a TxCQ. Check if the RxCQ matches. */
249                         cpr =
250                         bnxt_get_ring_info_by_id(bp, req->cmpl_ring,
251                                                  HWRM_RING_FREE_INPUT_RING_TYPE_RX);
252                         if (cpr != NULL)
253                                 is_rx = true;
254                 } else {
255                         is_tx = true;
256                 }
257         }
258
259         /* Poll for the valid bit */
260         for (i = 0; i < timeout; i++) {
261                 int done;
262
263                 done = bnxt_check_cq_hwrm_done(cpr, is_tx, is_rx,
264                                                i == timeout - 1);
265                 /* Sanity check on the resp->resp_len */
266                 rte_io_rmb();
267                 if (resp->resp_len && resp->resp_len <= bp->max_resp_len) {
268                         /* Last byte of resp contains the valid key */
269                         valid = (uint8_t *)resp + resp->resp_len - 1;
270                         if (*valid == HWRM_RESP_VALID_KEY && done)
271                                 break;
272                 }
273                 rte_delay_us(1);
274         }
275
276         if (i >= timeout) {
277                 /* Suppress VER_GET timeout messages during reset recovery */
278                 if (bp->flags & BNXT_FLAG_FW_RESET &&
279                     rte_cpu_to_le_16(req->req_type) == HWRM_VER_GET)
280                         return -ETIMEDOUT;
281
282                 PMD_DRV_LOG(ERR,
283                             "Error(timeout) sending msg 0x%04x, seq_id %d\n",
284                             req->req_type, req->seq_id);
285                 return -ETIMEDOUT;
286         }
287         return 0;
288 }
289
290 /*
291  * HWRM_PREP() must be used to prepare *ALL* HWRM commands. It grabs the
292  * spinlock and does the initial processing.
293  *
294  * HWRM_CHECK_RESULT() checks for errors and, on failure, returns from
295  * the calling function, releasing the spinlock only on that error path.
296  * If the calling function does not use the regular int return codes, it
297  * should not use HWRM_CHECK_RESULT() directly; copy and modify it instead.
298  *
299  * HWRM_UNLOCK() must be called after all response processing is completed.
300  */
301 #define HWRM_PREP(req, type, kong) do { \
302         rte_spinlock_lock(&bp->hwrm_lock); \
303         if (bp->hwrm_cmd_resp_addr == NULL) { \
304                 rte_spinlock_unlock(&bp->hwrm_lock); \
305                 return -EACCES; \
306         } \
307         memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
308         (req)->req_type = rte_cpu_to_le_16(type); \
309         (req)->cmpl_ring = rte_cpu_to_le_16(-1); \
310         (req)->seq_id = kong ? rte_cpu_to_le_16(bp->kong_cmd_seq++) :\
311                 rte_cpu_to_le_16(bp->chimp_cmd_seq++); \
312         (req)->target_id = rte_cpu_to_le_16(0xffff); \
313         (req)->resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr); \
314 } while (0)
315
316 #define HWRM_CHECK_RESULT_SILENT() do {\
317         if (rc) { \
318                 rte_spinlock_unlock(&bp->hwrm_lock); \
319                 return rc; \
320         } \
321         if (resp->error_code) { \
322                 rc = rte_le_to_cpu_16(resp->error_code); \
323                 rte_spinlock_unlock(&bp->hwrm_lock); \
324                 return rc; \
325         } \
326 } while (0)
327
328 #define HWRM_CHECK_RESULT() do {\
329         if (rc) { \
330                 PMD_DRV_LOG(ERR, "failed rc:%d\n", rc); \
331                 rte_spinlock_unlock(&bp->hwrm_lock); \
332                 if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
333                         rc = -EACCES; \
334                 else if (rc == HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR) \
335                         rc = -ENOSPC; \
336                 else if (rc == HWRM_ERR_CODE_INVALID_PARAMS) \
337                         rc = -EINVAL; \
338                 else if (rc == HWRM_ERR_CODE_CMD_NOT_SUPPORTED) \
339                         rc = -ENOTSUP; \
340                 else if (rc == HWRM_ERR_CODE_HOT_RESET_PROGRESS) \
341                         rc = -EAGAIN; \
342                 else if (rc > 0) \
343                         rc = -EIO; \
344                 return rc; \
345         } \
346         if (resp->error_code) { \
347                 rc = rte_le_to_cpu_16(resp->error_code); \
348                 if (resp->resp_len >= 16) { \
349                         struct hwrm_err_output *tmp_hwrm_err_op = \
350                                                 (void *)resp; \
351                         PMD_DRV_LOG(ERR, \
352                                 "error %d:%d:%08x:%04x\n", \
353                                 rc, tmp_hwrm_err_op->cmd_err, \
354                                 rte_le_to_cpu_32(\
355                                         tmp_hwrm_err_op->opaque_0), \
356                                 rte_le_to_cpu_16(\
357                                         tmp_hwrm_err_op->opaque_1)); \
358                 } else { \
359                         PMD_DRV_LOG(ERR, "error %d\n", rc); \
360                 } \
361                 rte_spinlock_unlock(&bp->hwrm_lock); \
362                 if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
363                         rc = -EACCES; \
364                 else if (rc == HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR) \
365                         rc = -ENOSPC; \
366                 else if (rc == HWRM_ERR_CODE_INVALID_PARAMS) \
367                         rc = -EINVAL; \
368                 else if (rc == HWRM_ERR_CODE_CMD_NOT_SUPPORTED) \
369                         rc = -ENOTSUP; \
370                 else if (rc == HWRM_ERR_CODE_HOT_RESET_PROGRESS) \
371                         rc = -EAGAIN; \
372                 else if (rc > 0) \
373                         rc = -EIO; \
374                 return rc; \
375         } \
376 } while (0)
377
378 #define HWRM_UNLOCK()           rte_spinlock_unlock(&bp->hwrm_lock)
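
/* Illustrative sketch of the canonical call pattern used by the
 * bnxt_hwrm_*() functions below. HWRM_FOO and the hwrm_foo_* structures
 * are placeholders, not real definitions:
 *
 *	struct hwrm_foo_input req = {.req_type = 0 };
 *	struct hwrm_foo_output *resp = bp->hwrm_cmd_resp_addr;
 *
 *	HWRM_PREP(&req, HWRM_FOO, BNXT_USE_CHIMP_MB);
 *	req.some_field = rte_cpu_to_le_16(value);
 *	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 *	HWRM_CHECK_RESULT();
 *	(read resp fields here, while the spinlock is still held)
 *	HWRM_UNLOCK();
 *	return rc;
 */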
379
380 int bnxt_hwrm_tf_message_direct(struct bnxt *bp,
381                                 bool use_kong_mb,
382                                 uint16_t msg_type,
383                                 void *msg,
384                                 uint32_t msg_len,
385                                 void *resp_msg,
386                                 uint32_t resp_len)
387 {
388         int rc = 0;
389         bool mailbox = BNXT_USE_CHIMP_MB;
390         struct input *req = msg;
391         struct output *resp = bp->hwrm_cmd_resp_addr;
392
393         if (use_kong_mb)
394                 mailbox = BNXT_USE_KONG(bp);
395
396         HWRM_PREP(req, msg_type, mailbox);
397
398         rc = bnxt_hwrm_send_message(bp, req, msg_len, mailbox);
399
400         HWRM_CHECK_RESULT();
401
402         if (resp_msg)
403                 memcpy(resp_msg, resp, resp_len);
404
405         HWRM_UNLOCK();
406
407         return rc;
408 }
409
410 int bnxt_hwrm_tf_message_tunneled(struct bnxt *bp,
411                                   bool use_kong_mb,
412                                   uint16_t tf_type,
413                                   uint16_t tf_subtype,
414                                   uint32_t *tf_response_code,
415                                   void *msg,
416                                   uint32_t msg_len,
417                                   void *response,
418                                   uint32_t response_len)
419 {
420         int rc = 0;
421         struct hwrm_cfa_tflib_input req = { .req_type = 0 };
422         struct hwrm_cfa_tflib_output *resp = bp->hwrm_cmd_resp_addr;
423         bool mailbox = BNXT_USE_CHIMP_MB;
424
425         if (msg_len > sizeof(req.tf_req))
426                 return -ENOMEM;
427
428         if (use_kong_mb)
429                 mailbox = BNXT_USE_KONG(bp);
430
431         HWRM_PREP(&req, HWRM_TF, mailbox);
432         /* Build the request using the user-supplied request payload.
433          * The TLV request size is checked at build time against the HWRM
434          * request max size, so no runtime checking is required.
435          */
436         req.tf_type = tf_type;
437         req.tf_subtype = tf_subtype;
438         memcpy(req.tf_req, msg, msg_len);
439
440         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), mailbox);
441         HWRM_CHECK_RESULT();
442
443         /* Copy the response to the user-provided response buffer. We
444          * need to copy only the 'payload', as the HWRM data structure
445          * really is HWRM header + msg header + payload and TFLIB only
446          * provides a payload placeholder.
447          */
448         if (response != NULL && response_len != 0) {
449                 memcpy(response,
450                        resp->tf_resp,
451                        response_len);
452         }
455
456         /* Extract the internal tflib response code */
457         *tf_response_code = resp->tf_resp_code;
458         HWRM_UNLOCK();
459
460         return rc;
461 }
462
463 int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
464 {
465         int rc = 0;
466         struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
467         struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
468
469         HWRM_PREP(&req, HWRM_CFA_L2_SET_RX_MASK, BNXT_USE_CHIMP_MB);
470         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
471         req.mask = 0;
472
473         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
474
475         HWRM_CHECK_RESULT();
476         HWRM_UNLOCK();
477
478         return rc;
479 }
480
481 int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
482                                  struct bnxt_vnic_info *vnic,
483                                  uint16_t vlan_count,
484                                  struct bnxt_vlan_table_entry *vlan_table)
485 {
486         int rc = 0;
487         struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
488         struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
489         uint32_t mask = 0;
490
491         if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
492                 return rc;
493
494         HWRM_PREP(&req, HWRM_CFA_L2_SET_RX_MASK, BNXT_USE_CHIMP_MB);
495         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
496
497         if (vnic->flags & BNXT_VNIC_INFO_BCAST)
498                 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST;
499         if (vnic->flags & BNXT_VNIC_INFO_UNTAGGED)
500                 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN;
501
502         if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
503                 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
504
505         if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI) {
506                 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
507         } else if (vnic->flags & BNXT_VNIC_INFO_MCAST) {
508                 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
509                 req.num_mc_entries = rte_cpu_to_le_32(bp->nb_mc_addr);
510                 req.mc_tbl_addr = rte_cpu_to_le_64(bp->mc_list_dma_addr);
511         }
512         if (vlan_table) {
513                 if (!(mask & HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN))
514                         mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
515                 req.vlan_tag_tbl_addr =
516                         rte_cpu_to_le_64(rte_malloc_virt2iova(vlan_table));
517                 req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count);
518         }
519         req.mask = rte_cpu_to_le_32(mask);
520
521         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
522
523         HWRM_CHECK_RESULT();
524         HWRM_UNLOCK();
525
526         return rc;
527 }
528
529 int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
530                         uint16_t vlan_count,
531                         struct bnxt_vlan_antispoof_table_entry *vlan_table)
532 {
533         int rc = 0;
534         struct hwrm_cfa_vlan_antispoof_cfg_input req = {.req_type = 0 };
535         struct hwrm_cfa_vlan_antispoof_cfg_output *resp =
536                                                 bp->hwrm_cmd_resp_addr;
537
538         /*
539          * Older HWRM versions did not support this command, and the set_rx_mask
540          * list was used for anti-spoof. In 1.8.0, the TX path configuration was
541          * removed from the set_rx_mask call, and this command was added.
542          *
543          * This command is also present in 1.7.8.11 and higher, as well as in
544          * 1.7.8.0.
545          */
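        /* i.e. send the command only when fw_ver >= 1.8.0, fw_ver == 1.7.8.0,
         * or 1.7.8.11 <= fw_ver < 1.8.0; otherwise silently return success.
         */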
546         if (bp->fw_ver < ((1 << 24) | (8 << 16))) {
547                 if (bp->fw_ver != ((1 << 24) | (7 << 16) | (8 << 8))) {
548                         if (bp->fw_ver < ((1 << 24) | (7 << 16) | (8 << 8) |
549                                         (11)))
550                                 return 0;
551                 }
552         }
553         HWRM_PREP(&req, HWRM_CFA_VLAN_ANTISPOOF_CFG, BNXT_USE_CHIMP_MB);
554         req.fid = rte_cpu_to_le_16(fid);
555
556         req.vlan_tag_mask_tbl_addr =
557                 rte_cpu_to_le_64(rte_malloc_virt2iova(vlan_table));
558         req.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count);
559
560         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
561
562         HWRM_CHECK_RESULT();
563         HWRM_UNLOCK();
564
565         return rc;
566 }
567
568 int bnxt_hwrm_clear_l2_filter(struct bnxt *bp,
569                              struct bnxt_filter_info *filter)
570 {
571         int rc = 0;
572         struct bnxt_filter_info *l2_filter = filter;
573         struct bnxt_vnic_info *vnic = NULL;
574         struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
575         struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;
576
577         if (filter->fw_l2_filter_id == UINT64_MAX)
578                 return 0;
579
580         if (filter->matching_l2_fltr_ptr)
581                 l2_filter = filter->matching_l2_fltr_ptr;
582
583         PMD_DRV_LOG(DEBUG, "filter: %p l2_filter: %p ref_cnt: %d\n",
584                     filter, l2_filter, l2_filter->l2_ref_cnt);
585
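        /* Drop one reference; the HW filter is freed only when the last
         * reference goes away.
         */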
586         if (l2_filter->l2_ref_cnt == 0)
587                 return 0;
588
589         if (l2_filter->l2_ref_cnt > 0)
590                 l2_filter->l2_ref_cnt--;
591
592         if (l2_filter->l2_ref_cnt > 0)
593                 return 0;
594
595         HWRM_PREP(&req, HWRM_CFA_L2_FILTER_FREE, BNXT_USE_CHIMP_MB);
596
597         req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
598
599         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
600
601         HWRM_CHECK_RESULT();
602         HWRM_UNLOCK();
603
604         filter->fw_l2_filter_id = UINT64_MAX;
605         if (l2_filter->l2_ref_cnt == 0) {
606                 vnic = l2_filter->vnic;
607                 if (vnic) {
608                         STAILQ_REMOVE(&vnic->filter, l2_filter,
609                                       bnxt_filter_info, next);
610                         bnxt_free_filter(bp, l2_filter);
611                 }
612         }
613
614         return 0;
615 }
616
617 int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
618                          uint16_t dst_id,
619                          struct bnxt_filter_info *filter)
620 {
621         int rc = 0;
622         struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
623         struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
624         struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
625         const struct rte_eth_vmdq_rx_conf *conf =
626                     &dev_conf->rx_adv_conf.vmdq_rx_conf;
627         uint32_t enables = 0;
628         uint16_t j = dst_id - 1;
629
630         /* TODO: Is there a better way to add VLANs to each VNIC in case of VMDQ? */
631         if ((dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG) &&
632             conf->pool_map[j].pools & (1UL << j)) {
633                 PMD_DRV_LOG(DEBUG,
634                         "Add vlan %u to vmdq pool %u\n",
635                         conf->pool_map[j].vlan_id, j);
636
637                 filter->l2_ivlan = conf->pool_map[j].vlan_id;
638                 filter->enables |=
639                         HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN |
640                         HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK;
641         }
642
643         if (filter->fw_l2_filter_id != UINT64_MAX)
644                 bnxt_hwrm_clear_l2_filter(bp, filter);
645
646         HWRM_PREP(&req, HWRM_CFA_L2_FILTER_ALLOC, BNXT_USE_CHIMP_MB);
647
648         /* PMD does not support XDP and RoCE */
649         filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_XDP_DISABLE |
650                         HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_TRAFFIC_L2;
651         req.flags = rte_cpu_to_le_32(filter->flags);
652
653         enables = filter->enables |
654               HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
655         req.dst_id = rte_cpu_to_le_16(dst_id);
656
657         if (enables &
658             HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
659                 memcpy(req.l2_addr, filter->l2_addr,
660                        RTE_ETHER_ADDR_LEN);
661         if (enables &
662             HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
663                 memcpy(req.l2_addr_mask, filter->l2_addr_mask,
664                        RTE_ETHER_ADDR_LEN);
665         if (enables &
666             HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
667                 req.l2_ovlan = filter->l2_ovlan;
668         if (enables &
669             HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN)
670                 req.l2_ivlan = filter->l2_ivlan;
671         if (enables &
672             HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
673                 req.l2_ovlan_mask = filter->l2_ovlan_mask;
674         if (enables &
675             HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK)
676                 req.l2_ivlan_mask = filter->l2_ivlan_mask;
677         if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID)
678                 req.src_id = rte_cpu_to_le_32(filter->src_id);
679         if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE)
680                 req.src_type = filter->src_type;
681         if (filter->pri_hint) {
682                 req.pri_hint = filter->pri_hint;
683                 req.l2_filter_id_hint =
684                         rte_cpu_to_le_64(filter->l2_filter_id_hint);
685         }
686
687         req.enables = rte_cpu_to_le_32(enables);
688
689         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
690
691         HWRM_CHECK_RESULT();
692
693         filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);
694         filter->flow_id = rte_le_to_cpu_32(resp->flow_id);
695         HWRM_UNLOCK();
696
697         filter->l2_ref_cnt++;
698
699         return rc;
700 }
701
702 int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
703 {
704         struct hwrm_port_mac_cfg_input req = {.req_type = 0};
705         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
706         uint32_t flags = 0;
707         int rc;
708
709         if (!ptp)
710                 return 0;
711
712         HWRM_PREP(&req, HWRM_PORT_MAC_CFG, BNXT_USE_CHIMP_MB);
713
714         if (ptp->rx_filter)
715                 flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_ENABLE;
716         else
717                 flags |=
718                         HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_DISABLE;
719         if (ptp->tx_tstamp_en)
720                 flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_ENABLE;
721         else
722                 flags |=
723                         HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_DISABLE;
724         req.flags = rte_cpu_to_le_32(flags);
725         req.enables = rte_cpu_to_le_32
726                 (HWRM_PORT_MAC_CFG_INPUT_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE);
727         req.rx_ts_capture_ptp_msg_type = rte_cpu_to_le_16(ptp->rxctl);
728
729         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
730         HWRM_UNLOCK();
731
732         return rc;
733 }
734
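/* Note: bnxt_hwrm_ptp_qcfg() never calls HWRM_UNLOCK() itself. After
 * HWRM_PREP(), every return path other than an HWRM_CHECK_RESULT() failure
 * leaves the HWRM lock held; the caller, __bnxt_hwrm_func_qcaps(), releases
 * it via its trailing HWRM_UNLOCK().
 */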
735 static int bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
736 {
737         int rc = 0;
738         struct hwrm_port_mac_ptp_qcfg_input req = {.req_type = 0};
739         struct hwrm_port_mac_ptp_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
740         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
741
742         if (ptp)
743                 return 0;
744
745         HWRM_PREP(&req, HWRM_PORT_MAC_PTP_QCFG, BNXT_USE_CHIMP_MB);
746
747         req.port_id = rte_cpu_to_le_16(bp->pf->port_id);
748
749         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
750
751         HWRM_CHECK_RESULT();
752
753         if (BNXT_CHIP_P5(bp)) {
754                 if (!(resp->flags & HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_HWRM_ACCESS))
755                         return 0;
756         } else {
757                 if (!(resp->flags & HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_DIRECT_ACCESS))
758                         return 0;
759         }
760
761         if (resp->flags & HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_ONE_STEP_TX_TS)
762                 bp->flags |= BNXT_FLAG_FW_CAP_ONE_STEP_TX_TS;
763
764         ptp = rte_zmalloc("ptp_cfg", sizeof(*ptp), 0);
765         if (!ptp)
766                 return -ENOMEM;
767
768         if (!BNXT_CHIP_P5(bp)) {
769                 ptp->rx_regs[BNXT_PTP_RX_TS_L] =
770                         rte_le_to_cpu_32(resp->rx_ts_reg_off_lower);
771                 ptp->rx_regs[BNXT_PTP_RX_TS_H] =
772                         rte_le_to_cpu_32(resp->rx_ts_reg_off_upper);
773                 ptp->rx_regs[BNXT_PTP_RX_SEQ] =
774                         rte_le_to_cpu_32(resp->rx_ts_reg_off_seq_id);
775                 ptp->rx_regs[BNXT_PTP_RX_FIFO] =
776                         rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo);
777                 ptp->rx_regs[BNXT_PTP_RX_FIFO_ADV] =
778                         rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo_adv);
779                 ptp->tx_regs[BNXT_PTP_TX_TS_L] =
780                         rte_le_to_cpu_32(resp->tx_ts_reg_off_lower);
781                 ptp->tx_regs[BNXT_PTP_TX_TS_H] =
782                         rte_le_to_cpu_32(resp->tx_ts_reg_off_upper);
783                 ptp->tx_regs[BNXT_PTP_TX_SEQ] =
784                         rte_le_to_cpu_32(resp->tx_ts_reg_off_seq_id);
785                 ptp->tx_regs[BNXT_PTP_TX_FIFO] =
786                         rte_le_to_cpu_32(resp->tx_ts_reg_off_fifo);
787         }
788
789         ptp->bp = bp;
790         bp->ptp_cfg = ptp;
791
792         return 0;
793 }
794
795 void bnxt_free_vf_info(struct bnxt *bp)
796 {
797         int i;
798
799         if (bp->pf == NULL)
800                 return;
801
802         if (bp->pf->vf_info == NULL)
803                 return;
804
805         for (i = 0; i < bp->pf->max_vfs; i++) {
806                 rte_free(bp->pf->vf_info[i].vlan_table);
807                 bp->pf->vf_info[i].vlan_table = NULL;
808                 rte_free(bp->pf->vf_info[i].vlan_as_table);
809                 bp->pf->vf_info[i].vlan_as_table = NULL;
810         }
811         rte_free(bp->pf->vf_info);
812         bp->pf->vf_info = NULL;
813 }
814
815 static int bnxt_alloc_vf_info(struct bnxt *bp, uint16_t max_vfs)
816 {
817         struct bnxt_child_vf_info *vf_info = bp->pf->vf_info;
818         int i;
819
820         if (vf_info)
821                 bnxt_free_vf_info(bp);
822
823         vf_info = rte_zmalloc("bnxt_vf_info", sizeof(*vf_info) * max_vfs, 0);
824         if (vf_info == NULL) {
825                 PMD_DRV_LOG(ERR, "Failed to alloc vf info\n");
826                 return -ENOMEM;
827         }
828
829         bp->pf->max_vfs = max_vfs;
830         for (i = 0; i < max_vfs; i++) {
831                 vf_info[i].fid = bp->pf->first_vf_id + i;
832                 vf_info[i].vlan_table = rte_zmalloc("VF VLAN table",
833                                                     getpagesize(), getpagesize());
834                 if (vf_info[i].vlan_table == NULL) {
835                         PMD_DRV_LOG(ERR, "Failed to alloc VLAN table for VF %d\n", i);
836                         goto err;
837                 }
838                 rte_mem_lock_page(vf_info[i].vlan_table);
839
840                 vf_info[i].vlan_as_table = rte_zmalloc("VF VLAN AS table",
841                                                        getpagesize(), getpagesize());
842                 if (vf_info[i].vlan_as_table == NULL) {
843                         PMD_DRV_LOG(ERR, "Failed to alloc VLAN AS table for VF %d\n", i);
844                         goto err;
845                 }
846                 rte_mem_lock_page(vf_info[i].vlan_as_table);
847
848                 STAILQ_INIT(&vf_info[i].filter);
849         }
850
851         bp->pf->vf_info = vf_info;
852
853         return 0;
854 err:
855         bnxt_free_vf_info(bp);
856         return -ENOMEM;
857 }
858
859 static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
860 {
861         int rc = 0;
862         struct hwrm_func_qcaps_input req = {.req_type = 0 };
863         struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
864         uint16_t new_max_vfs;
865         uint32_t flags;
866
867         HWRM_PREP(&req, HWRM_FUNC_QCAPS, BNXT_USE_CHIMP_MB);
868
869         req.fid = rte_cpu_to_le_16(0xffff);
870
871         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
872
873         HWRM_CHECK_RESULT();
874
875         bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
876         flags = rte_le_to_cpu_32(resp->flags);
877         if (BNXT_PF(bp)) {
878                 bp->pf->port_id = resp->port_id;
879                 bp->pf->first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
880                 bp->pf->total_vfs = rte_le_to_cpu_16(resp->max_vfs);
881                 new_max_vfs = bp->pdev->max_vfs;
882                 if (new_max_vfs != bp->pf->max_vfs) {
883                         rc = bnxt_alloc_vf_info(bp, new_max_vfs);
884                         if (rc)
885                                 goto unlock;
886                 }
887         }
888
889         bp->fw_fid = rte_le_to_cpu_32(resp->fid);
890         if (!bnxt_check_zero_bytes(resp->mac_address, RTE_ETHER_ADDR_LEN)) {
891                 bp->flags |= BNXT_FLAG_DFLT_MAC_SET;
892                 memcpy(bp->mac_addr, &resp->mac_address, RTE_ETHER_ADDR_LEN);
893         } else {
894                 bp->flags &= ~BNXT_FLAG_DFLT_MAC_SET;
895         }
896         bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
897         bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
898         bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
899         bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
900         bp->first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
901         bp->max_rx_em_flows = rte_le_to_cpu_16(resp->max_rx_em_flows);
902         bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
903         if (!BNXT_CHIP_P5(bp) && !bp->pdev->max_vfs)
904                 bp->max_l2_ctx += bp->max_rx_em_flows;
905         /* TODO: For now, do not support VMDq/RFS on VFs. */
906         if (BNXT_PF(bp)) {
907                 if (bp->pf->max_vfs)
908                         bp->max_vnics = 1;
909                 else
910                         bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
911         } else {
912                 bp->max_vnics = 1;
913         }
914         PMD_DRV_LOG(DEBUG, "Max l2_cntxts is %d vnics is %d\n",
915                     bp->max_l2_ctx, bp->max_vnics);
916         bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
917         bp->max_mcast_addr = rte_le_to_cpu_32(resp->max_mcast_filters);
918
919         if (BNXT_PF(bp)) {
920                 bp->pf->total_vnics = rte_le_to_cpu_16(resp->max_vnics);
921                 if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED) {
922                         bp->flags |= BNXT_FLAG_PTP_SUPPORTED;
923                         PMD_DRV_LOG(DEBUG, "PTP SUPPORTED\n");
924                         HWRM_UNLOCK();
925                         bnxt_hwrm_ptp_qcfg(bp);
926                 }
927         }
928
929         if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT_STATS_SUPPORTED)
930                 bp->flags |= BNXT_FLAG_EXT_STATS_SUPPORTED;
931
932         if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ERROR_RECOVERY_CAPABLE) {
933                 bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY;
934                 PMD_DRV_LOG(DEBUG, "Adapter Error recovery SUPPORTED\n");
935         }
936
937         if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ERR_RECOVER_RELOAD)
938                 bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;
939
940         if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_HOT_RESET_CAPABLE)
941                 bp->fw_cap |= BNXT_FW_CAP_HOT_RESET;
942
943         if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_LINK_ADMIN_STATUS_SUPPORTED)
944                 bp->fw_cap |= BNXT_FW_CAP_LINK_ADMIN;
945
946         if (!(flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_VLAN_ACCELERATION_TX_DISABLED)) {
947                 bp->fw_cap |= BNXT_FW_CAP_VLAN_TX_INSERT;
948                 PMD_DRV_LOG(DEBUG, "VLAN acceleration for TX is enabled\n");
949         }
950 unlock:
951         HWRM_UNLOCK();
952
953         return rc;
954 }
955
956 int bnxt_hwrm_func_qcaps(struct bnxt *bp)
957 {
958         int rc;
959
960         rc = __bnxt_hwrm_func_qcaps(bp);
961         if (rc == -ENOMEM)
962                 return rc;
963
964         if (!rc && bp->hwrm_spec_code >= HWRM_SPEC_CODE_1_8_3) {
965                 rc = bnxt_alloc_ctx_mem(bp);
966                 if (rc)
967                         return rc;
968
969                 /* On older FW, bnxt_hwrm_func_resc_qcaps can fail and
970                  * cause an init failure, but the error can be safely
971                  * ignored. Return success.
972                  */
973                 rc = bnxt_hwrm_func_resc_qcaps(bp);
974                 if (!rc)
975                         bp->flags |= BNXT_FLAG_NEW_RM;
976         }
977
978         return 0;
979 }
980
981 /* The VNIC caps apply to all VNICs, so there is no need to pass a vnic_id */
982 int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
983 {
984         int rc = 0;
985         uint32_t flags;
986         struct hwrm_vnic_qcaps_input req = {.req_type = 0 };
987         struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
988
989         HWRM_PREP(&req, HWRM_VNIC_QCAPS, BNXT_USE_CHIMP_MB);
990
991         req.target_id = rte_cpu_to_le_16(0xffff);
992
993         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
994
995         HWRM_CHECK_RESULT();
996
997         flags = rte_le_to_cpu_32(resp->flags);
998
999         if (flags & HWRM_VNIC_QCAPS_OUTPUT_FLAGS_COS_ASSIGNMENT_CAP) {
1000                 bp->vnic_cap_flags |= BNXT_VNIC_CAP_COS_CLASSIFY;
1001                 PMD_DRV_LOG(INFO, "CoS assignment capability enabled\n");
1002         }
1003
1004         if (flags & HWRM_VNIC_QCAPS_OUTPUT_FLAGS_OUTERMOST_RSS_CAP)
1005                 bp->vnic_cap_flags |= BNXT_VNIC_CAP_OUTER_RSS;
1006
1007         if (flags & HWRM_VNIC_QCAPS_OUTPUT_FLAGS_RX_CMPL_V2_CAP)
1008                 bp->vnic_cap_flags |= BNXT_VNIC_CAP_RX_CMPL_V2;
1009
1010         if (flags & HWRM_VNIC_QCAPS_OUTPUT_FLAGS_VLAN_STRIP_CAP) {
1011                 bp->vnic_cap_flags |= BNXT_VNIC_CAP_VLAN_RX_STRIP;
1012                 PMD_DRV_LOG(DEBUG, "Rx VLAN strip capability enabled\n");
1013         }
1014
1015         bp->max_tpa_v2 = rte_le_to_cpu_16(resp->max_aggs_supported);
1016
1017         HWRM_UNLOCK();
1018
1019         return rc;
1020 }
1021
1022 int bnxt_hwrm_func_reset(struct bnxt *bp)
1023 {
1024         int rc = 0;
1025         struct hwrm_func_reset_input req = {.req_type = 0 };
1026         struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;
1027
1028         HWRM_PREP(&req, HWRM_FUNC_RESET, BNXT_USE_CHIMP_MB);
1029
1030         req.enables = rte_cpu_to_le_32(0);
1031
1032         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1033
1034         HWRM_CHECK_RESULT();
1035         HWRM_UNLOCK();
1036
1037         return rc;
1038 }
1039
1040 int bnxt_hwrm_func_driver_register(struct bnxt *bp)
1041 {
1042         int rc;
1043         uint32_t flags = 0;
1044         struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
1045         struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
1046
1047         if (bp->flags & BNXT_FLAG_REGISTERED)
1048                 return 0;
1049
1050         if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
1051                 flags = HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_HOT_RESET_SUPPORT;
1052         if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
1053                 flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_ERROR_RECOVERY_SUPPORT;
1054
1055         /* PFs and trusted VFs should indicate support for the Master
1056          * capability on non-Stingray platforms.
1057          */
1058         if ((BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) && !BNXT_STINGRAY(bp))
1059                 flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_MASTER_SUPPORT;
1060
1061         HWRM_PREP(&req, HWRM_FUNC_DRV_RGTR, BNXT_USE_CHIMP_MB);
1062         req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
1063                         HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
1064         req.ver_maj_8b = RTE_VER_YEAR;
1065         req.ver_min_8b = RTE_VER_MONTH;
1066         req.ver_upd_8b = RTE_VER_MINOR;
1067
1068         if (BNXT_PF(bp)) {
1069                 req.enables |= rte_cpu_to_le_32(
1070                         HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_REQ_FWD);
1071                 memcpy(req.vf_req_fwd, bp->pf->vf_req_fwd,
1072                        RTE_MIN(sizeof(req.vf_req_fwd),
1073                                sizeof(bp->pf->vf_req_fwd)));
1074         }
1075
1076         req.flags = rte_cpu_to_le_32(flags);
1077
1078         req.async_event_fwd[0] |=
1079                 rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_LINK_STATUS_CHANGE |
1080                                  ASYNC_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED |
1081                                  ASYNC_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE |
1082                                  ASYNC_CMPL_EVENT_ID_LINK_SPEED_CHANGE |
1083                                  ASYNC_CMPL_EVENT_ID_RESET_NOTIFY);
1084         if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
1085                 req.async_event_fwd[0] |=
1086                         rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_ERROR_RECOVERY);
1087         req.async_event_fwd[1] |=
1088                 rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_PF_DRVR_UNLOAD |
1089                                  ASYNC_CMPL_EVENT_ID_VF_CFG_CHANGE);
1090         if (BNXT_PF(bp))
1091                 req.async_event_fwd[1] |=
1092                         rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_DBG_NOTIFICATION);
1093
1094         if (BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))
1095                 req.async_event_fwd[1] |=
1096                 rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_DEFAULT_VNIC_CHANGE);
1097
1098         req.async_event_fwd[2] |=
1099                 rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_ECHO_REQUEST |
1100                                  ASYNC_CMPL_EVENT_ID_ERROR_REPORT);
1101
1102         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1103
1104         HWRM_CHECK_RESULT();
1105
1106         flags = rte_le_to_cpu_32(resp->flags);
1107         if (flags & HWRM_FUNC_DRV_RGTR_OUTPUT_FLAGS_IF_CHANGE_SUPPORTED)
1108                 bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
1109
1110         HWRM_UNLOCK();
1111
1112         bp->flags |= BNXT_FLAG_REGISTERED;
1113
1114         return rc;
1115 }
1116
1117 int bnxt_hwrm_check_vf_rings(struct bnxt *bp)
1118 {
1119         if (!(BNXT_VF(bp) && (bp->flags & BNXT_FLAG_NEW_RM)))
1120                 return 0;
1121
1122         return bnxt_hwrm_func_reserve_vf_resc(bp, true);
1123 }
1124
1125 int bnxt_hwrm_func_reserve_vf_resc(struct bnxt *bp, bool test)
1126 {
1127         int rc;
1128         uint32_t flags = 0;
1129         uint32_t enables;
1130         struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1131         struct hwrm_func_vf_cfg_input req = {0};
1132
1133         HWRM_PREP(&req, HWRM_FUNC_VF_CFG, BNXT_USE_CHIMP_MB);
1134
1135         enables = HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RX_RINGS  |
1136                   HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_TX_RINGS   |
1137                   HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_STAT_CTXS  |
1138                   HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
1139                   HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS;
1140
1141         if (BNXT_HAS_RING_GRPS(bp)) {
1142                 enables |= HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS;
1143                 req.num_hw_ring_grps = rte_cpu_to_le_16(bp->rx_nr_rings);
1144         }
1145
1146         req.num_tx_rings = rte_cpu_to_le_16(bp->tx_nr_rings);
1147         req.num_rx_rings = rte_cpu_to_le_16(bp->rx_nr_rings *
1148                                             AGG_RING_MULTIPLIER);
1149         req.num_stat_ctxs = rte_cpu_to_le_16(bp->rx_nr_rings + bp->tx_nr_rings);
1150         req.num_cmpl_rings = rte_cpu_to_le_16(bp->rx_nr_rings +
1151                                               bp->tx_nr_rings +
1152                                               BNXT_NUM_ASYNC_CPR(bp));
1153         req.num_vnics = rte_cpu_to_le_16(bp->rx_nr_rings);
1154         if (bp->vf_resv_strategy ==
1155             HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
1156                 enables |= HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS |
1157                            HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_L2_CTXS |
1158                            HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS;
1159                 req.num_rsscos_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_RSS_CTX);
1160                 req.num_l2_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_L2_CTX);
1161                 req.num_vnics = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_VNIC);
1162         } else if (bp->vf_resv_strategy ==
1163                    HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MAXIMAL) {
1164                 enables |= HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS;
1165                 req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
1166         }
1167
1168         if (test)
1169                 flags = HWRM_FUNC_VF_CFG_INPUT_FLAGS_TX_ASSETS_TEST |
1170                         HWRM_FUNC_VF_CFG_INPUT_FLAGS_RX_ASSETS_TEST |
1171                         HWRM_FUNC_VF_CFG_INPUT_FLAGS_CMPL_ASSETS_TEST |
1172                         HWRM_FUNC_VF_CFG_INPUT_FLAGS_RING_GRP_ASSETS_TEST |
1173                         HWRM_FUNC_VF_CFG_INPUT_FLAGS_STAT_CTX_ASSETS_TEST |
1174                         HWRM_FUNC_VF_CFG_INPUT_FLAGS_VNIC_ASSETS_TEST;
1175
1176         if (test && BNXT_HAS_RING_GRPS(bp))
1177                 flags |= HWRM_FUNC_VF_CFG_INPUT_FLAGS_RING_GRP_ASSETS_TEST;
1178
1179         req.flags = rte_cpu_to_le_32(flags);
1180         req.enables |= rte_cpu_to_le_32(enables);
1181
1182         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1183
1184         if (test)
1185                 HWRM_CHECK_RESULT_SILENT();
1186         else
1187                 HWRM_CHECK_RESULT();
1188
1189         HWRM_UNLOCK();
1190         return rc;
1191 }
1192
1193 int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp)
1194 {
1195         int rc;
1196         struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
1197         struct hwrm_func_resource_qcaps_input req = {0};
1198
1199         HWRM_PREP(&req, HWRM_FUNC_RESOURCE_QCAPS, BNXT_USE_CHIMP_MB);
1200         req.fid = rte_cpu_to_le_16(0xffff);
1201
1202         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1203
1204         HWRM_CHECK_RESULT_SILENT();
1205
1206         bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
1207         bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
1208         bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
1209         bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
1210         bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
1211         /* func_resource_qcaps does not return max_rx_em_flows.
1212          * So use the value provided by func_qcaps.
1213          */
1214         bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
1215         if (!BNXT_CHIP_P5(bp) && !bp->pdev->max_vfs)
1216                 bp->max_l2_ctx += bp->max_rx_em_flows;
1217         bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
1218         bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
1219         bp->max_nq_rings = rte_le_to_cpu_16(resp->max_msix);
1220         bp->vf_resv_strategy = rte_le_to_cpu_16(resp->vf_reservation_strategy);
1221         if (bp->vf_resv_strategy >
1222             HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC)
1223                 bp->vf_resv_strategy =
1224                 HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_MAXIMAL;
1225
1226         HWRM_UNLOCK();
1227         return rc;
1228 }
1229
1230 int bnxt_hwrm_ver_get(struct bnxt *bp, uint32_t timeout)
1231 {
1232         int rc = 0;
1233         struct hwrm_ver_get_input req = {.req_type = 0 };
1234         struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
1235         uint32_t fw_version;
1236         uint16_t max_resp_len;
1237         char type[RTE_MEMZONE_NAMESIZE];
1238         uint32_t dev_caps_cfg;
1239
1240         bp->max_req_len = HWRM_MAX_REQ_LEN;
1241         bp->hwrm_cmd_timeout = timeout;
1242         HWRM_PREP(&req, HWRM_VER_GET, BNXT_USE_CHIMP_MB);
1243
1244         req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
1245         req.hwrm_intf_min = HWRM_VERSION_MINOR;
1246         req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
1247
1248         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1249
1250         if (bp->flags & BNXT_FLAG_FW_RESET)
1251                 HWRM_CHECK_RESULT_SILENT();
1252         else
1253                 HWRM_CHECK_RESULT();
1254
1255         if (resp->flags & HWRM_VER_GET_OUTPUT_FLAGS_DEV_NOT_RDY) {
1256                 rc = -EAGAIN;
1257                 goto error;
1258         }
1259
1260         PMD_DRV_LOG(INFO, "%d.%d.%d:%d.%d.%d.%d\n",
1261                 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
1262                 resp->hwrm_intf_upd_8b, resp->hwrm_fw_maj_8b,
1263                 resp->hwrm_fw_min_8b, resp->hwrm_fw_bld_8b,
1264                 resp->hwrm_fw_rsvd_8b);
1265         bp->fw_ver = ((uint32_t)resp->hwrm_fw_maj_8b << 24) |
1266                      ((uint32_t)resp->hwrm_fw_min_8b << 16) |
1267                      ((uint32_t)resp->hwrm_fw_bld_8b << 8) |
1268                      resp->hwrm_fw_rsvd_8b;
1269         PMD_DRV_LOG(INFO, "Driver HWRM version: %d.%d.%d\n",
1270                 HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);
1271
1272         fw_version = resp->hwrm_intf_maj_8b << 16;
1273         fw_version |= resp->hwrm_intf_min_8b << 8;
1274         fw_version |= resp->hwrm_intf_upd_8b;
1275         bp->hwrm_spec_code = fw_version;
1276
1277         /* def_req_timeout value is in milliseconds */
1278         bp->hwrm_cmd_timeout = rte_le_to_cpu_16(resp->def_req_timeout);
1279         /* convert timeout to usec */
1280         bp->hwrm_cmd_timeout *= 1000;
1281         if (!bp->hwrm_cmd_timeout)
1282                 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
1283
1284         if (resp->hwrm_intf_maj_8b != HWRM_VERSION_MAJOR) {
1285                 PMD_DRV_LOG(ERR, "Unsupported firmware API version\n");
1286                 rc = -EINVAL;
1287                 goto error;
1288         }
1289
1290         if (bp->max_req_len > resp->max_req_win_len) {
1291                 PMD_DRV_LOG(ERR, "Unsupported request length\n");
1292                 rc = -EINVAL;
1293                 goto error;
1294         }
1295
1296         bp->chip_num = rte_le_to_cpu_16(resp->chip_num);
1297
1298         bp->max_req_len = rte_le_to_cpu_16(resp->max_req_win_len);
1299         bp->hwrm_max_ext_req_len = rte_le_to_cpu_16(resp->max_ext_req_len);
1300         if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
1301                 bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;
1302
1303         max_resp_len = rte_le_to_cpu_16(resp->max_resp_len);
1304         dev_caps_cfg = rte_le_to_cpu_32(resp->dev_caps_cfg);
1305
1306         RTE_VERIFY(max_resp_len <= bp->max_resp_len);
1307         bp->max_resp_len = max_resp_len;
1308
1309         if ((dev_caps_cfg &
1310                 HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
1311             (dev_caps_cfg &
1312              HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) {
1313                 PMD_DRV_LOG(DEBUG, "Short command supported\n");
1314                 bp->flags |= BNXT_FLAG_SHORT_CMD;
1315         }
1316
1317         if (((dev_caps_cfg &
1318               HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
1319              (dev_caps_cfg &
1320               HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) ||
1321             bp->hwrm_max_ext_req_len > HWRM_MAX_REQ_LEN) {
1322                 sprintf(type, "bnxt_hwrm_short_" PCI_PRI_FMT,
1323                         bp->pdev->addr.domain, bp->pdev->addr.bus,
1324                         bp->pdev->addr.devid, bp->pdev->addr.function);
1325
1326                 rte_free(bp->hwrm_short_cmd_req_addr);
1327
1328                 bp->hwrm_short_cmd_req_addr =
1329                                 rte_malloc(type, bp->hwrm_max_ext_req_len, 0);
1330                 if (bp->hwrm_short_cmd_req_addr == NULL) {
1331                         rc = -ENOMEM;
1332                         goto error;
1333                 }
1334                 bp->hwrm_short_cmd_req_dma_addr =
1335                         rte_malloc_virt2iova(bp->hwrm_short_cmd_req_addr);
1336                 if (bp->hwrm_short_cmd_req_dma_addr == RTE_BAD_IOVA) {
1337                         rte_free(bp->hwrm_short_cmd_req_addr);
1338                         PMD_DRV_LOG(ERR,
1339                                 "Unable to map buffer to physical memory.\n");
1340                         rc = -ENOMEM;
1341                         goto error;
1342                 }
1343         }
1344         if (dev_caps_cfg &
1345             HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED) {
1346                 bp->flags |= BNXT_FLAG_KONG_MB_EN;
1347                 PMD_DRV_LOG(DEBUG, "Kong mailbox channel enabled\n");
1348         }
1349         if (dev_caps_cfg &
1350             HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
1351                 PMD_DRV_LOG(DEBUG, "FW supports Trusted VFs\n");
1352         if (dev_caps_cfg &
1353             HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED) {
1354                 bp->fw_cap |= BNXT_FW_CAP_ADV_FLOW_MGMT;
1355                 PMD_DRV_LOG(DEBUG, "FW supports advanced flow management\n");
1356         }
1357
1358         if (dev_caps_cfg &
1359             HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_ADV_FLOW_COUNTERS_SUPPORTED) {
1360                 PMD_DRV_LOG(DEBUG, "FW supports advanced flow counters\n");
1361                 bp->fw_cap |= BNXT_FW_CAP_ADV_FLOW_COUNTERS;
1362         }
1363
1364         if (dev_caps_cfg &
1365             HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_CFA_TRUFLOW_SUPPORTED) {
1366                 PMD_DRV_LOG(DEBUG, "Host-based truflow feature enabled.\n");
1367                 bp->fw_cap |= BNXT_FW_CAP_TRUFLOW_EN;
1368         }
1369
1370 error:
1371         HWRM_UNLOCK();
1372         return rc;
1373 }
1374
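/*
 * Unregister the driver from the firmware. Returns success immediately
 * when the port never completed driver registration (the
 * BNXT_FLAG_REGISTERED flag is not set).
 */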
1375 int bnxt_hwrm_func_driver_unregister(struct bnxt *bp)
1376 {
1377         int rc;
1378         struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
1379         struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
1380
1381         if (!(bp->flags & BNXT_FLAG_REGISTERED))
1382                 return 0;
1383
1384         HWRM_PREP(&req, HWRM_FUNC_DRV_UNRGTR, BNXT_USE_CHIMP_MB);
1385
1386         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1387
1388         HWRM_CHECK_RESULT();
1389         HWRM_UNLOCK();
1390
1391         PMD_DRV_LOG(DEBUG, "Port %u: Unregistered with fw\n",
1392                     bp->eth_dev->data->port_id);
1393
1394         return rc;
1395 }
1396
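/*
 * Push the PHY configuration in 'conf' to the firmware. For link-up
 * requests this selects between forced-speed and autoneg operation
 * (NRZ or PAM4 signaling) and programs the pause settings; for
 * link-down requests it simply forces the link down.
 */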
1397 static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
1398 {
1399         int rc = 0;
1400         struct hwrm_port_phy_cfg_input req = {0};
1401         struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1402         uint32_t enables = 0;
1403
1404         HWRM_PREP(&req, HWRM_PORT_PHY_CFG, BNXT_USE_CHIMP_MB);
1405
1406         if (conf->link_up) {
1407                 /* Fixed speed requested while autoneg is on; disable autoneg. */
1408                 if (bp->link_info->auto_mode && conf->link_speed) {
1409                         req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE;
1410                         PMD_DRV_LOG(DEBUG, "Disabling AutoNeg\n");
1411                 }
1412
1413                 req.flags = rte_cpu_to_le_32(conf->phy_flags);
1414                 /*
1415                  * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
1416                  * any auto mode, even "none".
1417                  */
1418                 if (!conf->link_speed) {
1419                         /* No speeds specified. Enable AutoNeg - all speeds */
1420                         enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
1421                         req.auto_mode =
1422                                 HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS;
1423                 } else {
1424                         if (bp->link_info->link_signal_mode) {
1425                                 enables |=
1426                                 HWRM_PORT_PHY_CFG_IN_EN_FORCE_PAM4_LINK_SPEED;
1427                                 req.force_pam4_link_speed =
1428                                         rte_cpu_to_le_16(conf->link_speed);
1429                         } else {
1430                                 req.force_link_speed =
1431                                         rte_cpu_to_le_16(conf->link_speed);
1432                         }
1433                 }
1434                 /* Autoneg: advertise only the speeds specified. */
1435                 if (conf->auto_link_speed_mask &&
1436                     !(conf->phy_flags & HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE)) {
1437                         req.auto_mode =
1438                                 HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
1439                         req.auto_link_speed_mask =
1440                                 conf->auto_link_speed_mask;
1441                         if (conf->auto_pam4_link_speeds) {
1442                                 enables |=
1443                                 HWRM_PORT_PHY_CFG_IN_EN_AUTO_PAM4_LINK_SPD_MASK;
1444                                 req.auto_link_pam4_speed_mask =
1445                                         conf->auto_pam4_link_speeds;
1446                         } else {
1447                                 enables |=
1448                                 HWRM_PORT_PHY_CFG_IN_EN_AUTO_LINK_SPEED_MASK;
1449                         }
1450                 }
1451                 if (conf->auto_link_speed &&
1452                 !(conf->phy_flags & HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE))
1453                         enables |=
1454                                 HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED;
1455
1456                 req.auto_duplex = conf->duplex;
1457                 enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
1458                 req.auto_pause = conf->auto_pause;
1459                 req.force_pause = conf->force_pause;
1460                 /* Enable auto_pause when set and not forced; else force_pause */
1461                 if (req.auto_pause && !req.force_pause)
1462                         enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
1463                 else
1464                         enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;
1465
1466                 req.enables = rte_cpu_to_le_32(enables);
1467         } else {
1468                 req.flags =
1469                 rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN);
1470                 PMD_DRV_LOG(INFO, "Force Link Down\n");
1471         }
1472
1473         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1474
1475         HWRM_CHECK_RESULT();
1476         HWRM_UNLOCK();
1477
1478         return rc;
1479 }
1480
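/*
 * Query the current PHY state from the firmware and cache the link
 * status, speed, duplex, pause settings and the supported/advertised
 * speed masks (including PAM4) in 'link_info'.
 */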
1481 static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
1482                                    struct bnxt_link_info *link_info)
1483 {
1484         int rc = 0;
1485         struct hwrm_port_phy_qcfg_input req = {0};
1486         struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1487
1488         HWRM_PREP(&req, HWRM_PORT_PHY_QCFG, BNXT_USE_CHIMP_MB);
1489
1490         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1491
1492         HWRM_CHECK_RESULT();
1493
1494         link_info->phy_link_status = resp->link;
1495         link_info->link_up =
1496                 (link_info->phy_link_status ==
1497                  HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) ? 1 : 0;
1498         link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
1499         link_info->duplex = resp->duplex_cfg;
1500         link_info->pause = resp->pause;
1501         link_info->auto_pause = resp->auto_pause;
1502         link_info->force_pause = resp->force_pause;
1503         link_info->auto_mode = resp->auto_mode;
1504         link_info->phy_type = resp->phy_type;
1505         link_info->media_type = resp->media_type;
1506
1507         link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
1508         link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
1509         link_info->auto_link_speed_mask = rte_le_to_cpu_16(resp->auto_link_speed_mask);
1510         link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
1511         link_info->force_link_speed = rte_le_to_cpu_16(resp->force_link_speed);
1512         link_info->phy_ver[0] = resp->phy_maj;
1513         link_info->phy_ver[1] = resp->phy_min;
1514         link_info->phy_ver[2] = resp->phy_bld;
1515         link_info->link_signal_mode =
1516                 rte_le_to_cpu_16(resp->active_fec_signal_mode);
1517         link_info->force_pam4_link_speed =
1518                         rte_le_to_cpu_16(resp->force_pam4_link_speed);
1519         link_info->support_pam4_speeds =
1520                         rte_le_to_cpu_16(resp->support_pam4_speeds);
1521         link_info->auto_pam4_link_speeds =
1522                         rte_le_to_cpu_16(resp->auto_pam4_link_speed_mask);
1523         link_info->module_status = resp->module_status;
1524         HWRM_UNLOCK();
1525
1526         PMD_DRV_LOG(DEBUG, "Link Speed:%d,Auto:%d:%x:%x,Support:%x,Force:%x\n",
1527                     link_info->link_speed, link_info->auto_mode,
1528                     link_info->auto_link_speed, link_info->auto_link_speed_mask,
1529                     link_info->support_speeds, link_info->force_link_speed);
1530         PMD_DRV_LOG(DEBUG, "Link Signal:%d,PAM::Auto:%x,Support:%x,Force:%x\n",
1531                     link_info->link_signal_mode,
1532                     link_info->auto_pam4_link_speeds,
1533                     link_info->support_pam4_speeds,
1534                     link_info->force_pam4_link_speed);
1535         return rc;
1536 }
1537
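/*
 * Query PHY capabilities and cache the autoneg-capable speed masks.
 * Skipped for untrusted VFs; errors are checked silently since older
 * firmware may not implement this command.
 */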
1538 int bnxt_hwrm_port_phy_qcaps(struct bnxt *bp)
1539 {
1540         int rc = 0;
1541         struct hwrm_port_phy_qcaps_input req = {0};
1542         struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
1543         struct bnxt_link_info *link_info = bp->link_info;
1544
1545         if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp))
1546                 return 0;
1547
1548         HWRM_PREP(&req, HWRM_PORT_PHY_QCAPS, BNXT_USE_CHIMP_MB);
1549
1550         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1551
1552         HWRM_CHECK_RESULT_SILENT();
1553
1554         bp->port_cnt = resp->port_cnt;
1555         if (resp->supported_speeds_auto_mode)
1556                 link_info->support_auto_speeds =
1557                         rte_le_to_cpu_16(resp->supported_speeds_auto_mode);
1558         if (resp->supported_pam4_speeds_auto_mode)
1559                 link_info->support_pam4_auto_speeds =
1560                         rte_le_to_cpu_16(resp->supported_pam4_speeds_auto_mode);
1561
1562         HWRM_UNLOCK();
1563
1564         /* Older firmware does not have supported_auto_speeds, so assume
1565          * that all supported speeds can be autonegotiated.
1566          */
1567         if (link_info->auto_link_speed_mask && !link_info->support_auto_speeds)
1568                 link_info->support_auto_speeds = link_info->support_speeds;
1569
1570         return 0;
1571 }
1572
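/*
 * Scan the Tx CoS queues from the highest index down and select the
 * first queue with a lossy service profile as tx_cosq_id[0]. Returns
 * true when one is found.
 */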
1573 static bool bnxt_find_lossy_profile(struct bnxt *bp)
1574 {
1575         int i = 0;
1576
1577         for (i = BNXT_COS_QUEUE_COUNT - 1; i >= 0; i--) {
1578                 if (bp->tx_cos_queue[i].profile ==
1579                     HWRM_QUEUE_SERVICE_PROFILE_LOSSY) {
1580                         bp->tx_cosq_id[0] = bp->tx_cos_queue[i].id;
1581                         return true;
1582                 }
1583         }
1584         return false;
1585 }
1586
1587 static void bnxt_find_first_valid_profile(struct bnxt *bp)
1588 {
1589         int i = 0;
1590
1591         for (i = BNXT_COS_QUEUE_COUNT - 1; i >= 0; i--) {
1592                 if (bp->tx_cos_queue[i].profile !=
1593                     HWRM_QUEUE_SERVICE_PROFILE_UNKNOWN &&
1594                     bp->tx_cos_queue[i].id !=
1595                     HWRM_QUEUE_SERVICE_PROFILE_UNKNOWN) {
1596                         bp->tx_cosq_id[0] = bp->tx_cos_queue[i].id;
1597                         break;
1598                 }
1599         }
1600 }
1601
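/*
 * Discover the Tx and Rx CoS queue configuration. The command is
 * issued once per direction; the results determine the CoS queue IDs
 * used for Tx rings as well as the max_tc/max_lltc/max_q limits.
 */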
1602 int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
1603 {
1604         int rc = 0;
1605         struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
1606         struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
1607         uint32_t dir = HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX;
1608         int i;
1609
1610 get_rx_info:
1611         HWRM_PREP(&req, HWRM_QUEUE_QPORTCFG, BNXT_USE_CHIMP_MB);
1612
1613         req.flags = rte_cpu_to_le_32(dir);
1614         /* Set drv_qmap_cap on HWRM >= 1.9.1 when COS classification is not required. */
1615         if (bp->hwrm_spec_code >= HWRM_VERSION_1_9_1 &&
1616             !(bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY))
1617                 req.drv_qmap_cap =
1618                         HWRM_QUEUE_QPORTCFG_INPUT_DRV_QMAP_CAP_ENABLED;
1619         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1620
1621         HWRM_CHECK_RESULT();
1622
1623         if (dir == HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX) {
1624                 GET_TX_QUEUE_INFO(0);
1625                 GET_TX_QUEUE_INFO(1);
1626                 GET_TX_QUEUE_INFO(2);
1627                 GET_TX_QUEUE_INFO(3);
1628                 GET_TX_QUEUE_INFO(4);
1629                 GET_TX_QUEUE_INFO(5);
1630                 GET_TX_QUEUE_INFO(6);
1631                 GET_TX_QUEUE_INFO(7);
1632         } else {
1633                 GET_RX_QUEUE_INFO(0);
1634                 GET_RX_QUEUE_INFO(1);
1635                 GET_RX_QUEUE_INFO(2);
1636                 GET_RX_QUEUE_INFO(3);
1637                 GET_RX_QUEUE_INFO(4);
1638                 GET_RX_QUEUE_INFO(5);
1639                 GET_RX_QUEUE_INFO(6);
1640                 GET_RX_QUEUE_INFO(7);
1641         }
1642
1643         HWRM_UNLOCK();
1644
1645         if (dir == HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX)
1646                 goto done;
1647
1648         if (bp->hwrm_spec_code < HWRM_VERSION_1_9_1) {
1649                 bp->tx_cosq_id[0] = bp->tx_cos_queue[0].id;
1650         } else {
1651                 int j;
1652
1653                 /* iterate and find the COSq profile to use for Tx */
1654                 if (bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY) {
1655                         for (j = 0, i = 0; i < BNXT_COS_QUEUE_COUNT; i++) {
1656                                 if (bp->tx_cos_queue[i].id != 0xff)
1657                                         bp->tx_cosq_id[j++] =
1658                                                 bp->tx_cos_queue[i].id;
1659                         }
1660                 } else {
1661                         /* When CoS classification is disabled, prefer the
1662                          * LOSSY profile for normal NIC operation. If none
1663                          * is found, fall back to the first valid profile.
1664                          */
1665                         if (!bnxt_find_lossy_profile(bp))
1666                                 bnxt_find_first_valid_profile(bp);
1667
1668                 }
1669         }
1670
1671         bp->max_tc = resp->max_configurable_queues;
1672         bp->max_lltc = resp->max_configurable_lossless_queues;
1673         if (bp->max_tc > BNXT_MAX_QUEUE)
1674                 bp->max_tc = BNXT_MAX_QUEUE;
1675         bp->max_q = bp->max_tc;
1676
1677         if (dir == HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX) {
1678                 dir = HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX;
1679                 goto get_rx_info;
1680         }
1681
1682 done:
1683         return rc;
1684 }
1685
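/*
 * Allocate a hardware ring of the given type (Tx, Rx, Rx aggregation,
 * completion or NQ) and store the returned firmware ring ID in
 * ring->fw_ring_id. Type-specific fields such as the completion ring,
 * statistics context and Rx buffer size are filled in per ring type.
 */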
1686 int bnxt_hwrm_ring_alloc(struct bnxt *bp,
1687                          struct bnxt_ring *ring,
1688                          uint32_t ring_type, uint32_t map_index,
1689                          uint32_t stats_ctx_id, uint32_t cmpl_ring_id,
1690                          uint16_t tx_cosq_id)
1691 {
1692         int rc = 0;
1693         uint32_t enables = 0;
1694         struct hwrm_ring_alloc_input req = {.req_type = 0 };
1695         struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1696         struct rte_mempool *mb_pool;
1697         uint16_t rx_buf_size;
1698
1699         HWRM_PREP(&req, HWRM_RING_ALLOC, BNXT_USE_CHIMP_MB);
1700
1701         req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
1702         req.fbo = rte_cpu_to_le_32(0);
1703         /* Association of ring index with doorbell index */
1704         req.logical_id = rte_cpu_to_le_16(map_index);
1705         req.length = rte_cpu_to_le_32(ring->ring_size);
1706
1707         switch (ring_type) {
1708         case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
1709                 req.ring_type = ring_type;
1710                 req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
1711                 req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id);
1712                 req.queue_id = rte_cpu_to_le_16(tx_cosq_id);
1713                 if (stats_ctx_id != INVALID_STATS_CTX_ID)
1714                         enables |=
1715                         HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
1716                 break;
1717         case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
1718                 req.ring_type = ring_type;
1719                 req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
1720                 req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id);
1721                 if (BNXT_CHIP_P5(bp)) {
1722                         mb_pool = bp->rx_queues[0]->mb_pool;
1723                         rx_buf_size = rte_pktmbuf_data_room_size(mb_pool) -
1724                                       RTE_PKTMBUF_HEADROOM;
1725                         rx_buf_size = RTE_MIN(BNXT_MAX_PKT_LEN, rx_buf_size);
1726                         req.rx_buf_size = rte_cpu_to_le_16(rx_buf_size);
1727                         enables |=
1728                                 HWRM_RING_ALLOC_INPUT_ENABLES_RX_BUF_SIZE_VALID;
1729                 }
1730                 if (stats_ctx_id != INVALID_STATS_CTX_ID)
1731                         enables |=
1732                                 HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
1733                 break;
1734         case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
1735                 req.ring_type = ring_type;
1736                 if (BNXT_HAS_NQ(bp)) {
1737                         /* Association of cp ring with nq */
1738                         req.nq_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
1739                         enables |=
1740                                 HWRM_RING_ALLOC_INPUT_ENABLES_NQ_RING_ID_VALID;
1741                 }
1742                 req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
1743                 break;
1744         case HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ:
1745                 req.ring_type = ring_type;
1746                 req.page_size = BNXT_PAGE_SHFT;
1747                 req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
1748                 break;
1749         case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG:
1750                 req.ring_type = ring_type;
1751                 req.rx_ring_id = rte_cpu_to_le_16(ring->fw_rx_ring_id);
1752
1753                 mb_pool = bp->rx_queues[0]->mb_pool;
1754                 rx_buf_size = rte_pktmbuf_data_room_size(mb_pool) -
1755                               RTE_PKTMBUF_HEADROOM;
1756                 rx_buf_size = RTE_MIN(BNXT_MAX_PKT_LEN, rx_buf_size);
1757                 req.rx_buf_size = rte_cpu_to_le_16(rx_buf_size);
1758
1759                 req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id);
1760                 enables |= HWRM_RING_ALLOC_INPUT_ENABLES_RX_RING_ID_VALID |
1761                            HWRM_RING_ALLOC_INPUT_ENABLES_RX_BUF_SIZE_VALID |
1762                            HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
1763                 break;
1764         default:
1765                 PMD_DRV_LOG(ERR, "hwrm alloc invalid ring type %d\n",
1766                         ring_type);
1767                 HWRM_UNLOCK();
1768                 return -EINVAL;
1769         }
1770         req.enables = rte_cpu_to_le_32(enables);
1771
1772         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1773
1774         if (rc || resp->error_code) {
1775                 if (rc == 0 && resp->error_code)
1776                         rc = rte_le_to_cpu_16(resp->error_code);
1777                 switch (ring_type) {
1778                 case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
1779                         PMD_DRV_LOG(ERR,
1780                                 "hwrm_ring_alloc cp failed. rc:%d\n", rc);
1781                         HWRM_UNLOCK();
1782                         return rc;
1783                 case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
1784                         PMD_DRV_LOG(ERR,
1785                                     "hwrm_ring_alloc rx failed. rc:%d\n", rc);
1786                         HWRM_UNLOCK();
1787                         return rc;
1788                 case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG:
1789                         PMD_DRV_LOG(ERR,
1790                                     "hwrm_ring_alloc rx agg failed. rc:%d\n",
1791                                     rc);
1792                         HWRM_UNLOCK();
1793                         return rc;
1794                 case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
1795                         PMD_DRV_LOG(ERR,
1796                                     "hwrm_ring_alloc tx failed. rc:%d\n", rc);
1797                         HWRM_UNLOCK();
1798                         return rc;
1799                 case HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ:
1800                         PMD_DRV_LOG(ERR,
1801                                     "hwrm_ring_alloc nq failed. rc:%d\n", rc);
1802                         HWRM_UNLOCK();
1803                         return rc;
1804                 default:
1805                         PMD_DRV_LOG(ERR, "Invalid ring. rc:%d\n", rc);
1806                         HWRM_UNLOCK();
1807                         return rc;
1808                 }
1809         }
1810
1811         ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
1812         HWRM_UNLOCK();
1813         return rc;
1814 }
1815
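/*
 * Free a hardware ring. Returns -EINVAL when the ring was never
 * allocated. The firmware ring ID is invalidated even if the command
 * fails so that teardown does not retry a dead ring.
 */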
1816 int bnxt_hwrm_ring_free(struct bnxt *bp,
1817                         struct bnxt_ring *ring, uint32_t ring_type,
1818                         uint16_t cp_ring_id)
1819 {
1820         int rc;
1821         struct hwrm_ring_free_input req = {.req_type = 0 };
1822         struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
1823
1824         if (ring->fw_ring_id == INVALID_HW_RING_ID)
1825                 return -EINVAL;
1826
1827         HWRM_PREP(&req, HWRM_RING_FREE, BNXT_USE_CHIMP_MB);
1828
1829         req.ring_type = ring_type;
1830         req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);
1831         req.cmpl_ring = rte_cpu_to_le_16(cp_ring_id);
1832
1833         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1834         ring->fw_ring_id = INVALID_HW_RING_ID;
1835
1836         if (rc || resp->error_code) {
1837                 if (rc == 0 && resp->error_code)
1838                         rc = rte_le_to_cpu_16(resp->error_code);
1839                 HWRM_UNLOCK();
1840
1841                 switch (ring_type) {
1842                 case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
1843                         PMD_DRV_LOG(ERR, "hwrm_ring_free cp failed. rc:%d\n",
1844                                 rc);
1845                         return rc;
1846                 case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
1847                         PMD_DRV_LOG(ERR, "hwrm_ring_free rx failed. rc:%d\n",
1848                                 rc);
1849                         return rc;
1850                 case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
1851                         PMD_DRV_LOG(ERR, "hwrm_ring_free tx failed. rc:%d\n",
1852                                 rc);
1853                         return rc;
1854                 case HWRM_RING_FREE_INPUT_RING_TYPE_NQ:
1855                         PMD_DRV_LOG(ERR,
1856                                     "hwrm_ring_free nq failed. rc:%d\n", rc);
1857                         return rc;
1858                 case HWRM_RING_FREE_INPUT_RING_TYPE_RX_AGG:
1859                         PMD_DRV_LOG(ERR,
1860                                     "hwrm_ring_free agg failed. rc:%d\n", rc);
1861                         return rc;
1862                 default:
1863                         PMD_DRV_LOG(ERR, "Invalid ring, rc:%d\n", rc);
1864                         return rc;
1865                 }
1866         }
1867         HWRM_UNLOCK();
1868         return 0;
1869 }
1870
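/*
 * Allocate a ring group tying together the completion, Rx and
 * aggregation rings and the statistics context for ring index 'idx'.
 * A no-op when the group already exists.
 */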
1871 int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
1872 {
1873         int rc = 0;
1874         struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
1875         struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1876
1877         /* Don't attempt to re-create the ring group if it is already created */
1878         if (bp->grp_info[idx].fw_grp_id != INVALID_HW_RING_ID)
1879                 return 0;
1880
1881         HWRM_PREP(&req, HWRM_RING_GRP_ALLOC, BNXT_USE_CHIMP_MB);
1882
1883         req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
1884         req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
1885         req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
1886         req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);
1887
1888         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1889
1890         HWRM_CHECK_RESULT();
1891
1892         bp->grp_info[idx].fw_grp_id = rte_le_to_cpu_16(resp->ring_group_id);
1893
1894         HWRM_UNLOCK();
1895
1896         return rc;
1897 }
1898
1899 int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
1900 {
1901         int rc;
1902         struct hwrm_ring_grp_free_input req = {.req_type = 0 };
1903         struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;
1904
1905         if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID)
1906                 return 0;
1907
1908         HWRM_PREP(&req, HWRM_RING_GRP_FREE, BNXT_USE_CHIMP_MB);
1909
1910         req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);
1911
1912         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1913
1914         HWRM_CHECK_RESULT();
1915         HWRM_UNLOCK();
1916
1917         bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
1918         return rc;
1919 }
1920
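/*
 * Clear the hardware counters of a statistics context. A no-op when
 * no context has been allocated for the completion ring.
 */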
1921 int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
1922 {
1923         int rc = 0;
1924         struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
1925         struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
1926
1927         if (cpr->hw_stats_ctx_id == HWRM_NA_SIGNATURE)
1928                 return rc;
1929
1930         HWRM_PREP(&req, HWRM_STAT_CTX_CLR_STATS, BNXT_USE_CHIMP_MB);
1931
1932         req.stat_ctx_id = rte_cpu_to_le_32(cpr->hw_stats_ctx_id);
1933
1934         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1935
1936         HWRM_CHECK_RESULT();
1937         HWRM_UNLOCK();
1938
1939         return rc;
1940 }
1941
1942 int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
1943 {
1944         int rc;
1945         struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
1946         struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1947
1948         if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE)
1949                 return 0;
1950
1951         HWRM_PREP(&req, HWRM_STAT_CTX_ALLOC, BNXT_USE_CHIMP_MB);
1952
1953         req.update_period_ms = rte_cpu_to_le_32(0);
1954
1955         req.stats_dma_addr = rte_cpu_to_le_64(cpr->hw_stats_map);
1956
1957         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1958
1959         HWRM_CHECK_RESULT();
1960
1961         cpr->hw_stats_ctx_id = rte_le_to_cpu_32(resp->stat_ctx_id);
1962
1963         HWRM_UNLOCK();
1964
1965         return rc;
1966 }
1967
1968 static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
1969 {
1970         int rc;
1971         struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
1972         struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;
1973
1974         if (cpr->hw_stats_ctx_id == HWRM_NA_SIGNATURE)
1975                 return 0;
1976
1977         HWRM_PREP(&req, HWRM_STAT_CTX_FREE, BNXT_USE_CHIMP_MB);
1978
1979         req.stat_ctx_id = rte_cpu_to_le_32(cpr->hw_stats_ctx_id);
1980
1981         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1982
1983         HWRM_CHECK_RESULT();
1984         HWRM_UNLOCK();
1985
1986         cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
1987
1988         return rc;
1989 }
1990
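/*
 * Allocate a VNIC in firmware. On devices with ring groups, the ring
 * groups in [start_grp_id, end_grp_id) are mapped to the VNIC first
 * and its RSS/CoS/LB rules are reset to HWRM_NA_SIGNATURE.
 */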
1991 int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1992 {
1993         int rc = 0, i, j;
1994         struct hwrm_vnic_alloc_input req = { 0 };
1995         struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1996
1997         if (!BNXT_HAS_RING_GRPS(bp))
1998                 goto skip_ring_grps;
1999
2000         /* Map ring groups to this VNIC. */
2001         PMD_DRV_LOG(DEBUG, "Alloc VNIC. Start %x, End %x\n",
2002                 vnic->start_grp_id, vnic->end_grp_id);
2003         for (i = vnic->start_grp_id, j = 0; i < vnic->end_grp_id; i++, j++)
2004                 vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
2005
2006         vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
2007         vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
2008         vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
2009         vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
2010
2011 skip_ring_grps:
2012         vnic->mru = BNXT_VNIC_MRU(bp->eth_dev->data->mtu);
2013         HWRM_PREP(&req, HWRM_VNIC_ALLOC, BNXT_USE_CHIMP_MB);
2014
2015         if (vnic->func_default)
2016                 req.flags =
2017                         rte_cpu_to_le_32(HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT);
2018         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2019
2020         HWRM_CHECK_RESULT();
2021
2022         vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
2023         HWRM_UNLOCK();
2024         PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
2025         return rc;
2026 }
2027
2028 static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
2029                                         struct bnxt_vnic_info *vnic,
2030                                         struct bnxt_plcmodes_cfg *pmode)
2031 {
2032         int rc = 0;
2033         struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 };
2034         struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2035
2036         HWRM_PREP(&req, HWRM_VNIC_PLCMODES_QCFG, BNXT_USE_CHIMP_MB);
2037
2038         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2039
2040         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2041
2042         HWRM_CHECK_RESULT();
2043
2044         pmode->flags = rte_le_to_cpu_32(resp->flags);
2045         /* dflt_vnic bit doesn't exist in the _cfg command */
2046         pmode->flags &= ~(HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC);
2047         pmode->jumbo_thresh = rte_le_to_cpu_16(resp->jumbo_thresh);
2048         pmode->hds_offset = rte_le_to_cpu_16(resp->hds_offset);
2049         pmode->hds_threshold = rte_le_to_cpu_16(resp->hds_threshold);
2050
2051         HWRM_UNLOCK();
2052
2053         return rc;
2054 }
2055
2056 static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
2057                                        struct bnxt_vnic_info *vnic,
2058                                        struct bnxt_plcmodes_cfg *pmode)
2059 {
2060         int rc = 0;
2061         struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
2062         struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2063
2064         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
2065                 PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
2066                 return rc;
2067         }
2068
2069         HWRM_PREP(&req, HWRM_VNIC_PLCMODES_CFG, BNXT_USE_CHIMP_MB);
2070
2071         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2072         req.flags = rte_cpu_to_le_32(pmode->flags);
2073         req.jumbo_thresh = rte_cpu_to_le_16(pmode->jumbo_thresh);
2074         req.hds_offset = rte_cpu_to_le_16(pmode->hds_offset);
2075         req.hds_threshold = rte_cpu_to_le_16(pmode->hds_threshold);
2076         req.enables = rte_cpu_to_le_32(
2077             HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID |
2078             HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID |
2079             HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID
2080         );
2081
2082         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2083
2084         HWRM_CHECK_RESULT();
2085         HWRM_UNLOCK();
2086
2087         return rc;
2088 }
2089
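/*
 * Configure a previously allocated VNIC. On P5 devices the default Rx
 * and completion rings are programmed directly (the first started Rx
 * queue is used as the default); on older devices the default ring
 * group and the RSS/CoS/LB rules are programmed instead. Placement
 * modes are queried first and restored afterwards.
 */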
2090 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2091 {
2092         int rc = 0;
2093         struct hwrm_vnic_cfg_input req = {.req_type = 0 };
2094         struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2095         struct bnxt_plcmodes_cfg pmodes = { 0 };
2096         uint32_t ctx_enable_flag = 0;
2097         uint32_t enables = 0;
2098
2099         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
2100                 PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
2101                 return rc;
2102         }
2103
2104         rc = bnxt_hwrm_vnic_plcmodes_qcfg(bp, vnic, &pmodes);
2105         if (rc)
2106                 return rc;
2107
2108         HWRM_PREP(&req, HWRM_VNIC_CFG, BNXT_USE_CHIMP_MB);
2109
2110         if (BNXT_CHIP_P5(bp)) {
2111                 int dflt_rxq = vnic->start_grp_id;
2112                 struct bnxt_rx_ring_info *rxr;
2113                 struct bnxt_cp_ring_info *cpr;
2114                 struct bnxt_rx_queue *rxq;
2115                 int i;
2116
2117                 /*
2118                  * The first active receive ring is used as the VNIC
2119                  * default receive ring. If there are no active receive
2120                  * rings (all corresponding receive queues are stopped),
2121                  * the first receive ring is used.
2122                  */
2123                 for (i = vnic->start_grp_id; i < vnic->end_grp_id; i++) {
2124                         rxq = bp->eth_dev->data->rx_queues[i];
2125                         if (rxq->rx_started) {
2126                                 dflt_rxq = i;
2127                                 break;
2128                         }
2129                 }
2130
2131                 rxq = bp->eth_dev->data->rx_queues[dflt_rxq];
2132                 rxr = rxq->rx_ring;
2133                 cpr = rxq->cp_ring;
2134
2135                 req.default_rx_ring_id =
2136                         rte_cpu_to_le_16(rxr->rx_ring_struct->fw_ring_id);
2137                 req.default_cmpl_ring_id =
2138                         rte_cpu_to_le_16(cpr->cp_ring_struct->fw_ring_id);
2139                 enables = HWRM_VNIC_CFG_INPUT_ENABLES_DEFAULT_RX_RING_ID |
2140                           HWRM_VNIC_CFG_INPUT_ENABLES_DEFAULT_CMPL_RING_ID;
2141                 if (bp->vnic_cap_flags & BNXT_VNIC_CAP_RX_CMPL_V2) {
2142                         enables |= HWRM_VNIC_CFG_INPUT_ENABLES_RX_CSUM_V2_MODE;
2143                         req.rx_csum_v2_mode =
2144                                 HWRM_VNIC_CFG_INPUT_RX_CSUM_V2_MODE_ALL_OK;
2145                 }
2146                 goto config_mru;
2147         }
2148
2149         /* Only RSS is supported for now; COS and LB rules are TBD. */
2150         enables = HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP;
2151         if (vnic->lb_rule != 0xffff)
2152                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE;
2153         if (vnic->cos_rule != 0xffff)
2154                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE;
2155         if (vnic->rss_rule != (uint16_t)HWRM_NA_SIGNATURE) {
2156                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_MRU;
2157                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
2158         }
2159         if (bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY) {
2160                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_QUEUE_ID;
2161                 req.queue_id = rte_cpu_to_le_16(vnic->cos_queue_id);
2162         }
2163
2164         enables |= ctx_enable_flag;
2165         req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);
2166         req.rss_rule = rte_cpu_to_le_16(vnic->rss_rule);
2167         req.cos_rule = rte_cpu_to_le_16(vnic->cos_rule);
2168         req.lb_rule = rte_cpu_to_le_16(vnic->lb_rule);
2169
2170 config_mru:
2171         req.enables = rte_cpu_to_le_32(enables);
2172         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2173         req.mru = rte_cpu_to_le_16(vnic->mru);
2174         /* Configure default VNIC only once. */
2175         if (vnic->func_default && !(bp->flags & BNXT_FLAG_DFLT_VNIC_SET)) {
2176                 req.flags |=
2177                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
2178                 bp->flags |= BNXT_FLAG_DFLT_VNIC_SET;
2179         }
2180         if (vnic->vlan_strip)
2181                 req.flags |=
2182                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
2183         if (vnic->bd_stall)
2184                 req.flags |=
2185                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
2186         if (vnic->rss_dflt_cr)
2187                 req.flags |= rte_cpu_to_le_32(
2188                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE);
2189
2190         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2191
2192         HWRM_CHECK_RESULT();
2193         HWRM_UNLOCK();
2194
2195         rc = bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes);
2196
2197         return rc;
2198 }
2199
2200 int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
2201                 int16_t fw_vf_id)
2202 {
2203         int rc = 0;
2204         struct hwrm_vnic_qcfg_input req = {.req_type = 0 };
2205         struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2206
2207         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
2208                 PMD_DRV_LOG(DEBUG, "VNIC QCFG ID %d\n", vnic->fw_vnic_id);
2209                 return rc;
2210         }
2211         HWRM_PREP(&req, HWRM_VNIC_QCFG, BNXT_USE_CHIMP_MB);
2212
2213         req.enables =
2214                 rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID);
2215         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2216         req.vf_id = rte_cpu_to_le_16(fw_vf_id);
2217
2218         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2219
2220         HWRM_CHECK_RESULT();
2221
2222         vnic->dflt_ring_grp = rte_le_to_cpu_16(resp->dflt_ring_grp);
2223         vnic->rss_rule = rte_le_to_cpu_16(resp->rss_rule);
2224         vnic->cos_rule = rte_le_to_cpu_16(resp->cos_rule);
2225         vnic->lb_rule = rte_le_to_cpu_16(resp->lb_rule);
2226         vnic->mru = rte_le_to_cpu_16(resp->mru);
2227         vnic->func_default = rte_le_to_cpu_32(
2228                         resp->flags) & HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT;
2229         vnic->vlan_strip = rte_le_to_cpu_32(resp->flags) &
2230                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE;
2231         vnic->bd_stall = rte_le_to_cpu_32(resp->flags) &
2232                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE;
2233         vnic->rss_dflt_cr = rte_le_to_cpu_32(resp->flags) &
2234                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE;
2235
2236         HWRM_UNLOCK();
2237
2238         return rc;
2239 }
2240
2241 int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp,
2242                              struct bnxt_vnic_info *vnic, uint16_t ctx_idx)
2243 {
2244         int rc = 0;
2245         uint16_t ctx_id;
2246         struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
2247         struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
2248                                                 bp->hwrm_cmd_resp_addr;
2249
2250         HWRM_PREP(&req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, BNXT_USE_CHIMP_MB);
2251
2252         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2253         HWRM_CHECK_RESULT();
2254
2255         ctx_id = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
2256         if (!BNXT_HAS_RING_GRPS(bp))
2257                 vnic->fw_grp_ids[ctx_idx] = ctx_id;
2258         else if (ctx_idx == 0)
2259                 vnic->rss_rule = ctx_id;
2260
2261         HWRM_UNLOCK();
2262
2263         return rc;
2264 }
2265
2266 static
2267 int _bnxt_hwrm_vnic_ctx_free(struct bnxt *bp,
2268                              struct bnxt_vnic_info *vnic, uint16_t ctx_idx)
2269 {
2270         int rc = 0;
2271         struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
2272         struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
2273                                                 bp->hwrm_cmd_resp_addr;
2274
2275         if (ctx_idx == (uint16_t)HWRM_NA_SIGNATURE) {
2276                 PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
2277                 return rc;
2278         }
2279         HWRM_PREP(&req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, BNXT_USE_CHIMP_MB);
2280
2281         req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(ctx_idx);
2282
2283         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2284
2285         HWRM_CHECK_RESULT();
2286         HWRM_UNLOCK();
2287
2288         return rc;
2289 }
2290
2291 int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2292 {
2293         int rc = 0;
2294
2295         if (BNXT_CHIP_P5(bp)) {
2296                 int j;
2297
2298                 for (j = 0; j < vnic->num_lb_ctxts; j++) {
2299                         rc = _bnxt_hwrm_vnic_ctx_free(bp,
2300                                                       vnic,
2301                                                       vnic->fw_grp_ids[j]);
2302                         vnic->fw_grp_ids[j] = INVALID_HW_RING_ID;
2303                 }
2304                 vnic->num_lb_ctxts = 0;
2305         } else {
2306                 rc = _bnxt_hwrm_vnic_ctx_free(bp, vnic, vnic->rss_rule);
2307                 vnic->rss_rule = INVALID_HW_RING_ID;
2308         }
2309
2310         return rc;
2311 }
2312
2313 int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2314 {
2315         int rc = 0;
2316         struct hwrm_vnic_free_input req = {.req_type = 0 };
2317         struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;
2318
2319         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
2320                 PMD_DRV_LOG(DEBUG, "VNIC FREE ID %x\n", vnic->fw_vnic_id);
2321                 return rc;
2322         }
2323
2324         HWRM_PREP(&req, HWRM_VNIC_FREE, BNXT_USE_CHIMP_MB);
2325
2326         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2327
2328         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2329
2330         HWRM_CHECK_RESULT();
2331         HWRM_UNLOCK();
2332
2333         vnic->fw_vnic_id = INVALID_HW_RING_ID;
2334         /* Clear the flag so the default VNIC can be configured again. */
2335         if (vnic->func_default && (bp->flags & BNXT_FLAG_DFLT_VNIC_SET))
2336                 bp->flags &= ~BNXT_FLAG_DFLT_VNIC_SET;
2337
2338         return rc;
2339 }
2340
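/*
 * On P5 devices RSS is programmed per context: each of the VNIC's
 * num_lb_ctxts contexts gets its own slice of the RSS table
 * (HW_HASH_INDEX_SIZE entries) and the shared hash key.
 */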
2341 static int
2342 bnxt_hwrm_vnic_rss_cfg_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2343 {
2344         int i;
2345         int rc = 0;
2346         int nr_ctxs = vnic->num_lb_ctxts;
2347         struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
2348         struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2349
2350         for (i = 0; i < nr_ctxs; i++) {
2351                 HWRM_PREP(&req, HWRM_VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
2352
2353                 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2354                 req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
2355                 req.hash_mode_flags = vnic->hash_mode;
2356
2357                 req.hash_key_tbl_addr =
2358                         rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
2359
2360                 req.ring_grp_tbl_addr =
2361                         rte_cpu_to_le_64(vnic->rss_table_dma_addr +
2362                                          i * HW_HASH_INDEX_SIZE);
2363                 req.ring_table_pair_index = i;
2364                 req.rss_ctx_idx = rte_cpu_to_le_16(vnic->fw_grp_ids[i]);
2365
2366                 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req),
2367                                             BNXT_USE_CHIMP_MB);
2368
2369                 HWRM_CHECK_RESULT();
2370                 HWRM_UNLOCK();
2371         }
2372
2373         return rc;
2374 }
2375
2376 int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
2377                            struct bnxt_vnic_info *vnic)
2378 {
2379         int rc = 0;
2380         struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
2381         struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2382
2383         if (!vnic->rss_table)
2384                 return 0;
2385
2386         if (BNXT_CHIP_P5(bp))
2387                 return bnxt_hwrm_vnic_rss_cfg_p5(bp, vnic);
2388
2389         HWRM_PREP(&req, HWRM_VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
2390
2391         req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
2392         req.hash_mode_flags = vnic->hash_mode;
2393
2394         req.ring_grp_tbl_addr =
2395             rte_cpu_to_le_64(vnic->rss_table_dma_addr);
2396         req.hash_key_tbl_addr =
2397             rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
2398         req.rss_ctx_idx = rte_cpu_to_le_16(vnic->rss_rule);
2399         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2400
2401         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2402
2403         HWRM_CHECK_RESULT();
2404         HWRM_UNLOCK();
2405
2406         return rc;
2407 }
2408
2409 int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
2410                         struct bnxt_vnic_info *vnic)
2411 {
2412         int rc = 0;
2413         struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
2414         struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2415         uint16_t size;
2416
2417         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
2418                 PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
2419                 return rc;
2420         }
2421
2422         HWRM_PREP(&req, HWRM_VNIC_PLCMODES_CFG, BNXT_USE_CHIMP_MB);
2423
2424         req.flags = rte_cpu_to_le_32(
2425                         HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);
2426
2427         req.enables = rte_cpu_to_le_32(
2428                 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID);
2429
2430         size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
2431         size -= RTE_PKTMBUF_HEADROOM;
2432         size = RTE_MIN(BNXT_MAX_PKT_LEN, size);
2433
2434         req.jumbo_thresh = rte_cpu_to_le_16(size);
2435         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2436
2437         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2438
2439         HWRM_CHECK_RESULT();
2440         HWRM_UNLOCK();
2441
2442         return rc;
2443 }
2444
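/*
 * Enable or disable TPA (LRO) on a VNIC. Rejected with -ENOTSUP on P5
 * devices without TPA v2 support; a request with no TPA flags set
 * disables aggregation.
 */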
2445 int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
2446                         struct bnxt_vnic_info *vnic, bool enable)
2447 {
2448         int rc = 0;
2449         struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
2450         struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2451
2452         if (BNXT_CHIP_P5(bp) && !bp->max_tpa_v2) {
2453                 if (enable)
2454                         PMD_DRV_LOG(ERR, "No HW support for LRO\n");
2455                 return -ENOTSUP;
2456         }
2457
2458         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
2459                 PMD_DRV_LOG(DEBUG, "Invalid vNIC ID\n");
2460                 return 0;
2461         }
2462
2463         HWRM_PREP(&req, HWRM_VNIC_TPA_CFG, BNXT_USE_CHIMP_MB);
2464
2465         if (enable) {
2466                 req.enables = rte_cpu_to_le_32(
2467                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
2468                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS |
2469                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN);
2470                 req.flags = rte_cpu_to_le_32(
2471                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
2472                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
2473                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE |
2474                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO |
2475                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
2476                         HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ);
2477                 req.max_aggs = rte_cpu_to_le_16(BNXT_TPA_MAX_AGGS(bp));
2478                 req.max_agg_segs = rte_cpu_to_le_16(BNXT_TPA_MAX_SEGS(bp));
2479                 req.min_agg_len = rte_cpu_to_le_32(512);
2480         }
2481         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2482
2483         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2484
2485         HWRM_CHECK_RESULT();
2486         HWRM_UNLOCK();
2487
2488         return rc;
2489 }
2490
2491 int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
2492 {
2493         struct hwrm_func_cfg_input req = {0};
2494         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2495         int rc;
2496
2497         req.flags = rte_cpu_to_le_32(bp->pf->vf_info[vf].func_cfg_flags);
2498         req.enables = rte_cpu_to_le_32(
2499                         HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2500         memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
2501         req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
2502
2503         HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
2504
2505         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2506         HWRM_CHECK_RESULT();
2507         HWRM_UNLOCK();
2508
2509         bp->pf->vf_info[vf].random_mac = false;
2510
2511         return rc;
2512 }
2513
2514 int bnxt_hwrm_func_qstats_tx_drop(struct bnxt *bp, uint16_t fid,
2515                                   uint64_t *dropped)
2516 {
2517         int rc = 0;
2518         struct hwrm_func_qstats_input req = {.req_type = 0};
2519         struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
2520
2521         HWRM_PREP(&req, HWRM_FUNC_QSTATS, BNXT_USE_CHIMP_MB);
2522
2523         req.fid = rte_cpu_to_le_16(fid);
2524
2525         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2526
2527         HWRM_CHECK_RESULT();
2528
2529         if (dropped)
2530                 *dropped = rte_le_to_cpu_64(resp->tx_drop_pkts);
2531
2532         HWRM_UNLOCK();
2533
2534         return rc;
2535 }
2536
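/*
 * Retrieve function statistics. The raw response is optionally copied
 * to 'func_qstats'; when 'stats' is supplied, the unicast, multicast
 * and broadcast counters are folded into the rte_eth_stats totals.
 */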
2537 int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
2538                           struct rte_eth_stats *stats,
2539                           struct hwrm_func_qstats_output *func_qstats)
2540 {
2541         int rc = 0;
2542         struct hwrm_func_qstats_input req = {.req_type = 0};
2543         struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
2544
2545         HWRM_PREP(&req, HWRM_FUNC_QSTATS, BNXT_USE_CHIMP_MB);
2546
2547         req.fid = rte_cpu_to_le_16(fid);
2548
2549         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2550
2551         HWRM_CHECK_RESULT();
2552         if (func_qstats)
2553                 memcpy(func_qstats, resp,
2554                        sizeof(struct hwrm_func_qstats_output));
2555
2556         if (!stats)
2557                 goto exit;
2558
2559         stats->ipackets = rte_le_to_cpu_64(resp->rx_ucast_pkts);
2560         stats->ipackets += rte_le_to_cpu_64(resp->rx_mcast_pkts);
2561         stats->ipackets += rte_le_to_cpu_64(resp->rx_bcast_pkts);
2562         stats->ibytes = rte_le_to_cpu_64(resp->rx_ucast_bytes);
2563         stats->ibytes += rte_le_to_cpu_64(resp->rx_mcast_bytes);
2564         stats->ibytes += rte_le_to_cpu_64(resp->rx_bcast_bytes);
2565
2566         stats->opackets = rte_le_to_cpu_64(resp->tx_ucast_pkts);
2567         stats->opackets += rte_le_to_cpu_64(resp->tx_mcast_pkts);
2568         stats->opackets += rte_le_to_cpu_64(resp->tx_bcast_pkts);
2569         stats->obytes = rte_le_to_cpu_64(resp->tx_ucast_bytes);
2570         stats->obytes += rte_le_to_cpu_64(resp->tx_mcast_bytes);
2571         stats->obytes += rte_le_to_cpu_64(resp->tx_bcast_bytes);
2572
2573         stats->imissed = rte_le_to_cpu_64(resp->rx_discard_pkts);
2574         stats->ierrors = rte_le_to_cpu_64(resp->rx_drop_pkts);
2575         stats->oerrors = rte_le_to_cpu_64(resp->tx_discard_pkts);
2576
2577 exit:
2578         HWRM_UNLOCK();
2579
2580         return rc;
2581 }
2582
2583 int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid)
2584 {
2585         int rc = 0;
2586         struct hwrm_func_clr_stats_input req = {.req_type = 0};
2587         struct hwrm_func_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
2588
2589         HWRM_PREP(&req, HWRM_FUNC_CLR_STATS, BNXT_USE_CHIMP_MB);
2590
2591         req.fid = rte_cpu_to_le_16(fid);
2592
2593         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2594
2595         HWRM_CHECK_RESULT();
2596         HWRM_UNLOCK();
2597
2598         return rc;
2599 }
2600
2601 int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
2602 {
2603         unsigned int i;
2604         int rc = 0;
2605
2606         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
2607                 struct bnxt_tx_queue *txq;
2608                 struct bnxt_rx_queue *rxq;
2609                 struct bnxt_cp_ring_info *cpr;
2610
2611                 if (i >= bp->rx_cp_nr_rings) {
2612                         txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
2613                         cpr = txq->cp_ring;
2614                 } else {
2615                         rxq = bp->rx_queues[i];
2616                         cpr = rxq->cp_ring;
2617                 }
2618
2619                 rc = bnxt_hwrm_stat_clear(bp, cpr);
2620                 if (rc)
2621                         return rc;
2622         }
2623         return 0;
2624 }
2625
2626 static int
2627 bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
2628 {
2629         int rc;
2630         unsigned int i;
2631         struct bnxt_cp_ring_info *cpr;
2632
2633         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
2634
2635                 cpr = bp->rx_queues[i]->cp_ring;
2636                 if (BNXT_HAS_RING_GRPS(bp))
2637                         bp->grp_info[i].fw_stats_ctx = -1;
2638                 if (cpr == NULL)
2639                         continue;
2640                 rc = bnxt_hwrm_stat_ctx_free(bp, cpr);
2641                 if (rc)
2642                         return rc;
2643         }
2644
2645         for (i = 0; i < bp->tx_cp_nr_rings; i++) {
2646                 cpr = bp->tx_queues[i]->cp_ring;
2647                 if (cpr == NULL)
2648                         continue;
2649                 rc = bnxt_hwrm_stat_ctx_free(bp, cpr);
2650                 if (rc)
2651                         return rc;
2652         }
2653
2654         return 0;
2655 }
2656
2657 static int
2658 bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
2659 {
2660         uint16_t idx;
2661         int rc = 0;
2662
2663         if (!BNXT_HAS_RING_GRPS(bp))
2664                 return 0;
2665
2666         for (idx = 0; idx < bp->rx_cp_nr_rings; idx++) {
2667
2668                 if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID)
2669                         continue;
2670
2671                 rc = bnxt_hwrm_ring_grp_free(bp, idx);
2672
2673                 if (rc)
2674                         return rc;
2675         }
2676         return rc;
2677 }
2678
2679 void bnxt_free_nq_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
2680 {
2681         struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
2682
2683         bnxt_hwrm_ring_free(bp, cp_ring,
2684                             HWRM_RING_FREE_INPUT_RING_TYPE_NQ,
2685                             INVALID_HW_RING_ID);
2686         memset(cpr->cp_desc_ring, 0,
2687                cpr->cp_ring_struct->ring_size * sizeof(*cpr->cp_desc_ring));
2688         cpr->cp_raw_cons = 0;
2689 }
2690
2691 void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
2692 {
2693         struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
2694
2695         bnxt_hwrm_ring_free(bp, cp_ring,
2696                             HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL,
2697                             INVALID_HW_RING_ID);
2698         memset(cpr->cp_desc_ring, 0,
2699                cpr->cp_ring_struct->ring_size * sizeof(*cpr->cp_desc_ring));
2700         cpr->cp_raw_cons = 0;
2701 }
2702
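/*
 * Tear down all hardware resources of one Rx queue: the ring group,
 * the Rx and aggregation rings, the statistics context and finally
 * the completion ring. All pointers are checked, so this is safe to
 * call on a partially initialized queue.
 */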
2703 void bnxt_free_hwrm_rx_ring(struct bnxt *bp, int queue_index)
2704 {
2705         struct bnxt_rx_queue *rxq = bp->rx_queues[queue_index];
2706         struct bnxt_rx_ring_info *rxr = rxq ? rxq->rx_ring : NULL;
2707         struct bnxt_ring *ring = rxr ? rxr->rx_ring_struct : NULL;
2708         struct bnxt_cp_ring_info *cpr = rxq ? rxq->cp_ring : NULL;
2709
2710         if (BNXT_HAS_RING_GRPS(bp))
2711                 bnxt_hwrm_ring_grp_free(bp, queue_index);
2712
2713         if (ring != NULL && cpr != NULL)
2714                 bnxt_hwrm_ring_free(bp, ring,
2715                                     HWRM_RING_FREE_INPUT_RING_TYPE_RX,
2716                                     cpr->cp_ring_struct->fw_ring_id);
2717         if (BNXT_HAS_RING_GRPS(bp))
2718                 bp->grp_info[queue_index].rx_fw_ring_id = INVALID_HW_RING_ID;
2719
2720         /* Check the agg ring struct explicitly.
2721          * bnxt_need_agg_ring() reflects the current offload flags, but we
2722          * may need to tear down the agg ring before those flags have been
2723          * updated.
2724          */
2725         if (!bnxt_need_agg_ring(bp->eth_dev) ||
2726             (rxr && rxr->ag_ring_struct == NULL))
2727                 goto no_agg;
2728
2729         ring = rxr ? rxr->ag_ring_struct : NULL;
2730         if (ring != NULL && cpr != NULL) {
2731                 bnxt_hwrm_ring_free(bp, ring,
2732                                     BNXT_CHIP_P5(bp) ?
2733                                     HWRM_RING_FREE_INPUT_RING_TYPE_RX_AGG :
2734                                     HWRM_RING_FREE_INPUT_RING_TYPE_RX,
2735                                     cpr->cp_ring_struct->fw_ring_id);
2736         }
2737         if (BNXT_HAS_RING_GRPS(bp))
2738                 bp->grp_info[queue_index].ag_fw_ring_id = INVALID_HW_RING_ID;
2739
2740 no_agg:
2741         if (cpr != NULL) {
2742                 bnxt_hwrm_stat_ctx_free(bp, cpr);
2743                 bnxt_free_cp_ring(bp, cpr);
2744         }
2745
2746         if (BNXT_HAS_RING_GRPS(bp))
2747                 bp->grp_info[queue_index].cp_fw_ring_id = INVALID_HW_RING_ID;
2748 }
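/*
 * A reading of the teardown order above: the Rx and aggregation rings both
 * reference the completion ring via cpr->cp_ring_struct->fw_ring_id, and the
 * stat context is tied to that same completion ring, so the order is: ring
 * group (if any), Rx ring, aggregation ring, stat context, and the completion
 * ring last. Freeing the completion ring first would leave the later
 * ring-free requests pointing at a stale fw_ring_id.
 */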
2749
2750 int bnxt_hwrm_rx_ring_reset(struct bnxt *bp, int queue_index)
2751 {
2752         int rc;
2753         struct hwrm_ring_reset_input req = {.req_type = 0 };
2754         struct hwrm_ring_reset_output *resp = bp->hwrm_cmd_resp_addr;
2755
2756         HWRM_PREP(&req, HWRM_RING_RESET, BNXT_USE_CHIMP_MB);
2757
2758         req.ring_type = HWRM_RING_RESET_INPUT_RING_TYPE_RX_RING_GRP;
2759         req.ring_id = rte_cpu_to_le_16(bp->grp_info[queue_index].fw_grp_id);
2760         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2761
2762         HWRM_CHECK_RESULT();
2763
2764         HWRM_UNLOCK();
2765
2766         return rc;
2767 }
2768
2769 static int
2770 bnxt_free_all_hwrm_rings(struct bnxt *bp)
2771 {
2772         unsigned int i;
2773
2774         for (i = 0; i < bp->tx_cp_nr_rings; i++)
2775                 bnxt_free_hwrm_tx_ring(bp, i);
2776
2777         for (i = 0; i < bp->rx_cp_nr_rings; i++)
2778                 bnxt_free_hwrm_rx_ring(bp, i);
2779
2780         return 0;
2781 }
2782
2783 int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
2784 {
2785         uint16_t i;
2786         int rc = 0;
2787
2788         if (!BNXT_HAS_RING_GRPS(bp))
2789                 return 0;
2790
2791         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
2792                 rc = bnxt_hwrm_ring_grp_alloc(bp, i);
2793                 if (rc)
2794                         return rc;
2795         }
2796         return rc;
2797 }
2798
2799 /*
2800  * HWRM utility functions
2801  */
2802
2803 void bnxt_free_hwrm_resources(struct bnxt *bp)
2804 {
2805         /* Release the HWRM command and response buffers */
2806         rte_free(bp->hwrm_cmd_resp_addr);
2807         rte_free(bp->hwrm_short_cmd_req_addr);
2808         bp->hwrm_cmd_resp_addr = NULL;
2809         bp->hwrm_short_cmd_req_addr = NULL;
2810         bp->hwrm_cmd_resp_dma_addr = 0;
2811         bp->hwrm_short_cmd_req_dma_addr = 0;
2812 }
2813
2814 int bnxt_alloc_hwrm_resources(struct bnxt *bp)
2815 {
2816         struct rte_pci_device *pdev = bp->pdev;
2817         char type[RTE_MEMZONE_NAMESIZE];
2818
2819         snprintf(type, sizeof(type), "bnxt_hwrm_" PCI_PRI_FMT,
2820                 pdev->addr.domain, pdev->addr.bus, pdev->addr.devid,
2821                 pdev->addr.function);
2821         bp->max_resp_len = BNXT_PAGE_SIZE;
2822         bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
2823         if (bp->hwrm_cmd_resp_addr == NULL)
2824                 return -ENOMEM;
2825         bp->hwrm_cmd_resp_dma_addr =
2826                 rte_malloc_virt2iova(bp->hwrm_cmd_resp_addr);
2827         if (bp->hwrm_cmd_resp_dma_addr == RTE_BAD_IOVA) {
2828                 PMD_DRV_LOG(ERR,
2829                         "unable to map response address to physical memory\n");
2830                 return -ENOMEM;
2831         }
2832         rte_spinlock_init(&bp->hwrm_lock);
2833
2834         return 0;
2835 }
2836
2837 int
2838 bnxt_clear_one_vnic_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
2839 {
2840         int rc = 0;
2841
2842         if (filter->filter_type == HWRM_CFA_EM_FILTER) {
2843                 rc = bnxt_hwrm_clear_em_filter(bp, filter);
2844                 if (rc)
2845                         return rc;
2846         } else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) {
2847                 rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
2848                 if (rc)
2849                         return rc;
2850         }
2851
2852         rc = bnxt_hwrm_clear_l2_filter(bp, filter);
2853         return rc;
2854 }
2855
2856 static int
2857 bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2858 {
2859         struct bnxt_filter_info *filter;
2860         int rc = 0;
2861
2862         while ((filter = STAILQ_FIRST(&vnic->filter)) != NULL) {
2863                 rc = bnxt_clear_one_vnic_filter(bp, filter);
2864                 STAILQ_REMOVE_HEAD(&vnic->filter, next);
2865                 bnxt_free_filter(bp, filter);
2866         }
2867         return rc;
2868 }
2869
2870 static int
2871 bnxt_clear_hwrm_vnic_flows(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2872 {
2873         struct bnxt_filter_info *filter;
2874         struct rte_flow *flow;
2875         int rc = 0;
2876
2877         while (!STAILQ_EMPTY(&vnic->flow_list)) {
2878                 flow = STAILQ_FIRST(&vnic->flow_list);
2879                 filter = flow->filter;
2880                 PMD_DRV_LOG(DEBUG, "filter type %d\n", filter->filter_type);
2881                 rc = bnxt_clear_one_vnic_filter(bp, filter);
2882
2883                 STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
2884                 rte_free(flow);
2885         }
2886         return rc;
2887 }
2888
2889 int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2890 {
2891         struct bnxt_filter_info *filter;
2892         int rc = 0;
2893
2894         STAILQ_FOREACH(filter, &vnic->filter, next) {
2895                 if (filter->filter_type == HWRM_CFA_EM_FILTER)
2896                         rc = bnxt_hwrm_set_em_filter(bp, filter->dst_id,
2897                                                      filter);
2898                 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
2899                         rc = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id,
2900                                                          filter);
2901                 else
2902                         rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
2903                                                      filter);
2904                 if (rc)
2905                         break;
2906         }
2907         return rc;
2908 }
2909
2910 static void
2911 bnxt_free_tunnel_ports(struct bnxt *bp)
2912 {
2913         if (bp->vxlan_port_cnt)
2914                 bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id,
2915                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN);
2916
2917         if (bp->geneve_port_cnt)
2918                 bnxt_hwrm_tunnel_dst_port_free(bp, bp->geneve_fw_dst_port_id,
2919                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE);
2920 }
2921
2922 void bnxt_free_all_hwrm_resources(struct bnxt *bp)
2923 {
2924         int i;
2925
2926         if (bp->vnic_info == NULL)
2927                 return;
2928
2929         /*
2930          * Cleanup VNICs in reverse order, to make sure the L2 filter
2931          * from vnic0 is last to be cleaned up.
2932          */
2933         for (i = bp->max_vnics - 1; i >= 0; i--) {
2934                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
2935
2936                 if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
2937                         continue;
2938
2939                 bnxt_clear_hwrm_vnic_flows(bp, vnic);
2940
2941                 bnxt_clear_hwrm_vnic_filters(bp, vnic);
2942
2943                 bnxt_hwrm_vnic_ctx_free(bp, vnic);
2944
2945                 bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);
2946
2947                 bnxt_hwrm_vnic_free(bp, vnic);
2948
2949                 rte_free(vnic->fw_grp_ids);
2950         }
2951         /* Ring resources */
2952         bnxt_free_all_hwrm_rings(bp);
2953         bnxt_free_all_hwrm_ring_grps(bp);
2954         bnxt_free_all_hwrm_stat_ctxs(bp);
2955         bnxt_free_tunnel_ports(bp);
2956 }
2957
2958 static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
2959 {
2960         uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
2961
2962         if ((conf_link_speed & RTE_ETH_LINK_SPEED_FIXED) == RTE_ETH_LINK_SPEED_AUTONEG)
2963                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
2964
2965         switch (conf_link_speed) {
2966         case RTE_ETH_LINK_SPEED_10M_HD:
2967         case RTE_ETH_LINK_SPEED_100M_HD:
2968                 /* FALLTHROUGH */
2969                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
2970         }
2971         return hw_link_duplex;
2972 }
2973
2974 static uint16_t bnxt_check_eth_link_autoneg(uint32_t conf_link)
2975 {
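        /* RTE_ETH_LINK_SPEED_AUTONEG is 0, so an all-zero link_speeds mask
         * requests autonegotiation; any set speed or FIXED bit disables it.
         */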
2976         return !conf_link;
2977 }
2978
2979 static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed,
2980                                           uint16_t pam4_link)
2981 {
2982         uint16_t eth_link_speed = 0;
2983
2984         if (conf_link_speed == RTE_ETH_LINK_SPEED_AUTONEG)
2985                 return RTE_ETH_LINK_SPEED_AUTONEG;
2986
2987         switch (conf_link_speed & ~RTE_ETH_LINK_SPEED_FIXED) {
2988         case RTE_ETH_LINK_SPEED_100M:
2989         case RTE_ETH_LINK_SPEED_100M_HD:
2990                 /* FALLTHROUGH */
2991                 eth_link_speed =
2992                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
2993                 break;
2994         case RTE_ETH_LINK_SPEED_1G:
2995                 eth_link_speed =
2996                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
2997                 break;
2998         case RTE_ETH_LINK_SPEED_2_5G:
2999                 eth_link_speed =
3000                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
3001                 break;
3002         case RTE_ETH_LINK_SPEED_10G:
3003                 eth_link_speed =
3004                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
3005                 break;
3006         case RTE_ETH_LINK_SPEED_20G:
3007                 eth_link_speed =
3008                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
3009                 break;
3010         case RTE_ETH_LINK_SPEED_25G:
3011                 eth_link_speed =
3012                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
3013                 break;
3014         case RTE_ETH_LINK_SPEED_40G:
3015                 eth_link_speed =
3016                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
3017                 break;
3018         case RTE_ETH_LINK_SPEED_50G:
3019                 eth_link_speed = pam4_link ?
3020                         HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_50GB :
3021                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
3022                 break;
3023         case RTE_ETH_LINK_SPEED_100G:
3024                 eth_link_speed = pam4_link ?
3025                         HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_100GB :
3026                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB;
3027                 break;
3028         case RTE_ETH_LINK_SPEED_200G:
3029                 eth_link_speed =
3030                         HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_200GB;
3031                 break;
3032         default:
3033                 PMD_DRV_LOG(ERR,
3034                         "Unsupported link speed %u; default to AUTO\n",
3035                         conf_link_speed);
3036                 break;
3037         }
3038         return eth_link_speed;
3039 }
3040
3041 #define BNXT_SUPPORTED_SPEEDS (RTE_ETH_LINK_SPEED_100M | RTE_ETH_LINK_SPEED_100M_HD | \
3042                 RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_2_5G | \
3043                 RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_20G | RTE_ETH_LINK_SPEED_25G | \
3044                 RTE_ETH_LINK_SPEED_40G | RTE_ETH_LINK_SPEED_50G | \
3045                 RTE_ETH_LINK_SPEED_100G | RTE_ETH_LINK_SPEED_200G)
3046
3047 static int bnxt_validate_link_speed(struct bnxt *bp)
3048 {
3049         uint32_t link_speed = bp->eth_dev->data->dev_conf.link_speeds;
3050         uint16_t port_id = bp->eth_dev->data->port_id;
3051         uint32_t link_speed_capa;
3052         uint32_t one_speed;
3053
3054         if (link_speed == RTE_ETH_LINK_SPEED_AUTONEG)
3055                 return 0;
3056
3057         link_speed_capa = bnxt_get_speed_capabilities(bp);
3058
3059         if (link_speed & RTE_ETH_LINK_SPEED_FIXED) {
3060                 one_speed = link_speed & ~RTE_ETH_LINK_SPEED_FIXED;
3061
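                /* Standard power-of-two test: x & (x - 1) clears the lowest
                 * set bit, so it is nonzero iff more than one speed bit is
                 * set, e.g. 0x10 & 0x0f == 0 but 0x14 & 0x13 == 0x10.
                 * FIXED requires exactly one advertised speed.
                 */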
3062                 if (one_speed & (one_speed - 1)) {
3063                         PMD_DRV_LOG(ERR,
3064                                 "Invalid advertised speeds (%u) for port %u\n",
3065                                 link_speed, port_id);
3066                         return -EINVAL;
3067                 }
3068                 if ((one_speed & link_speed_capa) != one_speed) {
3069                         PMD_DRV_LOG(ERR,
3070                                 "Unsupported advertised speed (%u) for port %u\n",
3071                                 link_speed, port_id);
3072                         return -EINVAL;
3073                 }
3074         } else {
3075                 if (!(link_speed & link_speed_capa)) {
3076                         PMD_DRV_LOG(ERR,
3077                                 "Unsupported advertised speeds (%u) for port %u\n",
3078                                 link_speed, port_id);
3079                         return -EINVAL;
3080                 }
3081         }
3082         return 0;
3083 }
3084
3085 static uint16_t
3086 bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed)
3087 {
3088         uint16_t ret = 0;
3089
3090         if (link_speed == RTE_ETH_LINK_SPEED_AUTONEG) {
3091                 if (bp->link_info->support_speeds)
3092                         return bp->link_info->support_speeds;
3093                 link_speed = BNXT_SUPPORTED_SPEEDS;
3094         }
3095
3096         if (link_speed & RTE_ETH_LINK_SPEED_100M)
3097                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
3098         if (link_speed & RTE_ETH_LINK_SPEED_100M_HD)
3099                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
3100         if (link_speed & RTE_ETH_LINK_SPEED_1G)
3101                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
3102         if (link_speed & RTE_ETH_LINK_SPEED_2_5G)
3103                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
3104         if (link_speed & RTE_ETH_LINK_SPEED_10G)
3105                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
3106         if (link_speed & RTE_ETH_LINK_SPEED_20G)
3107                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
3108         if (link_speed & RTE_ETH_LINK_SPEED_25G)
3109                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
3110         if (link_speed & RTE_ETH_LINK_SPEED_40G)
3111                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
3112         if (link_speed & RTE_ETH_LINK_SPEED_50G)
3113                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
3114         if (link_speed & RTE_ETH_LINK_SPEED_100G)
3115                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100GB;
3116         if (link_speed & RTE_ETH_LINK_SPEED_200G)
3117                 ret |= HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_200GB;
3118         return ret;
3119 }
3120
3121 static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
3122 {
3123         uint32_t eth_link_speed = RTE_ETH_SPEED_NUM_NONE;
3124
3125         switch (hw_link_speed) {
3126         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
3127                 eth_link_speed = RTE_ETH_SPEED_NUM_100M;
3128                 break;
3129         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
3130                 eth_link_speed = RTE_ETH_SPEED_NUM_1G;
3131                 break;
3132         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
3133                 eth_link_speed = RTE_ETH_SPEED_NUM_2_5G;
3134                 break;
3135         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
3136                 eth_link_speed = RTE_ETH_SPEED_NUM_10G;
3137                 break;
3138         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
3139                 eth_link_speed = RTE_ETH_SPEED_NUM_20G;
3140                 break;
3141         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
3142                 eth_link_speed = RTE_ETH_SPEED_NUM_25G;
3143                 break;
3144         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
3145                 eth_link_speed = RTE_ETH_SPEED_NUM_40G;
3146                 break;
3147         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
3148                 eth_link_speed = RTE_ETH_SPEED_NUM_50G;
3149                 break;
3150         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB:
3151                 eth_link_speed = RTE_ETH_SPEED_NUM_100G;
3152                 break;
3153         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_200GB:
3154                 eth_link_speed = RTE_ETH_SPEED_NUM_200G;
3155                 break;
3156         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
3157         default:
3158                 PMD_DRV_LOG(ERR, "HWRM link speed %d not defined\n",
3159                         hw_link_speed);
3160                 break;
3161         }
3162         return eth_link_speed;
3163 }
3164
3165 static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
3166 {
3167         uint16_t eth_link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
3168
3169         switch (hw_link_duplex) {
3170         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
3171         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
3172                 /* FALLTHROUGH */
3173                 eth_link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
3174                 break;
3175         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
3176                 eth_link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
3177                 break;
3178         default:
3179                 PMD_DRV_LOG(ERR, "HWRM link duplex %d not defined\n",
3180                         hw_link_duplex);
3181                 break;
3182         }
3183         return eth_link_duplex;
3184 }
3185
3186 int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
3187 {
3188         int rc = 0;
3189         struct bnxt_link_info *link_info = bp->link_info;
3190
3191         rc = bnxt_hwrm_port_phy_qcaps(bp);
3192         if (rc)
3193                 PMD_DRV_LOG(ERR, "Get link capabilities failed with rc %d\n", rc);
3194
3195         rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
3196         if (rc) {
3197                 PMD_DRV_LOG(ERR, "Get link config failed with rc %d\n", rc);
3198                 goto exit;
3199         }
3200
3201         if (link_info->link_speed)
3202                 link->link_speed =
3203                         bnxt_parse_hw_link_speed(link_info->link_speed);
3204         else
3205                 link->link_speed = RTE_ETH_SPEED_NUM_NONE;
3206         link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
3207         link->link_status = link_info->link_up;
3208         link->link_autoneg = link_info->auto_mode ==
3209                 HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
3210                 RTE_ETH_LINK_FIXED : RTE_ETH_LINK_AUTONEG;
3211 exit:
3212         return rc;
3213 }
3214
3215 int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
3216 {
3217         int rc = 0;
3218         struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
3219         struct bnxt_link_info link_req;
3220         uint16_t speed, autoneg;
3221
3222         if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp))
3223                 return 0;
3224
3225         rc = bnxt_validate_link_speed(bp);
3226         if (rc)
3227                 goto error;
3228
3229         memset(&link_req, 0, sizeof(link_req));
3230         link_req.link_up = link_up;
3231         if (!link_up)
3232                 goto port_phy_cfg;
3233
3234         autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
3235         if (BNXT_CHIP_P5(bp) &&
3236             dev_conf->link_speeds == RTE_ETH_LINK_SPEED_40G) {
3237                 /* 40G is not supported as part of media auto detect.
3238                  * The speed should be forced and autoneg disabled
3239                  * to configure 40G speed.
3240                  */
3241                 PMD_DRV_LOG(INFO, "Disabling autoneg for 40G\n");
3242                 autoneg = 0;
3243         }
3244
3245         /* No auto speeds and no auto_pam4_link_speeds. Disable autoneg */
3246         if (bp->link_info->auto_link_speed == 0 &&
3247             bp->link_info->link_signal_mode &&
3248             bp->link_info->auto_pam4_link_speeds == 0)
3249                 autoneg = 0;
3250
3251         speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds,
3252                                           bp->link_info->link_signal_mode);
3253         link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
3254         /* Autoneg can be done only when the FW allows. */
3255         if (autoneg == 1 && bp->link_info->support_auto_speeds) {
3256                 link_req.phy_flags |=
3257                                 HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
3258                 link_req.auto_link_speed_mask =
3259                         bnxt_parse_eth_link_speed_mask(bp,
3260                                                        dev_conf->link_speeds);
3261                 link_req.auto_pam4_link_speeds =
3262                         bp->link_info->auto_pam4_link_speeds;
3263         } else {
3264                 if (bp->link_info->phy_type ==
3265                     HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET ||
3266                     bp->link_info->phy_type ==
3267                     HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE ||
3268                     bp->link_info->media_type ==
3269                     HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP) {
3270                         PMD_DRV_LOG(ERR, "10GBase-T devices must autoneg\n");
3271                         return -EINVAL;
3272                 }
3273
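                /* Forced-speed precedence implemented below: an explicit
                 * user speed wins; otherwise fall back through forced PAM4,
                 * auto PAM4, supported PAM4, and forced NRZ speeds, ending
                 * with the last autonegotiated NRZ speed.
                 */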
3274                 link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
3275                 /* If user wants a particular speed try that first. */
3276                 if (speed)
3277                         link_req.link_speed = speed;
3278                 else if (bp->link_info->force_pam4_link_speed)
3279                         link_req.link_speed =
3280                                 bp->link_info->force_pam4_link_speed;
3281                 else if (bp->link_info->auto_pam4_link_speeds)
3282                         link_req.link_speed =
3283                                 bp->link_info->auto_pam4_link_speeds;
3284                 else if (bp->link_info->support_pam4_speeds)
3285                         link_req.link_speed =
3286                                 bp->link_info->support_pam4_speeds;
3287                 else if (bp->link_info->force_link_speed)
3288                         link_req.link_speed = bp->link_info->force_link_speed;
3289                 else
3290                         link_req.link_speed = bp->link_info->auto_link_speed;
3291                 /* Auto PAM4 link speed is zero, but auto_link_speed is not
3292                  * zero. Use the auto_link_speed.
3293                  */
3294                 if (bp->link_info->auto_link_speed != 0 &&
3295                     bp->link_info->auto_pam4_link_speeds == 0)
3296                         link_req.link_speed = bp->link_info->auto_link_speed;
3297         }
3298         link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
3299         link_req.auto_pause = bp->link_info->auto_pause;
3300         link_req.force_pause = bp->link_info->force_pause;
3301
3302 port_phy_cfg:
3303         rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
3304         if (rc) {
3305                 PMD_DRV_LOG(ERR,
3306                         "Set link config failed with rc %d\n", rc);
3307         }
3308
3309 error:
3310         return rc;
3311 }
3312
3313 int bnxt_hwrm_func_qcfg(struct bnxt *bp, uint16_t *mtu)
3314 {
3315         struct hwrm_func_qcfg_input req = {0};
3316         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3317         uint16_t flags;
3318         uint16_t svif_info;
3319         int rc = 0;
3320
3321         bp->func_svif = BNXT_SVIF_INVALID;
3321
3322         HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
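        /* fid 0xffff addresses the calling function itself; compare the
         * parent-PF query in bnxt_hwrm_parent_pf_qcfg(), which uses 0xfffe.
         */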
3323         req.fid = rte_cpu_to_le_16(0xffff);
3324
3325         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3326
3327         HWRM_CHECK_RESULT();
3328
3329         bp->vlan = rte_le_to_cpu_16(resp->vlan) & RTE_ETH_VLAN_ID_MAX;
3330
3331         svif_info = rte_le_to_cpu_16(resp->svif_info);
3332         if (svif_info & HWRM_FUNC_QCFG_OUTPUT_SVIF_INFO_SVIF_VALID)
3333                 bp->func_svif = svif_info &
3334                                      HWRM_FUNC_QCFG_OUTPUT_SVIF_INFO_SVIF_MASK;
3335
3336         flags = rte_le_to_cpu_16(resp->flags);
3337         if (BNXT_PF(bp) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_HOST))
3338                 bp->flags |= BNXT_FLAG_MULTI_HOST;
3339
3340         if (BNXT_VF(bp) &&
3341             !BNXT_VF_IS_TRUSTED(bp) &&
3342             (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_TRUSTED_VF)) {
3343                 bp->flags |= BNXT_FLAG_TRUSTED_VF_EN;
3344                 PMD_DRV_LOG(INFO, "Trusted VF cap enabled\n");
3345         } else if (BNXT_VF(bp) &&
3346                    BNXT_VF_IS_TRUSTED(bp) &&
3347                    !(flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_TRUSTED_VF)) {
3348                 bp->flags &= ~BNXT_FLAG_TRUSTED_VF_EN;
3349                 PMD_DRV_LOG(INFO, "Trusted VF cap disabled\n");
3350         }
3351
3352         if (mtu)
3353                 *mtu = rte_le_to_cpu_16(resp->admin_mtu);
3354
3355         switch (resp->port_partition_type) {
3356         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
3357         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
3358         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
3359                 /* FALLTHROUGH */
3360                 bp->flags |= BNXT_FLAG_NPAR_PF;
3361                 break;
3362         default:
3363                 bp->flags &= ~BNXT_FLAG_NPAR_PF;
3364                 break;
3365         }
3366
3367         bp->legacy_db_size =
3368                 rte_le_to_cpu_16(resp->legacy_l2_db_size_kb) * 1024;
3369
3370         HWRM_UNLOCK();
3371
3372         return rc;
3373 }
3374
3375 int bnxt_hwrm_parent_pf_qcfg(struct bnxt *bp)
3376 {
3377         struct hwrm_func_qcfg_input req = {0};
3378         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3379         uint16_t flags;
3380         int rc;
3381
3382         if (!BNXT_VF_IS_TRUSTED(bp))
3383                 return 0;
3384
3385         if (!bp->parent)
3386                 return -EINVAL;
3387
3388         bp->parent->fid = BNXT_PF_FID_INVALID;
3389
3390         HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
3391
3392         req.fid = rte_cpu_to_le_16(0xfffe); /* Request parent PF information. */
3393
3394         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3395
3396         HWRM_CHECK_RESULT_SILENT();
3397
3398         memcpy(bp->parent->mac_addr, resp->mac_address, RTE_ETHER_ADDR_LEN);
3399         bp->parent->vnic = rte_le_to_cpu_16(resp->dflt_vnic_id);
3400         bp->parent->fid = rte_le_to_cpu_16(resp->fid);
3401         bp->parent->port_id = rte_le_to_cpu_16(resp->port_id);
3402
3403         flags = rte_le_to_cpu_16(resp->flags);
3404         /* check for the multi-root support */
3405         if (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_ROOT) {
3406                 bp->flags2 |= BNXT_FLAGS2_MULTIROOT_EN;
3407                 PMD_DRV_LOG(DEBUG, "PF enabled with multi root capability\n");
3408         }
3409
3410         HWRM_UNLOCK();
3411
3412         return 0;
3413 }
3414
3415 int bnxt_hwrm_get_dflt_vnic_svif(struct bnxt *bp, uint16_t fid,
3416                                  uint16_t *vnic_id, uint16_t *svif)
3417 {
3418         struct hwrm_func_qcfg_input req = {0};
3419         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3420         uint16_t svif_info;
3421         int rc = 0;
3422
3423         HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
3424         req.fid = rte_cpu_to_le_16(fid);
3425
3426         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3427
3428         HWRM_CHECK_RESULT();
3429
3430         if (vnic_id)
3431                 *vnic_id = rte_le_to_cpu_16(resp->dflt_vnic_id);
3432
3433         svif_info = rte_le_to_cpu_16(resp->svif_info);
3434         if (svif && (svif_info & HWRM_FUNC_QCFG_OUTPUT_SVIF_INFO_SVIF_VALID))
3435                 *svif = svif_info & HWRM_FUNC_QCFG_OUTPUT_SVIF_INFO_SVIF_MASK;
3436
3437         HWRM_UNLOCK();
3438
3439         return rc;
3440 }
3441
3442 int bnxt_hwrm_port_mac_qcfg(struct bnxt *bp)
3443 {
3444         struct hwrm_port_mac_qcfg_input req = {0};
3445         struct hwrm_port_mac_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3446         uint16_t port_svif_info;
3447         int rc;
3448
3449         bp->port_svif = BNXT_SVIF_INVALID;
3450
3451         if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp))
3452                 return 0;
3453
3454         HWRM_PREP(&req, HWRM_PORT_MAC_QCFG, BNXT_USE_CHIMP_MB);
3455
3456         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3457
3458         HWRM_CHECK_RESULT_SILENT();
3459
3460         port_svif_info = rte_le_to_cpu_16(resp->port_svif_info);
3461         if (port_svif_info &
3462             HWRM_PORT_MAC_QCFG_OUTPUT_PORT_SVIF_INFO_PORT_SVIF_VALID)
3463                 bp->port_svif = port_svif_info &
3464                         HWRM_PORT_MAC_QCFG_OUTPUT_PORT_SVIF_INFO_PORT_SVIF_MASK;
3465
3466         HWRM_UNLOCK();
3467
3468         return 0;
3469 }
3470
3471 static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp,
3472                                  struct bnxt_pf_resource_info *pf_resc)
3473 {
3474         struct hwrm_func_cfg_input req = {0};
3475         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3476         uint32_t enables;
3477         int rc;
3478
3479         enables = HWRM_FUNC_CFG_INPUT_ENABLES_ADMIN_MTU |
3480                   HWRM_FUNC_CFG_INPUT_ENABLES_HOST_MTU |
3481                   HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
3482                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
3483                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
3484                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
3485                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
3486                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
3487                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
3488                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS;
3489
3490         if (BNXT_HAS_RING_GRPS(bp)) {
3491                 enables |= HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS;
3492                 req.num_hw_ring_grps =
3493                         rte_cpu_to_le_16(pf_resc->num_hw_ring_grps);
3494         } else if (BNXT_HAS_NQ(bp)) {
3495                 enables |= HWRM_FUNC_CFG_INPUT_ENABLES_NUM_MSIX;
3496                 req.num_msix = rte_cpu_to_le_16(bp->max_nq_rings);
3497         }
3498
3499         req.flags = rte_cpu_to_le_32(bp->pf->func_cfg_flags);
3500         req.admin_mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);
3501         req.host_mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu);
3502         req.mru = rte_cpu_to_le_16(BNXT_VNIC_MRU(bp->eth_dev->data->mtu));
3503         req.num_rsscos_ctxs = rte_cpu_to_le_16(pf_resc->num_rsscos_ctxs);
3504         req.num_stat_ctxs = rte_cpu_to_le_16(pf_resc->num_stat_ctxs);
3505         req.num_cmpl_rings = rte_cpu_to_le_16(pf_resc->num_cp_rings);
3506         req.num_tx_rings = rte_cpu_to_le_16(pf_resc->num_tx_rings);
3507         req.num_rx_rings = rte_cpu_to_le_16(pf_resc->num_rx_rings);
3508         req.num_l2_ctxs = rte_cpu_to_le_16(pf_resc->num_l2_ctxs);
3509         req.num_vnics = rte_cpu_to_le_16(bp->max_vnics);
3510         req.fid = rte_cpu_to_le_16(0xffff);
3511         req.enables = rte_cpu_to_le_32(enables);
3512
3513         HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
3514
3515         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3516
3517         HWRM_CHECK_RESULT();
3518         HWRM_UNLOCK();
3519
3520         return rc;
3521 }
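/*
 * Note on the enables pattern above (a general HWRM request convention):
 * each bit in req.enables marks a request field as valid; fields whose bit
 * is clear are ignored by the firmware. A minimal sketch, assuming one only
 * wanted to update the MRU (new_mru is a placeholder value):
 *
 *     req.fid = rte_cpu_to_le_16(0xffff);
 *     req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MRU);
 *     req.mru = rte_cpu_to_le_16(new_mru);
 */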
3522
3523 /* min values are the guaranteed resources and max values are subject
3524  * to availability. The strategy for now is to keep both min & max
3525  * values the same.
3526  */
3527 static void
3528 bnxt_fill_vf_func_cfg_req_new(struct bnxt *bp,
3529                               struct hwrm_func_vf_resource_cfg_input *req,
3530                               int num_vfs)
3531 {
3532         req->max_rsscos_ctx = rte_cpu_to_le_16(bp->max_rsscos_ctx /
3533                                                (num_vfs + 1));
3534         req->min_rsscos_ctx = req->max_rsscos_ctx;
3535         req->max_stat_ctx = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
3536         req->min_stat_ctx = req->max_stat_ctx;
3537         req->max_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings /
3538                                                (num_vfs + 1));
3539         req->min_cmpl_rings = req->max_cmpl_rings;
3540         req->max_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
3541         req->min_tx_rings = req->max_tx_rings;
3542         req->max_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
3543         req->min_rx_rings = req->max_rx_rings;
3544         req->max_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
3545         req->min_l2_ctxs = req->max_l2_ctxs;
3546         /* TODO: For now, do not support VMDq/RFS on VFs. */
3547         req->max_vnics = rte_cpu_to_le_16(1);
3548         req->min_vnics = req->max_vnics;
3549         req->max_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
3550                                                  (num_vfs + 1));
3551         req->min_hw_ring_grps = req->max_hw_ring_grps;
3552         req->flags =
3553          rte_cpu_to_le_16(HWRM_FUNC_VF_RESOURCE_CFG_INPUT_FLAGS_MIN_GUARANTEED);
3554 }
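/*
 * Worked example of the split above (illustrative numbers): with
 * bp->max_tx_rings == 128 and num_vfs == 7, each VF is offered
 * 128 / (7 + 1) == 16 TX rings, and min == max means the allocation is
 * guaranteed rather than best-effort (FLAGS_MIN_GUARANTEED).
 */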
3555
3556 static void
3557 bnxt_fill_vf_func_cfg_req_old(struct bnxt *bp,
3558                               struct hwrm_func_cfg_input *req,
3559                               int num_vfs)
3560 {
3561         req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_ADMIN_MTU |
3562                         HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
3563                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
3564                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
3565                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
3566                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
3567                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
3568                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
3569                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
3570                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
3571
3572         req->admin_mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
3573                                           RTE_ETHER_CRC_LEN + RTE_VLAN_HLEN *
3574                                           BNXT_NUM_VLANS);
3575         req->mru = rte_cpu_to_le_16(BNXT_VNIC_MRU(bp->eth_dev->data->mtu));
3576         req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx /
3577                                                 (num_vfs + 1));
3578         req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
3579         req->num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings /
3580                                                (num_vfs + 1));
3581         req->num_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
3582         req->num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
3583         req->num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
3584         /* TODO: For now, do not support VMDq/RFS on VFs. */
3585         req->num_vnics = rte_cpu_to_le_16(1);
3586         req->num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
3587                                                  (num_vfs + 1));
3588 }
3589
3590 /* Update the port wide resource values based on how many resources
3591  * got allocated to the VF.
3592  */
3593 static int bnxt_update_max_resources(struct bnxt *bp,
3594                                      int vf)
3595 {
3596         struct hwrm_func_qcfg_input req = {0};
3597         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3598         int rc;
3599
3600         /* Get the actual allocated values now */
3601         HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
3602         req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
3603         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3604         HWRM_CHECK_RESULT();
3605
3606         bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->alloc_rsscos_ctx);
3607         bp->max_stat_ctx -= rte_le_to_cpu_16(resp->alloc_stat_ctx);
3608         bp->max_cp_rings -= rte_le_to_cpu_16(resp->alloc_cmpl_rings);
3609         bp->max_tx_rings -= rte_le_to_cpu_16(resp->alloc_tx_rings);
3610         bp->max_rx_rings -= rte_le_to_cpu_16(resp->alloc_rx_rings);
3611         bp->max_l2_ctx -= rte_le_to_cpu_16(resp->alloc_l2_ctx);
3612         bp->max_ring_grps -= rte_le_to_cpu_16(resp->alloc_hw_ring_grps);
3613
3614         HWRM_UNLOCK();
3615
3616         return 0;
3617 }
3618
3619 /* Update the PF resource values based on how many resources
3620  * got allocated to it.
3621  */
3622 static int bnxt_update_max_resources_pf_only(struct bnxt *bp)
3623 {
3624         struct hwrm_func_qcfg_input req = {0};
3625         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3626         int rc;
3627
3628         /* Get the actual allocated values now */
3629         HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
3630         req.fid = rte_cpu_to_le_16(0xffff);
3631         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3632         HWRM_CHECK_RESULT();
3633
3634         bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->alloc_rsscos_ctx);
3635         bp->max_stat_ctx = rte_le_to_cpu_16(resp->alloc_stat_ctx);
3636         bp->max_cp_rings = rte_le_to_cpu_16(resp->alloc_cmpl_rings);
3637         bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
3638         bp->max_rx_rings = rte_le_to_cpu_16(resp->alloc_rx_rings);
3639         bp->max_l2_ctx = rte_le_to_cpu_16(resp->alloc_l2_ctx);
3640         bp->max_ring_grps = rte_le_to_cpu_16(resp->alloc_hw_ring_grps);
3641         bp->max_vnics = rte_le_to_cpu_16(resp->alloc_vnics);
3642
3643         HWRM_UNLOCK();
3644
3645         return 0;
3646 }
3647
3648 int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
3649 {
3650         struct hwrm_func_qcfg_input req = {0};
3651         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3652         int rc;
3653
3654         /* Query the VF's currently configured default VLAN */
3655         HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
3656         req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
3657         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3658         HWRM_CHECK_RESULT();
3659         rc = rte_le_to_cpu_16(resp->vlan);
3660
3661         HWRM_UNLOCK();
3662
3663         return rc;
3664 }
3665
3666 static int bnxt_query_pf_resources(struct bnxt *bp,
3667                                    struct bnxt_pf_resource_info *pf_resc)
3668 {
3669         struct hwrm_func_qcfg_input req = {0};
3670         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3671         int rc;
3672
3673         /* Query the allocated resource counts and copy them into pf_resc */
3674         HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
3675         req.fid = rte_cpu_to_le_16(0xffff);
3676         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3677         HWRM_CHECK_RESULT();
3678
3679         pf_resc->num_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
3680         pf_resc->num_rsscos_ctxs = rte_le_to_cpu_16(resp->alloc_rsscos_ctx);
3681         pf_resc->num_stat_ctxs = rte_le_to_cpu_16(resp->alloc_stat_ctx);
3682         pf_resc->num_cp_rings = rte_le_to_cpu_16(resp->alloc_cmpl_rings);
3683         pf_resc->num_rx_rings = rte_le_to_cpu_16(resp->alloc_rx_rings);
3684         pf_resc->num_l2_ctxs = rte_le_to_cpu_16(resp->alloc_l2_ctx);
3685         pf_resc->num_hw_ring_grps = rte_le_to_cpu_32(resp->alloc_hw_ring_grps);
3686         bp->pf->evb_mode = resp->evb_mode;
3687
3688         HWRM_UNLOCK();
3689
3690         return rc;
3691 }
3692
3693 static void
3694 bnxt_calculate_pf_resources(struct bnxt *bp,
3695                             struct bnxt_pf_resource_info *pf_resc,
3696                             int num_vfs)
3697 {
3698         if (!num_vfs) {
3699                 pf_resc->num_rsscos_ctxs = bp->max_rsscos_ctx;
3700                 pf_resc->num_stat_ctxs = bp->max_stat_ctx;
3701                 pf_resc->num_cp_rings = bp->max_cp_rings;
3702                 pf_resc->num_tx_rings = bp->max_tx_rings;
3703                 pf_resc->num_rx_rings = bp->max_rx_rings;
3704                 pf_resc->num_l2_ctxs = bp->max_l2_ctx;
3705                 pf_resc->num_hw_ring_grps = bp->max_ring_grps;
3706
3707                 return;
3708         }
3709
3710         pf_resc->num_rsscos_ctxs = bp->max_rsscos_ctx / (num_vfs + 1) +
3711                                    bp->max_rsscos_ctx % (num_vfs + 1);
3712         pf_resc->num_stat_ctxs = bp->max_stat_ctx / (num_vfs + 1) +
3713                                  bp->max_stat_ctx % (num_vfs + 1);
3714         pf_resc->num_cp_rings = bp->max_cp_rings / (num_vfs + 1) +
3715                                 bp->max_cp_rings % (num_vfs + 1);
3716         pf_resc->num_tx_rings = bp->max_tx_rings / (num_vfs + 1) +
3717                                 bp->max_tx_rings % (num_vfs + 1);
3718         pf_resc->num_rx_rings = bp->max_rx_rings / (num_vfs + 1) +
3719                                 bp->max_rx_rings % (num_vfs + 1);
3720         pf_resc->num_l2_ctxs = bp->max_l2_ctx / (num_vfs + 1) +
3721                                bp->max_l2_ctx % (num_vfs + 1);
3722         pf_resc->num_hw_ring_grps = bp->max_ring_grps / (num_vfs + 1) +
3723                                     bp->max_ring_grps % (num_vfs + 1);
3724 }
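/*
 * Worked example (illustrative numbers): with bp->max_cp_rings == 100 and
 * num_vfs == 6, each of the 7 functions (PF + 6 VFs) gets 100 / 7 == 14
 * completion rings, and the PF additionally keeps the remainder
 * 100 % 7 == 2, for a PF total of 16. The remainder always stays with the
 * PF, so no ring is left unassigned: 16 + 6 * 14 == 100.
 */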
3725
3726 int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
3727 {
3728         struct bnxt_pf_resource_info pf_resc = { 0 };
3729         int rc;
3730
3731         if (!BNXT_PF(bp)) {
3732                 PMD_DRV_LOG(ERR, "Attempt to allocate VFs on a VF!\n");
3733                 return -EINVAL;
3734         }
3735
3736         rc = bnxt_hwrm_func_qcaps(bp);
3737         if (rc)
3738                 return rc;
3739
3740         bnxt_calculate_pf_resources(bp, &pf_resc, 0);
3741
3742         bp->pf->func_cfg_flags &=
3743                 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
3744                   HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
3745         bp->pf->func_cfg_flags |=
3746                 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
3747
3748         rc = bnxt_hwrm_pf_func_cfg(bp, &pf_resc);
3749         if (rc)
3750                 return rc;
3751
3752         rc = bnxt_update_max_resources_pf_only(bp);
3753
3754         return rc;
3755 }
3756
3757 static int
3758 bnxt_configure_vf_req_buf(struct bnxt *bp, int num_vfs)
3759 {
3760         size_t req_buf_sz, sz;
3761         int i, rc;
3762
3763         req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;
3764         bp->pf->vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
3765                 page_roundup(req_buf_sz));
3766         if (bp->pf->vf_req_buf == NULL)
3767                 return -ENOMEM;
3769
3770         for (sz = 0; sz < req_buf_sz; sz += getpagesize())
3771                 rte_mem_lock_page(((char *)bp->pf->vf_req_buf) + sz);
3772
3773         for (i = 0; i < num_vfs; i++)
3774                 bp->pf->vf_info[i].req_buf = ((char *)bp->pf->vf_req_buf) +
3775                                              (i * HWRM_MAX_REQ_LEN);
3776
3777         rc = bnxt_hwrm_func_buf_rgtr(bp, num_vfs);
3778         if (rc)
3779                 rte_free(bp->pf->vf_req_buf);
3780
3781         return rc;
3782 }
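/*
 * Layout of the buffer registered above: one contiguous allocation of
 * num_vfs slots of HWRM_MAX_REQ_LEN bytes each, with vf_info[i].req_buf
 * pointing at slot i so forwarded requests from VF i land in its own slot:
 *
 *     vf_req_buf: [ VF0 req | VF1 req | ... | VF(n-1) req ]
 *                   ^ slot 0 is also the page-aligned base address
 */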
3783
3784 static int
3785 bnxt_process_vf_resc_config_new(struct bnxt *bp, int num_vfs)
3786 {
3787         struct hwrm_func_vf_resource_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3788         struct hwrm_func_vf_resource_cfg_input req = {0};
3789         int i, rc = 0;
3790
3791         bnxt_fill_vf_func_cfg_req_new(bp, &req, num_vfs);
3792         bp->pf->active_vfs = 0;
3793         for (i = 0; i < num_vfs; i++) {
3794                 HWRM_PREP(&req, HWRM_FUNC_VF_RESOURCE_CFG, BNXT_USE_CHIMP_MB);
3795                 req.vf_id = rte_cpu_to_le_16(bp->pf->vf_info[i].fid);
3796                 rc = bnxt_hwrm_send_message(bp,
3797                                             &req,
3798                                             sizeof(req),
3799                                             BNXT_USE_CHIMP_MB);
3800                 if (rc || resp->error_code) {
3801                         PMD_DRV_LOG(ERR,
3802                                 "Failed to initialize VF %d\n", i);
3803                         PMD_DRV_LOG(ERR,
3804                                 "Not all VFs available. (%d, %d)\n",
3805                                 rc, rte_le_to_cpu_16(resp->error_code));
3806                         HWRM_UNLOCK();
3807
3808                         /* If the first VF configuration itself fails,
3809                          * unregister the vf_fwd_request buffer.
3810                          */
3811                         if (i == 0)
3812                                 bnxt_hwrm_func_buf_unrgtr(bp);
3813                         break;
3814                 }
3815                 HWRM_UNLOCK();
3816
3817                 /* Update the max resource values based on the resource values
3818                  * allocated to the VF.
3819                  */
3820                 bnxt_update_max_resources(bp, i);
3821                 bp->pf->active_vfs++;
3822                 bnxt_hwrm_func_clr_stats(bp, bp->pf->vf_info[i].fid);
3823         }
3824
3825         return 0;
3826 }
3827
3828 static int
3829 bnxt_process_vf_resc_config_old(struct bnxt *bp, int num_vfs)
3830 {
3831         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3832         struct hwrm_func_cfg_input req = {0};
3833         int i, rc;
3834
3835         bnxt_fill_vf_func_cfg_req_old(bp, &req, num_vfs);
3836
3837         bp->pf->active_vfs = 0;
3838         for (i = 0; i < num_vfs; i++) {
3839                 HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
3840                 req.flags = rte_cpu_to_le_32(bp->pf->vf_info[i].func_cfg_flags);
3841                 req.fid = rte_cpu_to_le_16(bp->pf->vf_info[i].fid);
3842                 rc = bnxt_hwrm_send_message(bp,
3843                                             &req,
3844                                             sizeof(req),
3845                                             BNXT_USE_CHIMP_MB);
3846
3847                 /* Clear enable flag for next pass */
3848                 req.enables &= ~rte_cpu_to_le_32(
3849                                 HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
3850
3851                 if (rc || resp->error_code) {
3852                         PMD_DRV_LOG(ERR,
3853                                 "Failed to initialize VF %d\n", i);
3854                         PMD_DRV_LOG(ERR,
3855                                 "Not all VFs available. (%d, %d)\n",
3856                                 rc, rte_le_to_cpu_16(resp->error_code));
3857                         HWRM_UNLOCK();
3858
3859                         /* If the first VF configuration itself fails,
3860                          * unregister the vf_fwd_request buffer.
3861                          */
3862                         if (i == 0)
3863                                 bnxt_hwrm_func_buf_unrgtr(bp);
3864                         break;
3865                 }
3866
3867                 HWRM_UNLOCK();
3868
3869                 /* Update the max resource values based on the resource values
3870                  * allocated to the VF.
3871                  */
3872                 bnxt_update_max_resources(bp, i);
3873                 bp->pf->active_vfs++;
3874                 bnxt_hwrm_func_clr_stats(bp, bp->pf->vf_info[i].fid);
3875         }
3876
3877         return 0;
3878 }
3879
3880 static void
3881 bnxt_configure_vf_resources(struct bnxt *bp, int num_vfs)
3882 {
3883         if (bp->flags & BNXT_FLAG_NEW_RM)
3884                 bnxt_process_vf_resc_config_new(bp, num_vfs);
3885         else
3886                 bnxt_process_vf_resc_config_old(bp, num_vfs);
3887 }
3888
3889 static void
3890 bnxt_update_pf_resources(struct bnxt *bp,
3891                          struct bnxt_pf_resource_info *pf_resc)
3892 {
3893         bp->max_rsscos_ctx = pf_resc->num_rsscos_ctxs;
3894         bp->max_stat_ctx = pf_resc->num_stat_ctxs;
3895         bp->max_cp_rings = pf_resc->num_cp_rings;
3896         bp->max_tx_rings = pf_resc->num_tx_rings;
3897         bp->max_rx_rings = pf_resc->num_rx_rings;
3898         bp->max_ring_grps = pf_resc->num_hw_ring_grps;
3899 }
3900
3901 static int32_t
3902 bnxt_configure_pf_resources(struct bnxt *bp,
3903                             struct bnxt_pf_resource_info *pf_resc)
3904 {
3905         /*
3906          * Use STD_TX_RING_MODE, which limits the number of TX rings so
3907          * that QoS can function properly. Without it, PF rings would not
3908          * honor the configured bandwidth settings.
3909          */
3910         bp->pf->func_cfg_flags &=
3911                 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
3912                   HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
3913         bp->pf->func_cfg_flags |=
3914                 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
3915         return bnxt_hwrm_pf_func_cfg(bp, pf_resc);
3916 }
3917
3918 int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
3919 {
3920         struct bnxt_pf_resource_info pf_resc = { 0 };
3921         int rc;
3922
3923         if (!BNXT_PF(bp)) {
3924                 PMD_DRV_LOG(ERR, "Attempt to allocate VFs on a VF!\n");
3925                 return -EINVAL;
3926         }
3927
3928         rc = bnxt_hwrm_func_qcaps(bp);
3929         if (rc)
3930                 return rc;
3931
3932         bnxt_calculate_pf_resources(bp, &pf_resc, num_vfs);
3933
3934         rc = bnxt_configure_pf_resources(bp, &pf_resc);
3935         if (rc)
3936                 return rc;
3937
3938         rc = bnxt_query_pf_resources(bp, &pf_resc);
3939         if (rc)
3940                 return rc;
3941
3942         /*
3943          * Now, create and register a buffer to hold forwarded VF requests
3944          */
3945         rc = bnxt_configure_vf_req_buf(bp, num_vfs);
3946         if (rc)
3947                 return rc;
3948
3949         bnxt_configure_vf_resources(bp, num_vfs);
3950
3951         bnxt_update_pf_resources(bp, &pf_resc);
3952
3953         return 0;
3954 }
3955
3956 int bnxt_hwrm_pf_evb_mode(struct bnxt *bp)
3957 {
3958         struct hwrm_func_cfg_input req = {0};
3959         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3960         int rc;
3961
3962         HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
3963
3964         req.fid = rte_cpu_to_le_16(0xffff);
3965         req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE);
3966         req.evb_mode = bp->pf->evb_mode;
3967
3968         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3969         HWRM_CHECK_RESULT();
3970         HWRM_UNLOCK();
3971
3972         return rc;
3973 }
3974
3975 int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
3976                                 uint8_t tunnel_type)
3977 {
3978         struct hwrm_tunnel_dst_port_alloc_input req = {0};
3979         struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3980         int rc = 0;
3981
3982         HWRM_PREP(&req, HWRM_TUNNEL_DST_PORT_ALLOC, BNXT_USE_CHIMP_MB);
3983         req.tunnel_type = tunnel_type;
3984         req.tunnel_dst_port_val = rte_cpu_to_be_16(port);
3985         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3986         HWRM_CHECK_RESULT();
3987
3988         switch (tunnel_type) {
3989         case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN:
3990                 bp->vxlan_fw_dst_port_id =
3991                         rte_le_to_cpu_16(resp->tunnel_dst_port_id);
3992                 bp->vxlan_port = port;
3993                 break;
3994         case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE:
3995                 bp->geneve_fw_dst_port_id =
3996                         rte_le_to_cpu_16(resp->tunnel_dst_port_id);
3997                 bp->geneve_port = port;
3998                 break;
3999         default:
4000                 break;
4001         }
4002
4003         HWRM_UNLOCK();
4004
4005         return rc;
4006 }
4007
4008 int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
4009                                 uint8_t tunnel_type)
4010 {
4011         struct hwrm_tunnel_dst_port_free_input req = {0};
4012         struct hwrm_tunnel_dst_port_free_output *resp = bp->hwrm_cmd_resp_addr;
4013         int rc = 0;
4014
4015         HWRM_PREP(&req, HWRM_TUNNEL_DST_PORT_FREE, BNXT_USE_CHIMP_MB);
4016
4017         req.tunnel_type = tunnel_type;
4018         req.tunnel_dst_port_id = rte_cpu_to_be_16(port);
4019         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4020
4021         HWRM_CHECK_RESULT();
4022         HWRM_UNLOCK();
4023
4024         if (tunnel_type ==
4025             HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN) {
4026                 bp->vxlan_port = 0;
4027                 bp->vxlan_port_cnt = 0;
4028         }
4029
4030         if (tunnel_type ==
4031             HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE) {
4032                 bp->geneve_port = 0;
4033                 bp->geneve_port_cnt = 0;
4034         }
4035
4036         return rc;
4037 }
4038
4039 int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf,
4040                                         uint32_t flags)
4041 {
4042         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
4043         struct hwrm_func_cfg_input req = {0};
4044         int rc;
4045
4046         HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
4047
4048         req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
4049         req.flags = rte_cpu_to_le_32(flags);
4050         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4051
4052         HWRM_CHECK_RESULT();
4053         HWRM_UNLOCK();
4054
4055         return rc;
4056 }
4057
4058 void vf_vnic_set_rxmask_cb(struct bnxt_vnic_info *vnic, void *flagp)
4059 {
4060         uint32_t *flag = flagp;
4061
4062         vnic->flags = *flag;
4063 }
4064
4065 int bnxt_set_rx_mask_no_vlan(struct bnxt *bp, struct bnxt_vnic_info *vnic)
4066 {
4067         return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
4068 }
4069
4070 int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp, int num_vfs)
4071 {
4072         struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
4073         struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
4074         int rc;
4075
4076         HWRM_PREP(&req, HWRM_FUNC_BUF_RGTR, BNXT_USE_CHIMP_MB);
4077
4078         req.req_buf_num_pages = rte_cpu_to_le_16(1);
4079         req.req_buf_page_size =
4080                 rte_cpu_to_le_16(page_getenum(num_vfs * HWRM_MAX_REQ_LEN));
4081         req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
4082         req.req_buf_page_addr0 =
4083                 rte_cpu_to_le_64(rte_malloc_virt2iova(bp->pf->vf_req_buf));
4084         if (req.req_buf_page_addr0 == RTE_BAD_IOVA) {
4085                 PMD_DRV_LOG(ERR,
4086                         "unable to map buffer address to physical memory\n");
4087                 HWRM_UNLOCK();
4088                 return -ENOMEM;
4089         }
4090
4091         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4092
4093         HWRM_CHECK_RESULT();
4094         HWRM_UNLOCK();
4095
4096         return rc;
4097 }
4098
4099 int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp)
4100 {
4101         int rc = 0;
4102         struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 };
4103         struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
4104
4105         if (!(BNXT_PF(bp) && bp->pdev->max_vfs))
4106                 return 0;
4107
4108         HWRM_PREP(&req, HWRM_FUNC_BUF_UNRGTR, BNXT_USE_CHIMP_MB);
4109
4110         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4111
4112         HWRM_CHECK_RESULT();
4113         HWRM_UNLOCK();
4114
4115         return rc;
4116 }
4117
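     /*
      * Point firmware async event notifications at the driver's default
      * completion ring.  The PF does this through HWRM_FUNC_CFG; the VF
      * variant below uses HWRM_FUNC_VF_CFG (both are dispatched from
      * bnxt_hwrm_set_async_event_cr()).
      */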
4118 int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
4119 {
4120         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
4121         struct hwrm_func_cfg_input req = {0};
4122         int rc;
4123
4124         HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
4125
4126         req.fid = rte_cpu_to_le_16(0xffff);
4127         req.flags = rte_cpu_to_le_32(bp->pf->func_cfg_flags);
4128         req.enables = rte_cpu_to_le_32(
4129                         HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
4130         req.async_event_cr = rte_cpu_to_le_16(
4131                         bp->async_cp_ring->cp_ring_struct->fw_ring_id);
4132         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4133
4134         HWRM_CHECK_RESULT();
4135         HWRM_UNLOCK();
4136
4137         return rc;
4138 }
4139
4140 int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
4141 {
4142         struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
4143         struct hwrm_func_vf_cfg_input req = {0};
4144         int rc;
4145
4146         HWRM_PREP(&req, HWRM_FUNC_VF_CFG, BNXT_USE_CHIMP_MB);
4147
4148         req.enables = rte_cpu_to_le_32(
4149                         HWRM_FUNC_VF_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
4150         req.async_event_cr = rte_cpu_to_le_16(
4151                         bp->async_cp_ring->cp_ring_struct->fw_ring_id);
4152         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4153
4154         HWRM_CHECK_RESULT();
4155         HWRM_UNLOCK();
4156
4157         return rc;
4158 }
4159
4160 int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf)
4161 {
4162         struct hwrm_func_cfg_input req = {0};
4163         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
4164         uint16_t dflt_vlan, fid;
4165         uint32_t func_cfg_flags;
4166         int rc = 0;
4167
4168         HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
4169
4170         if (is_vf) {
4171                 dflt_vlan = bp->pf->vf_info[vf].dflt_vlan;
4172                 fid = bp->pf->vf_info[vf].fid;
4173                 func_cfg_flags = bp->pf->vf_info[vf].func_cfg_flags;
4174         } else {
4175                 fid = rte_cpu_to_le_16(0xffff);
4176                 func_cfg_flags = bp->pf->func_cfg_flags;
4177                 dflt_vlan = bp->vlan;
4178         }
4179
4180         req.flags = rte_cpu_to_le_32(func_cfg_flags);
4181         req.fid = rte_cpu_to_le_16(fid);
4182         req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
4183         req.dflt_vlan = rte_cpu_to_le_16(dflt_vlan);
4184
4185         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4186
4187         HWRM_CHECK_RESULT();
4188         HWRM_UNLOCK();
4189
4190         return rc;
4191 }
4192
4193 int bnxt_hwrm_func_bw_cfg(struct bnxt *bp, uint16_t vf,
4194                         uint16_t max_bw, uint16_t enables)
4195 {
4196         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
4197         struct hwrm_func_cfg_input req = {0};
4198         int rc;
4199
4200         HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
4201
4202         req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
4203         req.enables |= rte_cpu_to_le_32(enables);
4204         req.flags = rte_cpu_to_le_32(bp->pf->vf_info[vf].func_cfg_flags);
4205         req.max_bw = rte_cpu_to_le_32(max_bw);
4206         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4207
4208         HWRM_CHECK_RESULT();
4209         HWRM_UNLOCK();
4210
4211         return rc;
4212 }
4213
4214 int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf)
4215 {
4216         struct hwrm_func_cfg_input req = {0};
4217         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
4218         int rc = 0;
4219
4220         HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
4221
4222         req.flags = rte_cpu_to_le_32(bp->pf->vf_info[vf].func_cfg_flags);
4223         req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
4224         req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
4225         req.dflt_vlan = rte_cpu_to_le_16(bp->pf->vf_info[vf].dflt_vlan);
4226
4227         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4228
4229         HWRM_CHECK_RESULT();
4230         HWRM_UNLOCK();
4231
4232         return rc;
4233 }
4234
4235 int bnxt_hwrm_set_async_event_cr(struct bnxt *bp)
4236 {
4237         int rc;
4238
4239         if (BNXT_PF(bp))
4240                 rc = bnxt_hwrm_func_cfg_def_cp(bp);
4241         else
4242                 rc = bnxt_hwrm_vf_func_cfg_def_cp(bp);
4243
4244         return rc;
4245 }
4246
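     /*
      * Reject a request that firmware forwarded to this function: the
      * encapsulated request is echoed back through HWRM_REJECT_FWD_RESP so
      * firmware can fail it toward the original requester (typically a VF
      * command the PF driver declined to honor).
      */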
4247 int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
4248                               void *encaped, size_t ec_size)
4249 {
4250         int rc = 0;
4251         struct hwrm_reject_fwd_resp_input req = {.req_type = 0};
4252         struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
4253
4254         if (ec_size > sizeof(req.encap_request))
4255                 return -1;
4256
4257         HWRM_PREP(&req, HWRM_REJECT_FWD_RESP, BNXT_USE_CHIMP_MB);
4258
4259         req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
4260         memcpy(req.encap_request, encaped, ec_size);
4261
4262         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4263
4264         HWRM_CHECK_RESULT();
4265         HWRM_UNLOCK();
4266
4267         return rc;
4268 }
4269
4270 int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
4271                                        struct rte_ether_addr *mac)
4272 {
4273         struct hwrm_func_qcfg_input req = {0};
4274         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
4275         int rc;
4276
4277         HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
4278
4279         req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
4280         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4281
4282         HWRM_CHECK_RESULT();
4283
4284         memcpy(mac->addr_bytes, resp->mac_address, RTE_ETHER_ADDR_LEN);
4285
4286         HWRM_UNLOCK();
4287
4288         return rc;
4289 }
4290
4291 int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
4292                             void *encaped, size_t ec_size)
4293 {
4294         int rc = 0;
4295         struct hwrm_exec_fwd_resp_input req = {.req_type = 0};
4296         struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
4297
4298         if (ec_size > sizeof(req.encap_request))
4299                 return -1;
4300
4301         HWRM_PREP(&req, HWRM_EXEC_FWD_RESP, BNXT_USE_CHIMP_MB);
4302
4303         req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
4304         memcpy(req.encap_request, encaped, ec_size);
4305
4306         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4307
4308         HWRM_CHECK_RESULT();
4309         HWRM_UNLOCK();
4310
4311         return rc;
4312 }
4313
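     /*
      * Smooth over transient zero readings from the hardware counters.
      * Example: if the previous query returned 100 and the current query
      * transiently reports 0, keep reporting 100; any non-zero reading
      * becomes the new saved value.
      */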
4314 static void bnxt_update_prev_stat(uint64_t *cntr, uint64_t *prev_cntr)
4315 {
4316         /* One of the HW stat values that make up this counter was zero as
4317          * returned by HW in this iteration, so use the previous
4318          * iteration's counter value
4319          */
4320         if (*prev_cntr && *cntr == 0)
4321                 *cntr = *prev_cntr;
4322         else
4323                 *prev_cntr = *cntr;
4324 }
4325
4326 int bnxt_hwrm_ring_stats(struct bnxt *bp, uint32_t cid, int idx,
4327                          struct bnxt_ring_stats *ring_stats, bool rx)
4328 {
4329         int rc = 0;
4330         struct hwrm_stat_ctx_query_input req = {.req_type = 0};
4331         struct hwrm_stat_ctx_query_output *resp = bp->hwrm_cmd_resp_addr;
4332
4333         HWRM_PREP(&req, HWRM_STAT_CTX_QUERY, BNXT_USE_CHIMP_MB);
4334
4335         req.stat_ctx_id = rte_cpu_to_le_32(cid);
4336
4337         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4338
4339         HWRM_CHECK_RESULT();
4340
4341         if (rx) {
4342                 struct bnxt_ring_stats *prev_stats = &bp->prev_rx_ring_stats[idx];
4343
4344                 ring_stats->rx_ucast_pkts = rte_le_to_cpu_64(resp->rx_ucast_pkts);
4345                 bnxt_update_prev_stat(&ring_stats->rx_ucast_pkts,
4346                                       &prev_stats->rx_ucast_pkts);
4347
4348                 ring_stats->rx_mcast_pkts = rte_le_to_cpu_64(resp->rx_mcast_pkts);
4349                 bnxt_update_prev_stat(&ring_stats->rx_mcast_pkts,
4350                                       &prev_stats->rx_mcast_pkts);
4351
4352                 ring_stats->rx_bcast_pkts = rte_le_to_cpu_64(resp->rx_bcast_pkts);
4353                 bnxt_update_prev_stat(&ring_stats->rx_bcast_pkts,
4354                                       &prev_stats->rx_bcast_pkts);
4355
4356                 ring_stats->rx_ucast_bytes = rte_le_to_cpu_64(resp->rx_ucast_bytes);
4357                 bnxt_update_prev_stat(&ring_stats->rx_ucast_bytes,
4358                                       &prev_stats->rx_ucast_bytes);
4359
4360                 ring_stats->rx_mcast_bytes = rte_le_to_cpu_64(resp->rx_mcast_bytes);
4361                 bnxt_update_prev_stat(&ring_stats->rx_mcast_bytes,
4362                                       &prev_stats->rx_mcast_bytes);
4363
4364                 ring_stats->rx_bcast_bytes = rte_le_to_cpu_64(resp->rx_bcast_bytes);
4365                 bnxt_update_prev_stat(&ring_stats->rx_bcast_bytes,
4366                                       &prev_stats->rx_bcast_bytes);
4367
4368                 ring_stats->rx_discard_pkts = rte_le_to_cpu_64(resp->rx_discard_pkts);
4369                 bnxt_update_prev_stat(&ring_stats->rx_discard_pkts,
4370                                       &prev_stats->rx_discard_pkts);
4371
4372                 ring_stats->rx_error_pkts = rte_le_to_cpu_64(resp->rx_error_pkts);
4373                 bnxt_update_prev_stat(&ring_stats->rx_error_pkts,
4374                                       &prev_stats->rx_error_pkts);
4375
4376                 ring_stats->rx_agg_pkts = rte_le_to_cpu_64(resp->rx_agg_pkts);
4377                 bnxt_update_prev_stat(&ring_stats->rx_agg_pkts,
4378                                       &prev_stats->rx_agg_pkts);
4379
4380                 ring_stats->rx_agg_bytes = rte_le_to_cpu_64(resp->rx_agg_bytes);
4381                 bnxt_update_prev_stat(&ring_stats->rx_agg_bytes,
4382                                       &prev_stats->rx_agg_bytes);
4383
4384                 ring_stats->rx_agg_events = rte_le_to_cpu_64(resp->rx_agg_events);
4385                 bnxt_update_prev_stat(&ring_stats->rx_agg_events,
4386                                       &prev_stats->rx_agg_events);
4387
4388                 ring_stats->rx_agg_aborts = rte_le_to_cpu_64(resp->rx_agg_aborts);
4389                 bnxt_update_prev_stat(&ring_stats->rx_agg_aborts,
4390                                       &prev_stats->rx_agg_aborts);
4391         } else {
4392                 struct bnxt_ring_stats *prev_stats = &bp->prev_tx_ring_stats[idx];
4393
4394                 ring_stats->tx_ucast_pkts = rte_le_to_cpu_64(resp->tx_ucast_pkts);
4395                 bnxt_update_prev_stat(&ring_stats->tx_ucast_pkts,
4396                                       &prev_stats->tx_ucast_pkts);
4397
4398                 ring_stats->tx_mcast_pkts = rte_le_to_cpu_64(resp->tx_mcast_pkts);
4399                 bnxt_update_prev_stat(&ring_stats->tx_mcast_pkts,
4400                                       &prev_stats->tx_mcast_pkts);
4401
4402                 ring_stats->tx_bcast_pkts = rte_le_to_cpu_64(resp->tx_bcast_pkts);
4403                 bnxt_update_prev_stat(&ring_stats->tx_bcast_pkts,
4404                                       &prev_stats->tx_bcast_pkts);
4405
4406                 ring_stats->tx_ucast_bytes = rte_le_to_cpu_64(resp->tx_ucast_bytes);
4407                 bnxt_update_prev_stat(&ring_stats->tx_ucast_bytes,
4408                                       &prev_stats->tx_ucast_bytes);
4409
4410                 ring_stats->tx_mcast_bytes = rte_le_to_cpu_64(resp->tx_mcast_bytes);
4411                 bnxt_update_prev_stat(&ring_stats->tx_mcast_bytes,
4412                                       &prev_stats->tx_mcast_bytes);
4413
4414                 ring_stats->tx_bcast_bytes = rte_le_to_cpu_64(resp->tx_bcast_bytes);
4415                 bnxt_update_prev_stat(&ring_stats->tx_bcast_bytes,
4416                                       &prev_stats->tx_bcast_bytes);
4417
4418                 ring_stats->tx_discard_pkts = rte_le_to_cpu_64(resp->tx_discard_pkts);
4419                 bnxt_update_prev_stat(&ring_stats->tx_discard_pkts,
4420                                       &prev_stats->tx_discard_pkts);
4421         }
4422
4423         HWRM_UNLOCK();
4424
4425         return rc;
4426 }
4427
4428 int bnxt_hwrm_port_qstats(struct bnxt *bp)
4429 {
4430         struct hwrm_port_qstats_input req = {0};
4431         struct hwrm_port_qstats_output *resp = bp->hwrm_cmd_resp_addr;
4432         struct bnxt_pf_info *pf = bp->pf;
4433         int rc;
4434
4435         HWRM_PREP(&req, HWRM_PORT_QSTATS, BNXT_USE_CHIMP_MB);
4436
4437         req.port_id = rte_cpu_to_le_16(pf->port_id);
4438         req.tx_stat_host_addr = rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
4439         req.rx_stat_host_addr = rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
4440         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4441
4442         HWRM_CHECK_RESULT();
4443         HWRM_UNLOCK();
4444
4445         return rc;
4446 }
4447
4448 int bnxt_hwrm_port_clr_stats(struct bnxt *bp)
4449 {
4450         struct hwrm_port_clr_stats_input req = {0};
4451         struct hwrm_port_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
4452         struct bnxt_pf_info *pf = bp->pf;
4453         int rc;
4454
4455         /* Not allowed on NS2 device, NPAR, MultiHost, VF */
4456         if (!(bp->flags & BNXT_FLAG_PORT_STATS) || BNXT_VF(bp) ||
4457             BNXT_NPAR(bp) || BNXT_MH(bp) || BNXT_TOTAL_VFS(bp))
4458                 return 0;
4459
4460         HWRM_PREP(&req, HWRM_PORT_CLR_STATS, BNXT_USE_CHIMP_MB);
4461
4462         req.port_id = rte_cpu_to_le_16(pf->port_id);
4463         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4464
4465         HWRM_CHECK_RESULT();
4466         HWRM_UNLOCK();
4467
4468         return rc;
4469 }
4470
4471 int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
4472 {
4473         struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
4474         struct hwrm_port_led_qcaps_input req = {0};
4475         int rc;
4476
4477         if (BNXT_VF(bp))
4478                 return 0;
4479
4480         HWRM_PREP(&req, HWRM_PORT_LED_QCAPS, BNXT_USE_CHIMP_MB);
4481         req.port_id = bp->pf->port_id;
4482         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4483
4484         HWRM_CHECK_RESULT_SILENT();
4485
4486         if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
4487                 unsigned int i;
4488
4489                 bp->leds->num_leds = resp->num_leds;
4490                 memcpy(bp->leds, &resp->led0_id,
4491                         sizeof(bp->leds[0]) * bp->leds->num_leds);
4492                 for (i = 0; i < bp->leds->num_leds; i++) {
4493                         struct bnxt_led_info *led = &bp->leds[i];
4494
4495                         uint16_t caps = led->led_state_caps;
4496
4497                         if (!led->led_group_id ||
4498                                 !BNXT_LED_ALT_BLINK_CAP(caps)) {
4499                                 bp->leds->num_leds = 0;
4500                                 break;
4501                         }
4502                 }
4503         }
4504
4505         HWRM_UNLOCK();
4506
4507         return rc;
4508 }
4509
4510 int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on)
4511 {
4512         struct hwrm_port_led_cfg_output *resp = bp->hwrm_cmd_resp_addr;
4513         struct hwrm_port_led_cfg_input req = {0};
4514         struct bnxt_led_cfg *led_cfg;
4515         uint8_t led_state = HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_DEFAULT;
4516         uint16_t duration = 0;
4517         int rc, i;
4518
4519         if (!bp->leds->num_leds || BNXT_VF(bp))
4520                 return -EOPNOTSUPP;
4521
4522         HWRM_PREP(&req, HWRM_PORT_LED_CFG, BNXT_USE_CHIMP_MB);
4523
4524         if (led_on) {
4525                 led_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT;
4526                 duration = rte_cpu_to_le_16(500);
4527         }
4528         req.port_id = bp->pf->port_id;
4529         req.num_leds = bp->leds->num_leds;
4530         led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
4531         for (i = 0; i < bp->leds->num_leds; i++, led_cfg++) {
4532                 req.enables |= BNXT_LED_DFLT_ENABLES(i);
4533                 led_cfg->led_id = bp->leds[i].led_id;
4534                 led_cfg->led_state = led_state;
4535                 led_cfg->led_blink_on = duration;
4536                 led_cfg->led_blink_off = duration;
4537                 led_cfg->led_group_id = bp->leds[i].led_group_id;
4538         }
4539
4540         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4541
4542         HWRM_CHECK_RESULT();
4543         HWRM_UNLOCK();
4544
4545         return rc;
4546 }
4547
4548 int bnxt_hwrm_nvm_get_dir_info(struct bnxt *bp, uint32_t *entries,
4549                                uint32_t *length)
4550 {
4551         int rc;
4552         struct hwrm_nvm_get_dir_info_input req = {0};
4553         struct hwrm_nvm_get_dir_info_output *resp = bp->hwrm_cmd_resp_addr;
4554
4555         HWRM_PREP(&req, HWRM_NVM_GET_DIR_INFO, BNXT_USE_CHIMP_MB);
4556
4557         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4558
4559         HWRM_CHECK_RESULT();
4560
4561         *entries = rte_le_to_cpu_32(resp->entries);
4562         *length = rte_le_to_cpu_32(resp->entry_length);
4563
4564         HWRM_UNLOCK();
4565         return rc;
4566 }
4567
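     /*
      * Copy the NVM directory into the caller's buffer.  The first two
      * bytes receive the entry count and the entry length (each truncated
      * to a single byte); the rest is filled from a DMA-able bounce buffer
      * that firmware populates via HWRM_NVM_GET_DIR_ENTRIES.
      */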
4568 int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data)
4569 {
4570         int rc;
4571         uint32_t dir_entries;
4572         uint32_t entry_length;
4573         uint8_t *buf;
4574         size_t buflen;
4575         rte_iova_t dma_handle;
4576         struct hwrm_nvm_get_dir_entries_input req = {0};
4577         struct hwrm_nvm_get_dir_entries_output *resp = bp->hwrm_cmd_resp_addr;
4578
4579         rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length);
4580         if (rc != 0)
4581                 return rc;
4582
4583         *data++ = dir_entries;
4584         *data++ = entry_length;
4585         len -= 2;
4586         memset(data, 0xff, len);
4587
4588         buflen = dir_entries * entry_length;
4589         buf = rte_malloc("nvm_dir", buflen, 0);
4590         if (buf == NULL)
4591                 return -ENOMEM;
4592         dma_handle = rte_malloc_virt2iova(buf);
4593         if (dma_handle == RTE_BAD_IOVA) {
4594                 rte_free(buf);
4595                 PMD_DRV_LOG(ERR,
4596                         "unable to map response address to physical memory\n");
4597                 return -ENOMEM;
4598         }
4599         HWRM_PREP(&req, HWRM_NVM_GET_DIR_ENTRIES, BNXT_USE_CHIMP_MB);
4600         req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
4601         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4602
4603         if (rc == 0)
4604                 memcpy(data, buf, len > buflen ? buflen : len);
4605
4606         rte_free(buf);
4607         HWRM_CHECK_RESULT();
4608         HWRM_UNLOCK();
4609
4610         return rc;
4611 }
4612
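     /*
      * Read 'length' bytes at 'offset' from NVM directory entry 'index'.
      * Firmware DMAs the data into a bounce buffer allocated here, which
      * is then copied out to 'data'.
      */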
4613 int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index,
4614                              uint32_t offset, uint32_t length,
4615                              uint8_t *data)
4616 {
4617         int rc;
4618         uint8_t *buf;
4619         rte_iova_t dma_handle;
4620         struct hwrm_nvm_read_input req = {0};
4621         struct hwrm_nvm_read_output *resp = bp->hwrm_cmd_resp_addr;
4622
4623         buf = rte_malloc("nvm_item", length, 0);
4624         if (!buf)
4625                 return -ENOMEM;
4626
4627         dma_handle = rte_malloc_virt2iova(buf);
4628         if (dma_handle == RTE_BAD_IOVA) {
4629                 rte_free(buf);
4630                 PMD_DRV_LOG(ERR,
4631                         "unable to map response address to physical memory\n");
4632                 return -ENOMEM;
4633         }
4634         HWRM_PREP(&req, HWRM_NVM_READ, BNXT_USE_CHIMP_MB);
4635         req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
4636         req.dir_idx = rte_cpu_to_le_16(index);
4637         req.offset = rte_cpu_to_le_32(offset);
4638         req.len = rte_cpu_to_le_32(length);
4639         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4640         if (rc == 0)
4641                 memcpy(data, buf, length);
4642
4643         rte_free(buf);
4644         HWRM_CHECK_RESULT();
4645         HWRM_UNLOCK();
4646
4647         return rc;
4648 }
4649
4650 int bnxt_hwrm_erase_nvram_directory(struct bnxt *bp, uint8_t index)
4651 {
4652         int rc;
4653         struct hwrm_nvm_erase_dir_entry_input req = {0};
4654         struct hwrm_nvm_erase_dir_entry_output *resp = bp->hwrm_cmd_resp_addr;
4655
4656         HWRM_PREP(&req, HWRM_NVM_ERASE_DIR_ENTRY, BNXT_USE_CHIMP_MB);
4657         req.dir_idx = rte_cpu_to_le_16(index);
4658         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4659         HWRM_CHECK_RESULT();
4660         HWRM_UNLOCK();
4661
4662         return rc;
4663 }
4664
4665 int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
4666                           uint16_t dir_ordinal, uint16_t dir_ext,
4667                           uint16_t dir_attr, const uint8_t *data,
4668                           size_t data_len)
4669 {
4670         int rc;
4671         struct hwrm_nvm_write_input req = {0};
4672         struct hwrm_nvm_write_output *resp = bp->hwrm_cmd_resp_addr;
4673         rte_iova_t dma_handle;
4674         uint8_t *buf;
4675
4676         buf = rte_malloc("nvm_write", data_len, 0);
4677         if (!buf)
4678                 return -ENOMEM;
4679
4680         dma_handle = rte_malloc_virt2iova(buf);
4681         if (dma_handle == RTE_BAD_IOVA) {
4682                 rte_free(buf);
4683                 PMD_DRV_LOG(ERR,
4684                         "unable to map response address to physical memory\n");
4685                 return -ENOMEM;
4686         }
4687         memcpy(buf, data, data_len);
4688
4689         HWRM_PREP(&req, HWRM_NVM_WRITE, BNXT_USE_CHIMP_MB);
4690
4691         req.dir_type = rte_cpu_to_le_16(dir_type);
4692         req.dir_ordinal = rte_cpu_to_le_16(dir_ordinal);
4693         req.dir_ext = rte_cpu_to_le_16(dir_ext);
4694         req.dir_attr = rte_cpu_to_le_16(dir_attr);
4695         req.dir_data_length = rte_cpu_to_le_32(data_len);
4696         req.host_src_addr = rte_cpu_to_le_64(dma_handle);
4697
4698         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4699
4700         rte_free(buf);
4701         HWRM_CHECK_RESULT();
4702         HWRM_UNLOCK();
4703
4704         return rc;
4705 }
4706
4707 static void
4708 bnxt_vnic_count(struct bnxt_vnic_info *vnic __rte_unused, void *cbdata)
4709 {
4710         uint32_t *count = cbdata;
4711
4712         *count = *count + 1;
4713 }
4714
4715 static int bnxt_vnic_count_hwrm_stub(struct bnxt *bp __rte_unused,
4716                                      struct bnxt_vnic_info *vnic __rte_unused)
4717 {
4718         return 0;
4719 }
4720
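     /*
      * Count the active VNICs of a VF by walking the query-and-config
      * helper with a counting callback and a no-op HWRM callback, so
      * nothing is actually reprogrammed.
      */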
4721 int bnxt_vf_vnic_count(struct bnxt *bp, uint16_t vf)
4722 {
4723         uint32_t count = 0;
4724
4725         bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf, bnxt_vnic_count,
4726             &count, bnxt_vnic_count_hwrm_stub);
4727
4728         return count;
4729 }
4730
4731 static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
4732                                         uint16_t *vnic_ids)
4733 {
4734         struct hwrm_func_vf_vnic_ids_query_input req = {0};
4735         struct hwrm_func_vf_vnic_ids_query_output *resp =
4736                                                 bp->hwrm_cmd_resp_addr;
4737         int rc;
4738
4739         /* First query all VNIC ids */
4740         HWRM_PREP(&req, HWRM_FUNC_VF_VNIC_IDS_QUERY, BNXT_USE_CHIMP_MB);
4741
4742         req.vf_id = rte_cpu_to_le_16(bp->pf->first_vf_id + vf);
4743         req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf->total_vnics);
4744         req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_malloc_virt2iova(vnic_ids));
4745
4746         if (req.vnic_id_tbl_addr == RTE_BAD_IOVA) {
4747                 HWRM_UNLOCK();
4748                 PMD_DRV_LOG(ERR,
4749                 "unable to map VNIC ID table address to physical memory\n");
4750                 return -ENOMEM;
4751         }
4752         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4753         HWRM_CHECK_RESULT();
4754         rc = rte_le_to_cpu_32(resp->vnic_id_cnt);
4755
4756         HWRM_UNLOCK();
4757
4758         return rc;
4759 }
4760
4761 /*
4762  * This function queries the VNIC IDs for a specified VF. For each VNIC,
4763  * it calls vnic_cb to update the necessary fields in vnic_info using
4764  * cbdata, then calls hwrm_cb to program the new VNIC configuration.
4765  */
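     /*
      * Usage sketch (names from this file; the flag value is illustrative):
      * to set the RX mask on every active VNIC of a VF, a caller can do
      *
      *     uint32_t flag = ...;    // desired vnic->flags value
      *     bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf,
      *             vf_vnic_set_rxmask_cb, &flag, bnxt_set_rx_mask_no_vlan);
      */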
4766 int bnxt_hwrm_func_vf_vnic_query_and_config(struct bnxt *bp, uint16_t vf,
4767         void (*vnic_cb)(struct bnxt_vnic_info *, void *), void *cbdata,
4768         int (*hwrm_cb)(struct bnxt *bp, struct bnxt_vnic_info *vnic))
4769 {
4770         struct bnxt_vnic_info vnic;
4771         int rc = 0;
4772         int i, num_vnic_ids;
4773         uint16_t *vnic_ids;
4774         size_t vnic_id_sz;
4775         size_t sz;
4776
4777         /* First query all VNIC ids */
4778         vnic_id_sz = bp->pf->total_vnics * sizeof(*vnic_ids);
4779         vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
4780                         RTE_CACHE_LINE_SIZE);
4781         if (vnic_ids == NULL)
4782                 return -ENOMEM;
4783
4784         for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
4785                 rte_mem_lock_page(((char *)vnic_ids) + sz);
4786
4787         num_vnic_ids = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
4788
4789         if (num_vnic_ids < 0) {
                     rte_free(vnic_ids);     /* don't leak the ID table on query failure */
4790                 return num_vnic_ids;
             }
4791
4792         /* Retrieve each VNIC, apply the caller's update, then reprogram it */
4793
4794         for (i = 0; i < num_vnic_ids; i++) {
4795                 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
4796                 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
4797                 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, bp->pf->first_vf_id + vf);
4798                 if (rc)
4799                         break;
4800                 if (vnic.mru <= 4)      /* Indicates unallocated */
4801                         continue;
4802
4803                 vnic_cb(&vnic, cbdata);
4804
4805                 rc = hwrm_cb(bp, &vnic);
4806                 if (rc)
4807                         break;
4808         }
4809
4810         rte_free(vnic_ids);
4811
4812         return rc;
4813 }
4814
4815 int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf,
4816                                               bool on)
4817 {
4818         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
4819         struct hwrm_func_cfg_input req = {0};
4820         int rc;
4821
4822         HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
4823
4824         req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
4825         req.enables |= rte_cpu_to_le_32(
4826                         HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE);
4827         req.vlan_antispoof_mode = on ?
4828                 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN :
4829                 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK;
4830         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4831
4832         HWRM_CHECK_RESULT();
4833         HWRM_UNLOCK();
4834
4835         return rc;
4836 }
4837
4838 int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf)
4839 {
4840         struct bnxt_vnic_info vnic;
4841         uint16_t *vnic_ids;
4842         size_t vnic_id_sz;
4843         int num_vnic_ids, i;
4844         size_t sz;
4845         int rc;
4846
4847         vnic_id_sz = bp->pf->total_vnics * sizeof(*vnic_ids);
4848         vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
4849                         RTE_CACHE_LINE_SIZE);
4850         if (vnic_ids == NULL)
4851                 return -ENOMEM;
4852
4853         for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
4854                 rte_mem_lock_page(((char *)vnic_ids) + sz);
4855
4856         rc = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
4857         if (rc <= 0)
4858                 goto exit;
4859         num_vnic_ids = rc;
4860
4861         /*
4862          * Loop through to find the default VNIC ID.
4863          * TODO: The easier way would be to obtain the resp->dflt_vnic_id
4864          * by sending the hwrm_func_qcfg command to the firmware.
4865          */
4866         for (i = 0; i < num_vnic_ids; i++) {
4867                 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
4868                 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
4869                 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic,
4870                                         bp->pf->first_vf_id + vf);
4871                 if (rc)
4872                         goto exit;
4873                 if (vnic.func_default) {
4874                         rte_free(vnic_ids);
4875                         return vnic.fw_vnic_id;
4876                 }
4877         }
4878         /* Could not find a default VNIC. */
4879         PMD_DRV_LOG(ERR, "No default VNIC\n");
4880 exit:
4881         rte_free(vnic_ids);
4882         return rc;
4883 }
4884
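     /*
      * Allocate an exact-match (EM) CFA flow toward 'dst_id'.  Any filter
      * previously allocated through this bnxt_filter_info is freed first,
      * and the request is sent on the KONG channel when one is available
      * (BNXT_USE_KONG).
      */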
4885 int bnxt_hwrm_set_em_filter(struct bnxt *bp,
4886                          uint16_t dst_id,
4887                          struct bnxt_filter_info *filter)
4888 {
4889         int rc = 0;
4890         struct hwrm_cfa_em_flow_alloc_input req = {.req_type = 0 };
4891         struct hwrm_cfa_em_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4892         uint32_t enables = 0;
4893
4894         if (filter->fw_em_filter_id != UINT64_MAX)
4895                 bnxt_hwrm_clear_em_filter(bp, filter);
4896
4897         HWRM_PREP(&req, HWRM_CFA_EM_FLOW_ALLOC, BNXT_USE_KONG(bp));
4898
4899         req.flags = rte_cpu_to_le_32(filter->flags);
4900
4901         enables = filter->enables |
4902               HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_ID;
4903         req.dst_id = rte_cpu_to_le_16(dst_id);
4904
4905         if (filter->ip_addr_type) {
4906                 req.ip_addr_type = filter->ip_addr_type;
4907                 enables |= HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
4908         }
4909         if (enables &
4910             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
4911                 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
4912         if (enables &
4913             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_MACADDR)
4914                 memcpy(req.src_macaddr, filter->src_macaddr,
4915                        RTE_ETHER_ADDR_LEN);
4916         if (enables &
4917             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_MACADDR)
4918                 memcpy(req.dst_macaddr, filter->dst_macaddr,
4919                        RTE_ETHER_ADDR_LEN);
4920         if (enables &
4921             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_OVLAN_VID)
4922                 req.ovlan_vid = filter->l2_ovlan;
4923         if (enables &
4924             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IVLAN_VID)
4925                 req.ivlan_vid = filter->l2_ivlan;
4926         if (enables &
4927             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ETHERTYPE)
4928                 req.ethertype = rte_cpu_to_be_16(filter->ethertype);
4929         if (enables &
4930             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
4931                 req.ip_protocol = filter->ip_protocol;
4932         if (enables &
4933             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_IPADDR)
4934                 req.src_ipaddr[0] = rte_cpu_to_be_32(filter->src_ipaddr[0]);
4935         if (enables &
4936             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_IPADDR)
4937                 req.dst_ipaddr[0] = rte_cpu_to_be_32(filter->dst_ipaddr[0]);
4938         if (enables &
4939             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_PORT)
4940                 req.src_port = rte_cpu_to_be_16(filter->src_port);
4941         if (enables &
4942             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_PORT)
4943                 req.dst_port = rte_cpu_to_be_16(filter->dst_port);
4944         if (enables &
4945             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
4946                 req.mirror_vnic_id = filter->mirror_vnic_id;
4947
4948         req.enables = rte_cpu_to_le_32(enables);
4949
4950         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
4951
4952         HWRM_CHECK_RESULT();
4953
4954         filter->fw_em_filter_id = rte_le_to_cpu_64(resp->em_filter_id);
4955         HWRM_UNLOCK();
4956
4957         return rc;
4958 }
4959
4960 int bnxt_hwrm_clear_em_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
4961 {
4962         int rc = 0;
4963         struct hwrm_cfa_em_flow_free_input req = {.req_type = 0 };
4964         struct hwrm_cfa_em_flow_free_output *resp = bp->hwrm_cmd_resp_addr;
4965
4966         if (filter->fw_em_filter_id == UINT64_MAX)
4967                 return 0;
4968
4969         HWRM_PREP(&req, HWRM_CFA_EM_FLOW_FREE, BNXT_USE_KONG(bp));
4970
4971         req.em_filter_id = rte_cpu_to_le_64(filter->fw_em_filter_id);
4972
4973         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
4974
4975         HWRM_CHECK_RESULT();
4976         HWRM_UNLOCK();
4977
4978         filter->fw_em_filter_id = UINT64_MAX;
4979         filter->fw_l2_filter_id = UINT64_MAX;
4980
4981         return 0;
4982 }
4983
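     /*
      * Allocate an n-tuple CFA filter.  This mirrors the exact-match path
      * above, except that the match fields can carry masks and the request
      * always goes over the ChiMP channel.
      */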
4984 int bnxt_hwrm_set_ntuple_filter(struct bnxt *bp,
4985                          uint16_t dst_id,
4986                          struct bnxt_filter_info *filter)
4987 {
4988         int rc = 0;
4989         struct hwrm_cfa_ntuple_filter_alloc_input req = {.req_type = 0 };
4990         struct hwrm_cfa_ntuple_filter_alloc_output *resp =
4991                                                 bp->hwrm_cmd_resp_addr;
4992         uint32_t enables = 0;
4993
4994         if (filter->fw_ntuple_filter_id != UINT64_MAX)
4995                 bnxt_hwrm_clear_ntuple_filter(bp, filter);
4996
4997         HWRM_PREP(&req, HWRM_CFA_NTUPLE_FILTER_ALLOC, BNXT_USE_CHIMP_MB);
4998
4999         req.flags = rte_cpu_to_le_32(filter->flags);
5000
5001         enables = filter->enables |
5002               HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
5003         req.dst_id = rte_cpu_to_le_16(dst_id);
5004
5005         if (filter->ip_addr_type) {
5006                 req.ip_addr_type = filter->ip_addr_type;
5007                 enables |=
5008                         HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
5009         }
5010         if (enables &
5011             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
5012                 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
5013         if (enables &
5014             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR)
5015                 memcpy(req.src_macaddr, filter->src_macaddr,
5016                        RTE_ETHER_ADDR_LEN);
5017         if (enables &
5018             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE)
5019                 req.ethertype = rte_cpu_to_be_16(filter->ethertype);
5020         if (enables &
5021             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
5022                 req.ip_protocol = filter->ip_protocol;
5023         if (enables &
5024             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR)
5025                 req.src_ipaddr[0] = rte_cpu_to_le_32(filter->src_ipaddr[0]);
5026         if (enables &
5027             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR_MASK)
5028                 req.src_ipaddr_mask[0] =
5029                         rte_cpu_to_le_32(filter->src_ipaddr_mask[0]);
5030         if (enables &
5031             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR)
5032                 req.dst_ipaddr[0] = rte_cpu_to_le_32(filter->dst_ipaddr[0]);
5033         if (enables &
5034             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR_MASK)
5035                 req.dst_ipaddr_mask[0] =
5036                         rte_cpu_to_be_32(filter->dst_ipaddr_mask[0]);
5037         if (enables &
5038             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT)
5039                 req.src_port = rte_cpu_to_le_16(filter->src_port);
5040         if (enables &
5041             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT_MASK)
5042                 req.src_port_mask = rte_cpu_to_le_16(filter->src_port_mask);
5043         if (enables &
5044             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT)
5045                 req.dst_port = rte_cpu_to_le_16(filter->dst_port);
5046         if (enables &
5047             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT_MASK)
5048                 req.dst_port_mask = rte_cpu_to_le_16(filter->dst_port_mask);
5049         if (enables &
5050             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
5051                 req.mirror_vnic_id = filter->mirror_vnic_id;
5052
5053         req.enables = rte_cpu_to_le_32(enables);
5054
5055         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5056
5057         HWRM_CHECK_RESULT();
5058
5059         filter->fw_ntuple_filter_id = rte_le_to_cpu_64(resp->ntuple_filter_id);
5060         filter->flow_id = rte_le_to_cpu_32(resp->flow_id);
5061         HWRM_UNLOCK();
5062
5063         return rc;
5064 }
5065
5066 int bnxt_hwrm_clear_ntuple_filter(struct bnxt *bp,
5067                                 struct bnxt_filter_info *filter)
5068 {
5069         int rc = 0;
5070         struct hwrm_cfa_ntuple_filter_free_input req = {.req_type = 0 };
5071         struct hwrm_cfa_ntuple_filter_free_output *resp =
5072                                                 bp->hwrm_cmd_resp_addr;
5073
5074         if (filter->fw_ntuple_filter_id == UINT64_MAX)
5075                 return 0;
5076
5077         HWRM_PREP(&req, HWRM_CFA_NTUPLE_FILTER_FREE, BNXT_USE_CHIMP_MB);
5078
5079         req.ntuple_filter_id = rte_cpu_to_le_64(filter->fw_ntuple_filter_id);
5080
5081         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5082
5083         HWRM_CHECK_RESULT();
5084         HWRM_UNLOCK();
5085
5086         filter->fw_ntuple_filter_id = UINT64_MAX;
5087
5088         return 0;
5089 }
5090
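     /*
      * On P5 (Thor) chips the RSS indirection table is programmed per RSS
      * context as 64 (RX ring, completion ring) firmware ID pairs.  Rings
      * whose queues are stopped are skipped and the walk wraps around the
      * active rings, so a partially started port still gets a fully
      * populated table.
      */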
5091 static int
5092 bnxt_vnic_rss_configure_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic)
5093 {
5094         struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
5095         uint8_t *rxq_state = bp->eth_dev->data->rx_queue_state;
5096         struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
5097         struct bnxt_rx_queue **rxqs = bp->rx_queues;
5098         uint16_t *ring_tbl = vnic->rss_table;
5099         int nr_ctxs = vnic->num_lb_ctxts;
5100         int max_rings = bp->rx_nr_rings;
5101         int i, j, k, cnt;
5102         int rc = 0;
5103
5104         for (i = 0, k = 0; i < nr_ctxs; i++) {
5105                 struct bnxt_rx_ring_info *rxr;
5106                 struct bnxt_cp_ring_info *cpr;
5107
5108                 HWRM_PREP(&req, HWRM_VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
5109
5110                 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
5111                 req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
5112                 req.hash_mode_flags = vnic->hash_mode;
5113
5114                 req.ring_grp_tbl_addr =
5115                     rte_cpu_to_le_64(vnic->rss_table_dma_addr +
5116                                      i * BNXT_RSS_ENTRIES_PER_CTX_P5 *
5117                                      2 * sizeof(*ring_tbl));
5118                 req.hash_key_tbl_addr =
5119                     rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
5120
5121                 req.ring_table_pair_index = i;
5122                 req.rss_ctx_idx = rte_cpu_to_le_16(vnic->fw_grp_ids[i]);
5123
5124                 for (j = 0; j < 64; j++) {
5125                         uint16_t ring_id;
5126
5127                         /* Find next active ring. */
5128                         for (cnt = 0; cnt < max_rings; cnt++) {
5129                                 if (rxq_state[k] != RTE_ETH_QUEUE_STATE_STOPPED)
5130                                         break;
5131                                 if (++k == max_rings)
5132                                         k = 0;
5133                         }
5134
5135                         /* Return if no rings are active. */
5136                         if (cnt == max_rings) {
5137                                 HWRM_UNLOCK();
5138                                 return 0;
5139                         }
5140
5141                         /* Add rx/cp ring pair to RSS table. */
5142                         rxr = rxqs[k]->rx_ring;
5143                         cpr = rxqs[k]->cp_ring;
5144
5145                         ring_id = rxr->rx_ring_struct->fw_ring_id;
5146                         *ring_tbl++ = rte_cpu_to_le_16(ring_id);
5147                         ring_id = cpr->cp_ring_struct->fw_ring_id;
5148                         *ring_tbl++ = rte_cpu_to_le_16(ring_id);
5149
5150                         if (++k == max_rings)
5151                                 k = 0;
5152                 }
5153                 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req),
5154                                             BNXT_USE_CHIMP_MB);
5155
5156                 HWRM_CHECK_RESULT();
5157                 HWRM_UNLOCK();
5158         }
5159
5160         return rc;
5161 }
5162
5163 int bnxt_vnic_rss_configure(struct bnxt *bp, struct bnxt_vnic_info *vnic)
5164 {
5165         unsigned int rss_idx, fw_idx, i;
5166
5167         if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
5168                 return 0;
5169
5170         if (!(vnic->rss_table && vnic->hash_type))
5171                 return 0;
5172
5173         if (BNXT_CHIP_P5(bp))
5174                 return bnxt_vnic_rss_configure_p5(bp, vnic);
5175
5176         /*
5177          * Fill the RSS hash & redirection table with
5178          * ring group ids for all VNICs
5179          */
5180         for (rss_idx = 0, fw_idx = 0; rss_idx < HW_HASH_INDEX_SIZE;
5181              rss_idx++, fw_idx++) {
5182                 for (i = 0; i < bp->rx_cp_nr_rings; i++) {
5183                         fw_idx %= bp->rx_cp_nr_rings;
5184                         if (vnic->fw_grp_ids[fw_idx] != INVALID_HW_RING_ID)
5185                                 break;
5186                         fw_idx++;
5187                 }
5188
5189                 if (i == bp->rx_cp_nr_rings)
5190                         return 0;
5191
5192                 vnic->rss_table[rss_idx] = vnic->fw_grp_ids[fw_idx];
5193         }
5194
5195         return bnxt_hwrm_vnic_rss_cfg(bp, vnic);
5196 }
5197
5198 static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal,
5199         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
5200 {
5201         uint16_t flags;
5202
5203         req->num_cmpl_aggr_int = rte_cpu_to_le_16(hw_coal->num_cmpl_aggr_int);
5204
5205         /* This is a 6-bit value and must not be 0, or we'll get non-stop IRQs */
5206         req->num_cmpl_dma_aggr = rte_cpu_to_le_16(hw_coal->num_cmpl_dma_aggr);
5207
5208         /* This is a 6-bit value and must not be 0, or we'll get non-stop IRQs */
5209         req->num_cmpl_dma_aggr_during_int =
5210                 rte_cpu_to_le_16(hw_coal->num_cmpl_dma_aggr_during_int);
5211
5212         req->int_lat_tmr_max = rte_cpu_to_le_16(hw_coal->int_lat_tmr_max);
5213
5214         /* min timer set to 1/2 of interrupt timer */
5215         req->int_lat_tmr_min = rte_cpu_to_le_16(hw_coal->int_lat_tmr_min);
5216
5217         /* buf timer set to 1/4 of interrupt timer */
5218         req->cmpl_aggr_dma_tmr = rte_cpu_to_le_16(hw_coal->cmpl_aggr_dma_tmr);
5219
5220         req->cmpl_aggr_dma_tmr_during_int =
5221                 rte_cpu_to_le_16(hw_coal->cmpl_aggr_dma_tmr_during_int);
5222
5223         flags = HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET |
5224                 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_RING_IDLE;
5225         req->flags = rte_cpu_to_le_16(flags);
5226 }
5227
5228 static int bnxt_hwrm_set_coal_params_p5(struct bnxt *bp,
5229                 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *agg_req)
5230 {
5231         struct hwrm_ring_aggint_qcaps_input req = {0};
5232         struct hwrm_ring_aggint_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
5233         uint32_t enables;
5234         uint16_t flags;
5235         int rc;
5236
5237         HWRM_PREP(&req, HWRM_RING_AGGINT_QCAPS, BNXT_USE_CHIMP_MB);
5238         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5239         HWRM_CHECK_RESULT();
5240
5241         agg_req->num_cmpl_dma_aggr = resp->num_cmpl_dma_aggr_max;
5242         agg_req->cmpl_aggr_dma_tmr = resp->cmpl_aggr_dma_tmr_min;
5243
5244         flags = HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET |
5245                 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_RING_IDLE;
5246         agg_req->flags = rte_cpu_to_le_16(flags);
5247         enables =
5248          HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_CMPL_AGGR_DMA_TMR |
5249          HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_NUM_CMPL_DMA_AGGR;
5250         agg_req->enables = rte_cpu_to_le_32(enables);
5251
5252         HWRM_UNLOCK();
5253         return rc;
5254 }
5255
5256 int bnxt_hwrm_set_ring_coal(struct bnxt *bp,
5257                         struct bnxt_coal *coal, uint16_t ring_id)
5258 {
5259         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
5260         struct hwrm_ring_cmpl_ring_cfg_aggint_params_output *resp =
5261                                                 bp->hwrm_cmd_resp_addr;
5262         int rc;
5263
5264         /* Set ring coalesce parameters only for P5 (Thor) chips and Stratus devices */
5265         if (BNXT_CHIP_P5(bp)) {
5266                 if (bnxt_hwrm_set_coal_params_p5(bp, &req))
5267                         return -1;
5268         } else if (bnxt_stratus_device(bp)) {
5269                 bnxt_hwrm_set_coal_params(coal, &req);
5270         } else {
5271                 return 0;
5272         }
5273
5274         HWRM_PREP(&req,
5275                   HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS,
5276                   BNXT_USE_CHIMP_MB);
5277         req.ring_id = rte_cpu_to_le_16(ring_id);
5278         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5279         HWRM_CHECK_RESULT();
5280         HWRM_UNLOCK();
5281         return 0;
5282 }
5283
5284 #define BNXT_RTE_MEMZONE_FLAG  (RTE_MEMZONE_1GB | RTE_MEMZONE_IOVA_CONTIG)
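     /*
      * Query the firmware's backing-store (context memory) requirements.
      * Only meaningful on P5 (Thor) chips with HWRM 1.9.2 or later, and
      * skipped on VFs; the results are cached in bp->ctx, including the
      * TQM fast-path ring count, which newer firmware may extend via
      * tqm_fp_rings_count_ext.
      */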
5285 int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
5286 {
5287         struct hwrm_func_backing_store_qcaps_input req = {0};
5288         struct hwrm_func_backing_store_qcaps_output *resp =
5289                 bp->hwrm_cmd_resp_addr;
5290         struct bnxt_ctx_pg_info *ctx_pg;
5291         struct bnxt_ctx_mem_info *ctx;
5292         int total_alloc_len;
5293         int rc, i, tqm_rings;
5294
5295         if (!BNXT_CHIP_P5(bp) ||
5296             bp->hwrm_spec_code < HWRM_VERSION_1_9_2 ||
5297             BNXT_VF(bp) ||
5298             bp->ctx)
5299                 return 0;
5300
5301         HWRM_PREP(&req, HWRM_FUNC_BACKING_STORE_QCAPS, BNXT_USE_CHIMP_MB);
5302         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5303         HWRM_CHECK_RESULT_SILENT();
5304
5305         total_alloc_len = sizeof(*ctx);
5306         ctx = rte_zmalloc("bnxt_ctx_mem", total_alloc_len,
5307                           RTE_CACHE_LINE_SIZE);
5308         if (!ctx) {
5309                 rc = -ENOMEM;
5310                 goto ctx_err;
5311         }
5312
5313         ctx->qp_max_entries = rte_le_to_cpu_32(resp->qp_max_entries);
5314         ctx->qp_min_qp1_entries =
5315                 rte_le_to_cpu_16(resp->qp_min_qp1_entries);
5316         ctx->qp_max_l2_entries =
5317                 rte_le_to_cpu_16(resp->qp_max_l2_entries);
5318         ctx->qp_entry_size = rte_le_to_cpu_16(resp->qp_entry_size);
5319         ctx->srq_max_l2_entries =
5320                 rte_le_to_cpu_16(resp->srq_max_l2_entries);
5321         ctx->srq_max_entries = rte_le_to_cpu_32(resp->srq_max_entries);
5322         ctx->srq_entry_size = rte_le_to_cpu_16(resp->srq_entry_size);
5323         ctx->cq_max_l2_entries =
5324                 rte_le_to_cpu_16(resp->cq_max_l2_entries);
5325         ctx->cq_max_entries = rte_le_to_cpu_32(resp->cq_max_entries);
5326         ctx->cq_entry_size = rte_le_to_cpu_16(resp->cq_entry_size);
5327         ctx->vnic_max_vnic_entries =
5328                 rte_le_to_cpu_16(resp->vnic_max_vnic_entries);
5329         ctx->vnic_max_ring_table_entries =
5330                 rte_le_to_cpu_16(resp->vnic_max_ring_table_entries);
5331         ctx->vnic_entry_size = rte_le_to_cpu_16(resp->vnic_entry_size);
5332         ctx->stat_max_entries =
5333                 rte_le_to_cpu_32(resp->stat_max_entries);
5334         ctx->stat_entry_size = rte_le_to_cpu_16(resp->stat_entry_size);
5335         ctx->tqm_entry_size = rte_le_to_cpu_16(resp->tqm_entry_size);
5336         ctx->tqm_min_entries_per_ring =
5337                 rte_le_to_cpu_32(resp->tqm_min_entries_per_ring);
5338         ctx->tqm_max_entries_per_ring =
5339                 rte_le_to_cpu_32(resp->tqm_max_entries_per_ring);
5340         ctx->tqm_entries_multiple = resp->tqm_entries_multiple;
5341         if (!ctx->tqm_entries_multiple)
5342                 ctx->tqm_entries_multiple = 1;
5343         ctx->mrav_max_entries =
5344                 rte_le_to_cpu_32(resp->mrav_max_entries);
5345         ctx->mrav_entry_size = rte_le_to_cpu_16(resp->mrav_entry_size);
5346         ctx->tim_entry_size = rte_le_to_cpu_16(resp->tim_entry_size);
5347         ctx->tim_max_entries = rte_le_to_cpu_32(resp->tim_max_entries);
5348         ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
5349
5350         ctx->tqm_fp_rings_count = ctx->tqm_fp_rings_count ?
5351                                   RTE_MIN(ctx->tqm_fp_rings_count,
5352                                           BNXT_MAX_TQM_FP_LEGACY_RINGS) :
5353                                   bp->max_q;
5354
5355         /* Check whether the extended TQM ring count needs to be added in.
5356          * The ext ring count is only reported by newer FW, so don't read
5357          * the field on older FW.
5358          */
5359         if (ctx->tqm_fp_rings_count == BNXT_MAX_TQM_FP_LEGACY_RINGS &&
5360             bp->hwrm_max_ext_req_len >= BNXT_BACKING_STORE_CFG_LEN) {
5361                 ctx->tqm_fp_rings_count += resp->tqm_fp_rings_count_ext;
5362                 ctx->tqm_fp_rings_count = RTE_MIN(BNXT_MAX_TQM_FP_RINGS,
5363                                                   ctx->tqm_fp_rings_count);
5364         }
5365
5366         tqm_rings = ctx->tqm_fp_rings_count + 1;
5367
5368         ctx_pg = rte_malloc("bnxt_ctx_pg_mem",
5369                             sizeof(*ctx_pg) * tqm_rings,
5370                             RTE_CACHE_LINE_SIZE);
5371         if (!ctx_pg) {
5372                 rc = -ENOMEM;
5373                 goto ctx_err;
5374         }
5375         for (i = 0; i < tqm_rings; i++, ctx_pg++)
5376                 ctx->tqm_mem[i] = ctx_pg;
5377
5378         bp->ctx = ctx;
5379 ctx_err:
5380         HWRM_UNLOCK();
5381         return rc;
5382 }
5383
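     /*
      * Hand the allocated context memory back to firmware.  For each
      * enabled region, bnxt_hwrm_set_pg_attr() encodes the page-table
      * level and page directory; the TQM loop below walks the nine legacy
      * slots (slow-path plus rings 0-7), and ring 8 is configured
      * separately at the end.
      */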
5384 int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, uint32_t enables)
5385 {
5386         struct hwrm_func_backing_store_cfg_input req = {0};
5387         struct hwrm_func_backing_store_cfg_output *resp =
5388                 bp->hwrm_cmd_resp_addr;
5389         struct bnxt_ctx_mem_info *ctx = bp->ctx;
5390         struct bnxt_ctx_pg_info *ctx_pg;
5391         uint32_t *num_entries;
5392         uint64_t *pg_dir;
5393         uint8_t *pg_attr;
5394         uint32_t ena;
5395         int i, rc;
5396
5397         if (!ctx)
5398                 return 0;
5399
5400         HWRM_PREP(&req, HWRM_FUNC_BACKING_STORE_CFG, BNXT_USE_CHIMP_MB);
5401         req.enables = rte_cpu_to_le_32(enables);
5402
5403         if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_QP) {
5404                 ctx_pg = &ctx->qp_mem;
5405                 req.qp_num_entries = rte_cpu_to_le_32(ctx_pg->entries);
5406                 req.qp_num_qp1_entries =
5407                         rte_cpu_to_le_16(ctx->qp_min_qp1_entries);
5408                 req.qp_num_l2_entries =
5409                         rte_cpu_to_le_16(ctx->qp_max_l2_entries);
5410                 req.qp_entry_size = rte_cpu_to_le_16(ctx->qp_entry_size);
5411                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
5412                                       &req.qpc_pg_size_qpc_lvl,
5413                                       &req.qpc_page_dir);
5414         }
5415
5416         if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_SRQ) {
5417                 ctx_pg = &ctx->srq_mem;
5418                 req.srq_num_entries = rte_cpu_to_le_32(ctx_pg->entries);
5419                 req.srq_num_l2_entries =
5420                                  rte_cpu_to_le_16(ctx->srq_max_l2_entries);
5421                 req.srq_entry_size = rte_cpu_to_le_16(ctx->srq_entry_size);
5422                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
5423                                       &req.srq_pg_size_srq_lvl,
5424                                       &req.srq_page_dir);
5425         }
5426
5427         if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_CQ) {
5428                 ctx_pg = &ctx->cq_mem;
5429                 req.cq_num_entries = rte_cpu_to_le_32(ctx_pg->entries);
5430                 req.cq_num_l2_entries =
5431                                 rte_cpu_to_le_16(ctx->cq_max_l2_entries);
5432                 req.cq_entry_size = rte_cpu_to_le_16(ctx->cq_entry_size);
5433                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
5434                                       &req.cq_pg_size_cq_lvl,
5435                                       &req.cq_page_dir);
5436         }
5437
5438         if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_VNIC) {
5439                 ctx_pg = &ctx->vnic_mem;
5440                 req.vnic_num_vnic_entries =
5441                         rte_cpu_to_le_16(ctx->vnic_max_vnic_entries);
5442                 req.vnic_num_ring_table_entries =
5443                         rte_cpu_to_le_16(ctx->vnic_max_ring_table_entries);
5444                 req.vnic_entry_size = rte_cpu_to_le_16(ctx->vnic_entry_size);
5445                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
5446                                       &req.vnic_pg_size_vnic_lvl,
5447                                       &req.vnic_page_dir);
5448         }
5449
5450         if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_STAT) {
5451                 ctx_pg = &ctx->stat_mem;
5452                 req.stat_num_entries = rte_cpu_to_le_16(ctx->stat_max_entries);
5453                 req.stat_entry_size = rte_cpu_to_le_16(ctx->stat_entry_size);
5454                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
5455                                       &req.stat_pg_size_stat_lvl,
5456                                       &req.stat_page_dir);
5457         }
5458
5459         req.tqm_entry_size = rte_cpu_to_le_16(ctx->tqm_entry_size);
5460         num_entries = &req.tqm_sp_num_entries;
5461         pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl;
5462         pg_dir = &req.tqm_sp_page_dir;
5463         ena = HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP;
5464         for (i = 0; i < BNXT_MAX_TQM_LEGACY_RINGS; i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
5465                 if (!(enables & ena))
5466                         continue;
5467
5470                 ctx_pg = ctx->tqm_mem[i];
5471                 *num_entries = rte_cpu_to_le_16(ctx_pg->entries);
5472                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
5473         }
5474
5475         if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_RING8) {
5476                 /* DPDK does not need to configure the MRAV and TIM types,
5477                  * so skip over them and configure
5478                  * HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_RING8 directly.
5479                  */
5480                 ctx_pg = ctx->tqm_mem[BNXT_MAX_TQM_LEGACY_RINGS];
5481                 req.tqm_ring8_num_entries = rte_cpu_to_le_16(ctx_pg->entries);
5482                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
5483                                       &req.tqm_ring8_pg_size_tqm_ring_lvl,
5484                                       &req.tqm_ring8_page_dir);
5485         }
5486
5487         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5488         HWRM_CHECK_RESULT();
5489         HWRM_UNLOCK();
5490
5491         return rc;
5492 }
5493
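/*
 * Trigger an extended port statistics DMA.  The FW writes the stats into
 * the previously mapped hw_rx/tx_port_stats_ext buffers; the sizes the FW
 * reports back are cached so later readers know how much of each buffer
 * is valid.
 */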
5494 int bnxt_hwrm_ext_port_qstats(struct bnxt *bp)
5495 {
5496         struct hwrm_port_qstats_ext_input req = {0};
5497         struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
5498         struct bnxt_pf_info *pf = bp->pf;
5499         int rc;
5500
5501         if (!(bp->flags & BNXT_FLAG_EXT_RX_PORT_STATS ||
5502               bp->flags & BNXT_FLAG_EXT_TX_PORT_STATS))
5503                 return 0;
5504
5505         HWRM_PREP(&req, HWRM_PORT_QSTATS_EXT, BNXT_USE_CHIMP_MB);
5506
5507         req.port_id = rte_cpu_to_le_16(pf->port_id);
5508         if (bp->flags & BNXT_FLAG_EXT_TX_PORT_STATS) {
5509                 req.tx_stat_host_addr =
5510                         rte_cpu_to_le_64(bp->hw_tx_port_stats_ext_map);
5511                 req.tx_stat_size =
5512                         rte_cpu_to_le_16(sizeof(struct tx_port_stats_ext));
5513         }
5514         if (bp->flags & BNXT_FLAG_EXT_RX_PORT_STATS) {
5515                 req.rx_stat_host_addr =
5516                         rte_cpu_to_le_64(bp->hw_rx_port_stats_ext_map);
5517                 req.rx_stat_size =
5518                         rte_cpu_to_le_16(sizeof(struct rx_port_stats_ext));
5519         }
5520         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5521
5522         if (rc) {
5523                 bp->fw_rx_port_stats_ext_size = 0;
5524                 bp->fw_tx_port_stats_ext_size = 0;
5525         } else {
5526                 bp->fw_rx_port_stats_ext_size =
5527                         rte_le_to_cpu_16(resp->rx_stat_size);
5528                 bp->fw_tx_port_stats_ext_size =
5529                         rte_le_to_cpu_16(resp->tx_stat_size);
5530         }
5531
5532         HWRM_CHECK_RESULT();
5533         HWRM_UNLOCK();
5534
5535         return rc;
5536 }
5537
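/*
 * The two CFA tunnel-redirect commands below install and remove a rule
 * that redirects tunnel packets of the given type to this function
 * (dest_fid is set to our own FW function ID).
 */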
5538 int
5539 bnxt_hwrm_tunnel_redirect(struct bnxt *bp, uint8_t type)
5540 {
5541         struct hwrm_cfa_redirect_tunnel_type_alloc_input req = {0};
5542         struct hwrm_cfa_redirect_tunnel_type_alloc_output *resp =
5543                 bp->hwrm_cmd_resp_addr;
5544         int rc = 0;
5545
5546         HWRM_PREP(&req, HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC, BNXT_USE_CHIMP_MB);
5547         req.tunnel_type = type;
5548         req.dest_fid = bp->fw_fid;
5549         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5550         HWRM_CHECK_RESULT();
5551
5552         HWRM_UNLOCK();
5553
5554         return rc;
5555 }
5556
5557 int
5558 bnxt_hwrm_tunnel_redirect_free(struct bnxt *bp, uint8_t type)
5559 {
5560         struct hwrm_cfa_redirect_tunnel_type_free_input req = {0};
5561         struct hwrm_cfa_redirect_tunnel_type_free_output *resp =
5562                 bp->hwrm_cmd_resp_addr;
5563         int rc = 0;
5564
5565         HWRM_PREP(&req, HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE, BNXT_USE_CHIMP_MB);
5566         req.tunnel_type = type;
5567         req.dest_fid = bp->fw_fid;
5568         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5569         HWRM_CHECK_RESULT();
5570
5571         HWRM_UNLOCK();
5572
5573         return rc;
5574 }
5575
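/* Query the bit mask of tunnel types currently redirected to this function. */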
5576 int bnxt_hwrm_tunnel_redirect_query(struct bnxt *bp, uint32_t *type)
5577 {
5578         struct hwrm_cfa_redirect_query_tunnel_type_input req = {0};
5579         struct hwrm_cfa_redirect_query_tunnel_type_output *resp =
5580                 bp->hwrm_cmd_resp_addr;
5581         int rc = 0;
5582
5583         HWRM_PREP(&req, HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE, BNXT_USE_CHIMP_MB);
5584         req.src_fid = bp->fw_fid;
5585         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5586         HWRM_CHECK_RESULT();
5587
5588         if (type)
5589                 *type = rte_le_to_cpu_32(resp->tunnel_mask);
5590
5591         HWRM_UNLOCK();
5592
5593         return rc;
5594 }
5595
5596 int bnxt_hwrm_tunnel_redirect_info(struct bnxt *bp, uint8_t tun_type,
5597                                    uint16_t *dst_fid)
5598 {
5599         struct hwrm_cfa_redirect_tunnel_type_info_input req = {0};
5600         struct hwrm_cfa_redirect_tunnel_type_info_output *resp =
5601                 bp->hwrm_cmd_resp_addr;
5602         int rc = 0;
5603
5604         HWRM_PREP(&req, HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO, BNXT_USE_CHIMP_MB);
5605         req.src_fid = bp->fw_fid;
5606         req.tunnel_type = tun_type;
5607         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5608         HWRM_CHECK_RESULT();
5609
5610         if (dst_fid)
5611                 *dst_fid = rte_le_to_cpu_16(resp->dest_fid);
5612
5613         PMD_DRV_LOG(DEBUG, "dst_fid: %x\n", rte_le_to_cpu_16(resp->dest_fid));
5614
5615         HWRM_UNLOCK();
5616
5617         return rc;
5618 }
5619
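/*
 * Program the port MAC address as the VF default MAC in the FW.
 * This is a no-op for non-VF functions.
 */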
5620 int bnxt_hwrm_set_mac(struct bnxt *bp)
5621 {
5622         struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
5623         struct hwrm_func_vf_cfg_input req = {0};
5624         int rc = 0;
5625
5626         if (!BNXT_VF(bp))
5627                 return 0;
5628
5629         HWRM_PREP(&req, HWRM_FUNC_VF_CFG, BNXT_USE_CHIMP_MB);
5630
5631         req.enables =
5632                 rte_cpu_to_le_32(HWRM_FUNC_VF_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
5633         memcpy(req.dflt_mac_addr, bp->mac_addr, RTE_ETHER_ADDR_LEN);
5634
5635         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5636
5637         HWRM_CHECK_RESULT();
5638
5639         HWRM_UNLOCK();
5640
5641         return rc;
5642 }
5643
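/*
 * Tell the FW that the driver interface is going up or down.  On the "up"
 * transition the FW response indicates whether a hot FW reset completed
 * while the port was down; that condition is recorded in bp->flags for
 * the start path to act on.
 */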
5644 int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
5645 {
5646         struct hwrm_func_drv_if_change_output *resp = bp->hwrm_cmd_resp_addr;
5647         struct hwrm_func_drv_if_change_input req = {0};
5648         uint32_t flags;
5649         int rc;
5650
5651         if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
5652                 return 0;
5653
5654         /* Do not issue FUNC_DRV_IF_CHANGE during reset recovery.
5655          * If FUNC_DRV_IF_CHANGE is issued with the "down" flag before
5656          * FUNC_DRV_UNRGTR, the FW resets before FUNC_DRV_UNRGTR completes.
5657          */
5658         if (!up && (bp->flags & BNXT_FLAG_FW_RESET))
5659                 return 0;
5660
5661         HWRM_PREP(&req, HWRM_FUNC_DRV_IF_CHANGE, BNXT_USE_CHIMP_MB);
5662
5663         if (up)
5664                 req.flags =
5665                 rte_cpu_to_le_32(HWRM_FUNC_DRV_IF_CHANGE_INPUT_FLAGS_UP);
5666
5667         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5668
5669         HWRM_CHECK_RESULT();
5670         flags = rte_le_to_cpu_32(resp->flags);
5671         HWRM_UNLOCK();
5672
5673         if (!up)
5674                 return 0;
5675
5676         if (flags & HWRM_FUNC_DRV_IF_CHANGE_OUTPUT_FLAGS_HOT_FW_RESET_DONE) {
5677                 PMD_DRV_LOG(INFO, "FW reset happened while port was down\n");
5678                 bp->flags |= BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE;
5679         }
5680
5681         return 0;
5682 }
5683
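/*
 * Fetch the FW error-recovery parameters: whether recovery is driven by
 * the host or the co-processor, the polling/wait periods (converted from
 * FW units of 100 msec below), the health/heartbeat/reset-count register
 * locations, and the list of register writes that perform the reset.
 * On success the FW status registers are mapped for direct access.
 */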
5684 int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
5685 {
5686         struct hwrm_error_recovery_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
5687         struct bnxt_error_recovery_info *info = bp->recovery_info;
5688         struct hwrm_error_recovery_qcfg_input req = {0};
5689         uint32_t flags = 0;
5690         unsigned int i;
5691         int rc;
5692
5693         /* Older FW does not have error recovery support */
5694         if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
5695                 return 0;
5696
5697         HWRM_PREP(&req, HWRM_ERROR_RECOVERY_QCFG, BNXT_USE_CHIMP_MB);
5698
5699         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5700
5701         HWRM_CHECK_RESULT();
5702
5703         flags = rte_le_to_cpu_32(resp->flags);
5704         if (flags & HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FLAGS_HOST)
5705                 info->flags |= BNXT_FLAG_ERROR_RECOVERY_HOST;
5706         else if (flags & HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FLAGS_CO_CPU)
5707                 info->flags |= BNXT_FLAG_ERROR_RECOVERY_CO_CPU;
5708
5709         if ((info->flags & BNXT_FLAG_ERROR_RECOVERY_CO_CPU) &&
5710             !(bp->flags & BNXT_FLAG_KONG_MB_EN)) {
5711                 rc = -EINVAL;
5712                 goto err;
5713         }
5714
5715         /* The FW returns these values in units of 100 msec */
5716         info->driver_polling_freq =
5717                 rte_le_to_cpu_32(resp->driver_polling_freq) * 100;
5718         info->primary_func_wait_period =
5719                 rte_le_to_cpu_32(resp->master_func_wait_period) * 100;
5720         info->normal_func_wait_period =
5721                 rte_le_to_cpu_32(resp->normal_func_wait_period) * 100;
5722         info->primary_func_wait_period_after_reset =
5723                 rte_le_to_cpu_32(resp->master_func_wait_period_after_reset) * 100;
5724         info->max_bailout_time_after_reset =
5725                 rte_le_to_cpu_32(resp->max_bailout_time_after_reset) * 100;
5726         info->status_regs[BNXT_FW_STATUS_REG] =
5727                 rte_le_to_cpu_32(resp->fw_health_status_reg);
5728         info->status_regs[BNXT_FW_HEARTBEAT_CNT_REG] =
5729                 rte_le_to_cpu_32(resp->fw_heartbeat_reg);
5730         info->status_regs[BNXT_FW_RECOVERY_CNT_REG] =
5731                 rte_le_to_cpu_32(resp->fw_reset_cnt_reg);
5732         info->status_regs[BNXT_FW_RESET_INPROG_REG] =
5733                 rte_le_to_cpu_32(resp->reset_inprogress_reg);
5734         info->reg_array_cnt =
5735                 rte_le_to_cpu_32(resp->reg_array_cnt);
5736
5737         if (info->reg_array_cnt >= BNXT_NUM_RESET_REG) {
5738                 rc = -EINVAL;
5739                 goto err;
5740         }
5741
5742         for (i = 0; i < info->reg_array_cnt; i++) {
5743                 info->reset_reg[i] =
5744                         rte_le_to_cpu_32(resp->reset_reg[i]);
5745                 info->reset_reg_val[i] =
5746                         rte_le_to_cpu_32(resp->reset_reg_val[i]);
5747                 info->delay_after_reset[i] =
5748                         resp->delay_after_reset[i];
5749         }
5750 err:
5751         HWRM_UNLOCK();
5752
5753         /* Map the FW status registers */
5754         if (!rc)
5755                 rc = bnxt_map_fw_health_status_regs(bp);
5756
5757         if (rc) {
5758                 rte_free(bp->recovery_info);
5759                 bp->recovery_info = NULL;
5760         }
5761         return rc;
5762 }
5763
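/*
 * Request a graceful chip-level FW reset with self-reset-ASAP status.
 * PF only; the request is sent through the KONG channel when enabled.
 */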
5764 int bnxt_hwrm_fw_reset(struct bnxt *bp)
5765 {
5766         struct hwrm_fw_reset_output *resp = bp->hwrm_cmd_resp_addr;
5767         struct hwrm_fw_reset_input req = {0};
5768         int rc;
5769
5770         if (!BNXT_PF(bp))
5771                 return -EOPNOTSUPP;
5772
5773         HWRM_PREP(&req, HWRM_FW_RESET, BNXT_USE_KONG(bp));
5774
5775         req.embedded_proc_type =
5776                 HWRM_FW_RESET_INPUT_EMBEDDED_PROC_TYPE_CHIP;
5777         req.selfrst_status =
5778                 HWRM_FW_RESET_INPUT_SELFRST_STATUS_SELFRSTASAP;
5779         req.flags = HWRM_FW_RESET_INPUT_FLAGS_RESET_GRACEFUL;
5780
5781         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req),
5782                                     BNXT_USE_KONG(bp));
5783
5784         HWRM_CHECK_RESULT();
5785         HWRM_UNLOCK();
5786
5787         return rc;
5788 }
5789
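/*
 * Read a PTP timestamp from the FW: the last TX or RX PTP message
 * timestamp or the current PHC time, selected by 'path'.  The 64-bit
 * value arrives as two little-endian 32-bit words that are stitched
 * together below.
 *
 * Example (hypothetical usage) - read the current PHC time:
 *     uint64_t ts;
 *     rc = bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_CURRENT_TIME, &ts);
 */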
5790 int bnxt_hwrm_port_ts_query(struct bnxt *bp, uint8_t path, uint64_t *timestamp)
5791 {
5792         struct hwrm_port_ts_query_output *resp = bp->hwrm_cmd_resp_addr;
5793         struct hwrm_port_ts_query_input req = {0};
5794         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
5795         uint32_t flags = 0;
5796         int rc;
5797
5798         if (!ptp)
5799                 return 0;
5800
5801         HWRM_PREP(&req, HWRM_PORT_TS_QUERY, BNXT_USE_CHIMP_MB);
5802
5803         switch (path) {
5804         case BNXT_PTP_FLAGS_PATH_TX:
5805                 flags |= HWRM_PORT_TS_QUERY_INPUT_FLAGS_PATH_TX;
5806                 break;
5807         case BNXT_PTP_FLAGS_PATH_RX:
5808                 flags |= HWRM_PORT_TS_QUERY_INPUT_FLAGS_PATH_RX;
5809                 break;
5810         case BNXT_PTP_FLAGS_CURRENT_TIME:
5811                 flags |= HWRM_PORT_TS_QUERY_INPUT_FLAGS_CURRENT_TIME;
5812                 break;
5813         }
5814
5815         req.flags = rte_cpu_to_le_32(flags);
5816         req.port_id = rte_cpu_to_le_16(bp->pf->port_id);
5817
5818         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5819
5820         HWRM_CHECK_RESULT();
5821
5822         if (timestamp) {
5823                 *timestamp = rte_le_to_cpu_32(resp->ptp_msg_ts[0]);
5824                 *timestamp |=
5825                         (uint64_t)(rte_le_to_cpu_32(resp->ptp_msg_ts[1])) << 32;
5826         }
5827         HWRM_UNLOCK();
5828
5829         return rc;
5830 }
5831
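/*
 * Query the number of CFA flow counters supported by the FW.  Like the
 * other CFA commands below, this is limited to the PF or a trusted VF.
 */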
5832 int bnxt_hwrm_cfa_counter_qcaps(struct bnxt *bp, uint16_t *max_fc)
5833 {
5834         int rc = 0;
5835
5836         struct hwrm_cfa_counter_qcaps_input req = {0};
5837         struct hwrm_cfa_counter_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
5838
5839         if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
5840                 PMD_DRV_LOG(DEBUG,
5841                             "Not a PF or trusted VF. Command not supported\n");
5842                 return 0;
5843         }
5844
5845         HWRM_PREP(&req, HWRM_CFA_COUNTER_QCAPS, BNXT_USE_KONG(bp));
5846         req.target_id = rte_cpu_to_le_16(bp->fw_fid);
5847         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
5848
5849         HWRM_CHECK_RESULT();
5850         if (max_fc)
5851                 *max_fc = rte_le_to_cpu_16(resp->max_rx_fc);
5852         HWRM_UNLOCK();
5853
5854         return 0;
5855 }
5856
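/*
 * Register a DMA area with the CFA context manager, described as a
 * single level-0 page table of 2MB pages.  The FW returns a context ID
 * that later CFA counter commands reference.
 */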
5857 int bnxt_hwrm_ctx_rgtr(struct bnxt *bp, rte_iova_t dma_addr, uint16_t *ctx_id)
5858 {
5859         int rc = 0;
5860         struct hwrm_cfa_ctx_mem_rgtr_input req = {.req_type = 0 };
5861         struct hwrm_cfa_ctx_mem_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
5862
5863         if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
5864                 PMD_DRV_LOG(DEBUG,
5865                             "Not a PF or trusted VF. Command not supported\n");
5866                 return 0;
5867         }
5868
5869         HWRM_PREP(&req, HWRM_CFA_CTX_MEM_RGTR, BNXT_USE_KONG(bp));
5870
5871         req.page_level = HWRM_CFA_CTX_MEM_RGTR_INPUT_PAGE_LEVEL_LVL_0;
5872         req.page_size = HWRM_CFA_CTX_MEM_RGTR_INPUT_PAGE_SIZE_2M;
5873         req.page_dir = rte_cpu_to_le_64(dma_addr);
5874
5875         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
5876
5877         HWRM_CHECK_RESULT();
5878         if (ctx_id) {
5879                 *ctx_id = rte_le_to_cpu_16(resp->ctx_id);
5880                 PMD_DRV_LOG(DEBUG, "ctx_id = %d\n", *ctx_id);
5881         }
5882         HWRM_UNLOCK();
5883
5884         return 0;
5885 }
5886
5887 int bnxt_hwrm_ctx_unrgtr(struct bnxt *bp, uint16_t ctx_id)
5888 {
5889         int rc = 0;
5890         struct hwrm_cfa_ctx_mem_unrgtr_input req = {.req_type = 0 };
5891         struct hwrm_cfa_ctx_mem_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
5892
5893         if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
5894                 PMD_DRV_LOG(DEBUG,
5895                             "Not a PF or trusted VF. Command not supported\n");
5896                 return 0;
5897         }
5898
5899         HWRM_PREP(&req, HWRM_CFA_CTX_MEM_UNRGTR, BNXT_USE_KONG(bp));
5900
5901         req.ctx_id = rte_cpu_to_le_16(ctx_id);
5902
5903         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
5904
5905         HWRM_CHECK_RESULT();
5906         HWRM_UNLOCK();
5907
5908         return rc;
5909 }
5910
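/*
 * Enable or disable a CFA counter context in pull mode on the RX or TX
 * path; 'num_entries' sizes the counter table inside the context
 * registered under 'ctx_id'.
 */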
5911 int bnxt_hwrm_cfa_counter_cfg(struct bnxt *bp, enum bnxt_flow_dir dir,
5912                               uint16_t cntr, uint16_t ctx_id,
5913                               uint32_t num_entries, bool enable)
5914 {
5915         struct hwrm_cfa_counter_cfg_input req = {0};
5916         struct hwrm_cfa_counter_cfg_output *resp = bp->hwrm_cmd_resp_addr;
5917         uint16_t flags = 0;
5918         int rc;
5919
5920         if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
5921                 PMD_DRV_LOG(DEBUG,
5922                             "Not a PF or trusted VF. Command not supported\n");
5923                 return 0;
5924         }
5925
5926         HWRM_PREP(&req, HWRM_CFA_COUNTER_CFG, BNXT_USE_KONG(bp));
5927
5928         req.target_id = rte_cpu_to_le_16(bp->fw_fid);
5929         req.counter_type = rte_cpu_to_le_16(cntr);
5930         flags = enable ? HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_CFG_MODE_ENABLE :
5931                 HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_CFG_MODE_DISABLE;
5932         flags |= HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_DATA_TRANSFER_MODE_PULL;
5933         if (dir == BNXT_DIR_RX)
5934                 flags |= HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_PATH_RX;
5935         else if (dir == BNXT_DIR_TX)
5936                 flags |= HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_PATH_TX;
5937         req.flags = rte_cpu_to_le_16(flags);
5938         req.ctx_id = rte_cpu_to_le_16(ctx_id);
5939         req.num_entries = rte_cpu_to_le_32(num_entries);
5940
5941         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
5942         HWRM_CHECK_RESULT();
5943         HWRM_UNLOCK();
5944
5945         return 0;
5946 }
5947
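/*
 * Ask the FW to DMA current flow-counter values into the RX or TX
 * "flow counter in" table registered earlier; the direction selects both
 * the context ID and the PATH flag.
 */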
5948 int bnxt_hwrm_cfa_counter_qstats(struct bnxt *bp,
5949                                  enum bnxt_flow_dir dir,
5950                                  uint16_t cntr,
5951                                  uint16_t num_entries)
5952 {
5953         struct hwrm_cfa_counter_qstats_output *resp = bp->hwrm_cmd_resp_addr;
5954         struct hwrm_cfa_counter_qstats_input req = {0};
5955         uint16_t flow_ctx_id = 0;
5956         uint16_t flags = 0;
5957         int rc = 0;
5958
5959         if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
5960                 PMD_DRV_LOG(DEBUG,
5961                             "Not a PF or trusted VF. Command not supported\n");
5962                 return 0;
5963         }
5964
5965         if (dir == BNXT_DIR_RX) {
5966                 flow_ctx_id = bp->flow_stat->rx_fc_in_tbl.ctx_id;
5967                 flags = HWRM_CFA_COUNTER_QSTATS_INPUT_FLAGS_PATH_RX;
5968         } else if (dir == BNXT_DIR_TX) {
5969                 flow_ctx_id = bp->flow_stat->tx_fc_in_tbl.ctx_id;
5970                 flags = HWRM_CFA_COUNTER_QSTATS_INPUT_FLAGS_PATH_TX;
5971         }
5972
5973         HWRM_PREP(&req, HWRM_CFA_COUNTER_QSTATS, BNXT_USE_KONG(bp));
5974         req.target_id = rte_cpu_to_le_16(bp->fw_fid);
5975         req.counter_type = rte_cpu_to_le_16(cntr);
5976         req.input_flow_ctx_id = rte_cpu_to_le_16(flow_ctx_id);
5977         req.num_entries = rte_cpu_to_le_16(num_entries);
5978         req.flags = rte_cpu_to_le_16(flags);
5979         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
5980
5981         HWRM_CHECK_RESULT();
5982         HWRM_UNLOCK();
5983
5984         return 0;
5985 }
5986
5987 int bnxt_hwrm_first_vf_id_query(struct bnxt *bp, uint16_t fid,
5988                                 uint16_t *first_vf_id)
5989 {
5990         int rc = 0;
5991         struct hwrm_func_qcaps_input req = {.req_type = 0 };
5992         struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
5993
5994         HWRM_PREP(&req, HWRM_FUNC_QCAPS, BNXT_USE_CHIMP_MB);
5995
5996         req.fid = rte_cpu_to_le_16(fid);
5997
5998         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5999
6000         HWRM_CHECK_RESULT();
6001
6002         if (first_vf_id)
6003                 *first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
6004
6005         HWRM_UNLOCK();
6006
6007         return rc;
6008 }
6009
6010 int bnxt_hwrm_cfa_pair_exists(struct bnxt *bp, struct bnxt_representor *rep_bp)
6011 {
6012         struct hwrm_cfa_pair_info_output *resp = bp->hwrm_cmd_resp_addr;
6013         struct hwrm_cfa_pair_info_input req = {0};
6014         int rc = 0;
6015
6016         if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
6017                 PMD_DRV_LOG(DEBUG,
6018                             "Not a PF or trusted VF. Command not supported\n");
6019                 return 0;
6020         }
6021
6022         HWRM_PREP(&req, HWRM_CFA_PAIR_INFO, BNXT_USE_CHIMP_MB);
6023         snprintf(req.pair_name, sizeof(req.pair_name), "%svfr%d",
6024                  bp->eth_dev->data->name, rep_bp->vf_id);
6025         req.flags =
6026                 rte_cpu_to_le_32(HWRM_CFA_PAIR_INFO_INPUT_FLAGS_LOOKUP_TYPE);
6027
6028         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
6029         HWRM_CHECK_RESULT();
6030         if (rc == HWRM_ERR_CODE_SUCCESS && strlen(resp->pair_name)) {
6031                 HWRM_UNLOCK();
6032                 return !rc;
6033         }
6034         HWRM_UNLOCK();
6035         return rc;
6036 }
6037
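/*
 * Create the CFA pairing between a representor and the function it
 * represents.  The pair is identified by name ("<dev-name>vfr<vf-id>");
 * PF representors encode a VF ID of 0xffff, and the q_*/fc_* fields are
 * passed only when the matching BNXT_REP_*_VALID flag is set.
 */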
6038 int bnxt_hwrm_cfa_pair_alloc(struct bnxt *bp, struct bnxt_representor *rep_bp)
6039 {
6040         struct hwrm_cfa_pair_alloc_output *resp = bp->hwrm_cmd_resp_addr;
6041         struct hwrm_cfa_pair_alloc_input req = {0};
6042         int rc;
6043
6044         if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
6045                 PMD_DRV_LOG(DEBUG,
6046                             "Not a PF or trusted VF. Command not supported\n");
6047                 return 0;
6048         }
6049
6050         HWRM_PREP(&req, HWRM_CFA_PAIR_ALLOC, BNXT_USE_CHIMP_MB);
6051         req.pair_mode = HWRM_CFA_PAIR_FREE_INPUT_PAIR_MODE_REP2FN_TRUFLOW;
6052         snprintf(req.pair_name, sizeof(req.pair_name), "%svfr%d",
6053                  bp->eth_dev->data->name, rep_bp->vf_id);
6054
6055         req.pf_b_id = rep_bp->parent_pf_idx;
6056         req.vf_b_id = BNXT_REP_PF(rep_bp) ? rte_cpu_to_le_16(((uint16_t)-1)) :
6057                                                 rte_cpu_to_le_16(rep_bp->vf_id);
6058         req.vf_a_id = rte_cpu_to_le_16(bp->fw_fid);
6059         req.host_b_id = 1; /* TBD - Confirm if this is OK */
6060
6061         req.enables |= rep_bp->flags & BNXT_REP_Q_R2F_VALID ?
6062                         HWRM_CFA_PAIR_ALLOC_INPUT_ENABLES_Q_AB_VALID : 0;
6063         req.enables |= rep_bp->flags & BNXT_REP_Q_F2R_VALID ?
6064                         HWRM_CFA_PAIR_ALLOC_INPUT_ENABLES_Q_BA_VALID : 0;
6065         req.enables |= rep_bp->flags & BNXT_REP_FC_R2F_VALID ?
6066                         HWRM_CFA_PAIR_ALLOC_INPUT_ENABLES_FC_AB_VALID : 0;
6067         req.enables |= rep_bp->flags & BNXT_REP_FC_F2R_VALID ?
6068                         HWRM_CFA_PAIR_ALLOC_INPUT_ENABLES_FC_BA_VALID : 0;
6069
6070         req.q_ab = rep_bp->rep_q_r2f;
6071         req.q_ba = rep_bp->rep_q_f2r;
6072         req.fc_ab = rep_bp->rep_fc_r2f;
6073         req.fc_ba = rep_bp->rep_fc_f2r;
6074
6075         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
6076         HWRM_CHECK_RESULT();
6077
6078         HWRM_UNLOCK();
6079         PMD_DRV_LOG(DEBUG, "%s %d allocated\n",
6080                     BNXT_REP_PF(rep_bp) ? "PFR" : "VFR", rep_bp->vf_id);
6081         return rc;
6082 }
6083
6084 int bnxt_hwrm_cfa_pair_free(struct bnxt *bp, struct bnxt_representor *rep_bp)
6085 {
6086         struct hwrm_cfa_pair_free_output *resp = bp->hwrm_cmd_resp_addr;
6087         struct hwrm_cfa_pair_free_input req = {0};
6088         int rc;
6089
6090         if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
6091                 PMD_DRV_LOG(DEBUG,
6092                             "Not a PF or trusted VF. Command not supported\n");
6093                 return 0;
6094         }
6095
6096         HWRM_PREP(&req, HWRM_CFA_PAIR_FREE, BNXT_USE_CHIMP_MB);
6097         snprintf(req.pair_name, sizeof(req.pair_name), "%svfr%d",
6098                  bp->eth_dev->data->name, rep_bp->vf_id);
6099         req.pf_b_id = rep_bp->parent_pf_idx;
6100         req.pair_mode = HWRM_CFA_PAIR_FREE_INPUT_PAIR_MODE_REP2FN_TRUFLOW;
6101         req.vf_id = BNXT_REP_PF(rep_bp) ? rte_cpu_to_le_16(((uint16_t)-1)) :
6102                                                 rte_cpu_to_le_16(rep_bp->vf_id);
6103         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
6104         HWRM_CHECK_RESULT();
6105         HWRM_UNLOCK();
6106         PMD_DRV_LOG(DEBUG, "%s %d freed\n", BNXT_REP_PF(rep_bp) ? "PFR" : "VFR",
6107                     rep_bp->vf_id);
6108         return rc;
6109 }
6110
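/*
 * Reflect a FW echo request: the two request data words are sent back
 * unchanged so the FW can confirm the driver is responsive.
 */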
6111 int bnxt_hwrm_fw_echo_reply(struct bnxt *bp, uint32_t echo_req_data1,
6112                             uint32_t echo_req_data2)
6113 {
6114         struct hwrm_func_echo_response_input req = {0};
6115         struct hwrm_func_echo_response_output *resp = bp->hwrm_cmd_resp_addr;
6116         int rc;
6117
6118         HWRM_PREP(&req, HWRM_FUNC_ECHO_RESPONSE, BNXT_USE_CHIMP_MB);
6119         req.event_data1 = rte_cpu_to_le_32(echo_req_data1);
6120         req.event_data2 = rte_cpu_to_le_32(echo_req_data2);
6121
6122         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
6123
6124         HWRM_CHECK_RESULT();
6125         HWRM_UNLOCK();
6126
6127         return rc;
6128 }
6129
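/*
 * Lightweight HWRM_VER_GET used while polling for the FW to come back
 * after a reset: failures are silent and -EAGAIN is returned while the
 * FW still reports the device as not ready.
 */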
6130 int bnxt_hwrm_poll_ver_get(struct bnxt *bp)
6131 {
6132         struct hwrm_ver_get_input req = {.req_type = 0 };
6133         struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
6134         int rc = 0;
6135
6136         bp->max_req_len = HWRM_MAX_REQ_LEN;
6137         bp->max_resp_len = BNXT_PAGE_SIZE;
6138         bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT;
6139
6140         HWRM_PREP(&req, HWRM_VER_GET, BNXT_USE_CHIMP_MB);
6141         req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
6142         req.hwrm_intf_min = HWRM_VERSION_MINOR;
6143         req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
6144
6145         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
6146
6147         HWRM_CHECK_RESULT_SILENT();
6148
6149         if (resp->flags & HWRM_VER_GET_OUTPUT_FLAGS_DEV_NOT_RDY)
6150                 rc = -EAGAIN;
6151
6152         HWRM_UNLOCK();
6153
6154         return rc;
6155 }
6156
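/*
 * Read SFP module EEPROM contents over I2C in chunks of at most
 * BNXT_MAX_PHY_I2C_RESP_SIZE bytes.  The PAGE_OFFSET enable is set
 * whenever the effective offset (start_addr + byte_offset) is non-zero.
 *
 * Example (hypothetical usage) - read 256 bytes of page 0 at the common
 * SFP I2C address 0x50:
 *     uint8_t buf[256];
 *     rc = bnxt_hwrm_read_sfp_module_eeprom_info(bp, 0x50, 0, 0, 256, buf);
 */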
6157 int bnxt_hwrm_read_sfp_module_eeprom_info(struct bnxt *bp, uint16_t i2c_addr,
6158                                           uint16_t page_number, uint16_t start_addr,
6159                                           uint16_t data_length, uint8_t *buf)
6160 {
6161         struct hwrm_port_phy_i2c_read_output *resp = bp->hwrm_cmd_resp_addr;
6162         struct hwrm_port_phy_i2c_read_input req = {0};
6163         uint32_t enables = HWRM_PORT_PHY_I2C_READ_INPUT_ENABLES_PAGE_OFFSET;
6164         int rc, byte_offset = 0;
6165
6166         do {
6167                 uint16_t xfer_size;
6168
6169                 HWRM_PREP(&req, HWRM_PORT_PHY_I2C_READ, BNXT_USE_CHIMP_MB);
6170                 req.i2c_slave_addr = i2c_addr;
6171                 req.page_number = rte_cpu_to_le_16(page_number);
6172                 req.port_id = rte_cpu_to_le_16(bp->pf->port_id);
6173
6174                 xfer_size = RTE_MIN(data_length, BNXT_MAX_PHY_I2C_RESP_SIZE);
6175                 req.page_offset = rte_cpu_to_le_16(start_addr + byte_offset);
6176                 req.data_length = xfer_size;
6177                 req.enables = rte_cpu_to_le_32((start_addr + byte_offset) ? enables : 0);
6178                 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
6179                 HWRM_CHECK_RESULT();
6180
6181                 memcpy(buf + byte_offset, resp->data, xfer_size);
6182
6183                 data_length -= xfer_size;
6184                 byte_offset += xfer_size;
6185
6186                 HWRM_UNLOCK();
6187         } while (data_length > 0);
6188
6189         return rc;
6190 }
6191
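/*
 * Tear down a TX ring in the order the FW expects: free the TX ring
 * (identifying it by its companion completion ring), reset the host-side
 * producer/consumer state, then release the stats context and finally
 * the completion ring itself.
 */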
6192 void bnxt_free_hwrm_tx_ring(struct bnxt *bp, int queue_index)
6193 {
6194         struct bnxt_tx_queue *txq = bp->tx_queues[queue_index];
6195         struct bnxt_tx_ring_info *txr = txq->tx_ring;
6196         struct bnxt_ring *ring = txr->tx_ring_struct;
6197         struct bnxt_cp_ring_info *cpr = txq->cp_ring;
6198
6199         bnxt_hwrm_ring_free(bp, ring,
6200                             HWRM_RING_FREE_INPUT_RING_TYPE_TX,
6201                             cpr->cp_ring_struct->fw_ring_id);
6202         txr->tx_raw_prod = 0;
6203         txr->tx_raw_cons = 0;
6204         memset(txr->tx_desc_ring, 0,
6205                 txr->tx_ring_struct->ring_size * sizeof(*txr->tx_desc_ring));
6206         memset(txr->tx_buf_ring, 0,
6207                 txr->tx_ring_struct->ring_size * sizeof(*txr->tx_buf_ring));
6208
6209         bnxt_hwrm_stat_ctx_free(bp, cpr);
6210
6211         bnxt_free_cp_ring(bp, cpr);
6212 }
6213
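/*
 * Report the host interface MTU to the FW.  PF only; a fid of 0xffff
 * addresses the function issuing the command.
 */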
6214 int bnxt_hwrm_config_host_mtu(struct bnxt *bp)
6215 {
6216         struct hwrm_func_cfg_input req = {0};
6217         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
6218         int rc;
6219
6220         if (!BNXT_PF(bp))
6221                 return 0;
6222
6223         HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
6224
6225         req.fid = rte_cpu_to_le_16(0xffff);
6226         req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_HOST_MTU);
6227         req.host_mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu);
6228
6229         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
6230         HWRM_CHECK_RESULT();
6231         HWRM_UNLOCK();
6232
6233         return rc;
6234 }
6235
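/*
 * Clear the RSS configuration of a P5-family VNIC by issuing one
 * VNIC_RSS_CFG per load-balancing context with no hash type or key set,
 * which effectively disables RSS on that context.
 */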
6236 int
6237 bnxt_vnic_rss_clear_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic)
6238 {
6239         struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
6240         struct hwrm_vnic_rss_cfg_input req = {0};
6241         int nr_ctxs = vnic->num_lb_ctxts;
6242         int i, rc = 0;
6243
6244         for (i = 0; i < nr_ctxs; i++) {
6245                 HWRM_PREP(&req, HWRM_VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
6246
6247                 req.rss_ctx_idx = rte_cpu_to_le_16(vnic->fw_grp_ids[i]);
6248                 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
6249
6250                 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
6251
6252                 HWRM_CHECK_RESULT();
6253                 HWRM_UNLOCK();
6254         }
6255
6256         return rc;
6257 }