net/bnxt: check flush status during ring free
[dpdk.git] / drivers / net / bnxt / bnxt_hwrm.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2014-2021 Broadcom
3  * All rights reserved.
4  */
5
6 #include <unistd.h>
7
8 #include <rte_byteorder.h>
9 #include <rte_common.h>
10 #include <rte_cycles.h>
11 #include <rte_malloc.h>
12 #include <rte_memzone.h>
13 #include <rte_version.h>
14 #include <rte_io.h>
15
16 #include "bnxt.h"
17 #include "bnxt_filter.h"
18 #include "bnxt_hwrm.h"
19 #include "bnxt_rxq.h"
20 #include "bnxt_rxr.h"
21 #include "bnxt_ring.h"
22 #include "bnxt_txq.h"
23 #include "bnxt_txr.h"
24 #include "bnxt_vnic.h"
25 #include "hsi_struct_def_dpdk.h"
26
/* HWRM firmware version thresholds used for capability gating, encoded one
 * byte per version component (e.g. 0x10a020d == 1.10.2.13, compared against
 * bp->fw_ver / bp->hwrm_spec_code elsewhere in this file).
 * NOTE(review): 0x10903 decodes as 1.9.3 despite the _1_9_2 name -- confirm.
 */
27 #define HWRM_SPEC_CODE_1_8_3            0x10803
28 #define HWRM_VERSION_1_9_1              0x10901
29 #define HWRM_VERSION_1_9_2              0x10903
30 #define HWRM_VERSION_1_10_2_13          0x10a020d
/* RX buffer placement-mode settings; presumably mirrors the fields of the
 * HWRM VNIC_PLCMODES_CFG request -- confirm against hsi_struct_def_dpdk.h.
 */
31 struct bnxt_plcmodes_cfg {
32         uint32_t        flags;
33         uint16_t        jumbo_thresh;
34         uint16_t        hds_offset;
35         uint16_t        hds_threshold;
36 };
37
38 static int page_getenum(size_t size)
39 {
40         if (size <= 1 << 4)
41                 return 4;
42         if (size <= 1 << 12)
43                 return 12;
44         if (size <= 1 << 13)
45                 return 13;
46         if (size <= 1 << 16)
47                 return 16;
48         if (size <= 1 << 21)
49                 return 21;
50         if (size <= 1 << 22)
51                 return 22;
52         if (size <= 1 << 30)
53                 return 30;
54         PMD_DRV_LOG(ERR, "Page size %zu out of range\n", size);
55         return sizeof(int) * 8 - 1;
56 }
57
/* Round size up to the nearest supported page size. */
static int page_roundup(size_t size)
{
	int exponent = page_getenum(size);

	return 1 << exponent;
}
62
63 static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem,
64                                   uint8_t *pg_attr,
65                                   uint64_t *pg_dir)
66 {
67         if (rmem->nr_pages == 0)
68                 return;
69
70         if (rmem->nr_pages > 1) {
71                 *pg_attr = 1;
72                 *pg_dir = rte_cpu_to_le_64(rmem->pg_tbl_map);
73         } else {
74                 *pg_dir = rte_cpu_to_le_64(rmem->dma_arr[0]);
75         }
76 }
77
78 static struct bnxt_cp_ring_info*
79 bnxt_get_ring_info_by_id(struct bnxt *bp, uint16_t rid, uint16_t type)
80 {
81         struct bnxt_cp_ring_info *cp_ring = NULL;
82         uint16_t i;
83
84         switch (type) {
85         case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
86         case HWRM_RING_FREE_INPUT_RING_TYPE_RX_AGG:
87                 /* FALLTHROUGH */
88                 for (i = 0; i < bp->rx_cp_nr_rings; i++) {
89                         struct bnxt_rx_queue *rxq = bp->rx_queues[i];
90
91                         if (rxq->cp_ring->cp_ring_struct->fw_ring_id ==
92                             rte_cpu_to_le_16(rid)) {
93                                 return rxq->cp_ring;
94                         }
95                 }
96                 break;
97         case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
98                 for (i = 0; i < bp->tx_cp_nr_rings; i++) {
99                         struct bnxt_tx_queue *txq = bp->tx_queues[i];
100
101                         if (txq->cp_ring->cp_ring_struct->fw_ring_id ==
102                             rte_cpu_to_le_16(rid)) {
103                                 return txq->cp_ring;
104                         }
105                 }
106                 break;
107         default:
108                 return cp_ring;
109         }
110         return cp_ring;
111 }
112
113 /* Complete a sweep of the CQ ring for the corresponding Tx/Rx/AGG ring.
114  * If the CMPL_BASE_TYPE_HWRM_DONE is not encountered by the last pass,
115  * before timeout, we force the done bit for the cleanup to proceed.
116  * Also if cpr is null, do nothing.. The HWRM command is  not for a
117  * Tx/Rx/AGG ring cleanup.
118  */
119 static int
120 bnxt_check_cq_hwrm_done(struct bnxt_cp_ring_info *cpr,
121                         bool tx, bool rx, bool timeout)
122 {
123         int done = 0;
124
125         if (cpr != NULL) {
126                 if (tx)
127                         done = bnxt_flush_tx_cmp(cpr);
128
129                 if (rx)
130                         done = bnxt_flush_rx_cmp(cpr);
131
132                 if (done)
133                         PMD_DRV_LOG(DEBUG, "HWRM DONE for %s ring\n",
134                                     rx ? "Rx" : "Tx");
135
136                 /* We are about to timeout and still haven't seen the
137                  * HWRM done for the Ring free. Force the cleanup.
138                  */
139                 if (!done && timeout) {
140                         done = 1;
141                         PMD_DRV_LOG(DEBUG, "Timing out for %s ring\n",
142                                     rx ? "Rx" : "Tx");
143                 }
144         } else {
145                 /* This HWRM command is not for a Tx/Rx/AGG ring cleanup.
146                  * Otherwise the cpr would have been valid. So do nothing.
147                  */
148                 done = 1;
149         }
150
151         return done;
152 }
153
154 /*
155  * HWRM Functions (sent to HWRM)
156  * These are named bnxt_hwrm_*() and return 0 on success or -110 if the
157  * HWRM command times out, or a negative error code if the HWRM
158  * command was failed by the FW.
159  */
160
/* Send one HWRM request to firmware through the ChiMP or KonG mailbox
 * (selected by use_kong_mb) and busy-wait for a valid response, polling up
 * to bp->hwrm_cmd_timeout times with a 1 us delay per iteration.  Expected
 * to be called with bp->hwrm_lock held (taken by HWRM_PREP()).  For ring
 * free commands carrying a completion ring id, the matching Tx/Rx CQ is
 * swept each iteration so the ring is only considered freed once HW has
 * flushed it (see bnxt_check_cq_hwrm_done()).  Returns 0 on success -- any
 * FW error code is left in the response buffer for HWRM_CHECK_RESULT() --
 * or -ETIMEDOUT when no valid response arrived in time.  Silently returns
 * 0 without sending when the device is in fatal error state.
 */
161 static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
162                                   uint32_t msg_len, bool use_kong_mb)
163 {
164         unsigned int i;
165         struct input *req = msg;
166         struct output *resp = bp->hwrm_cmd_resp_addr;
167         uint32_t *data = msg;
168         uint8_t *bar;
169         uint8_t *valid;
170         uint16_t max_req_len = bp->max_req_len;
171         struct hwrm_short_input short_input = { 0 };
172         uint16_t bar_offset = use_kong_mb ?
173                 GRCPF_REG_KONG_CHANNEL_OFFSET : GRCPF_REG_CHIMP_CHANNEL_OFFSET;
174         uint16_t mb_trigger_offset = use_kong_mb ?
175                 GRCPF_REG_KONG_COMM_TRIGGER : GRCPF_REG_CHIMP_COMM_TRIGGER;
176         struct bnxt_cp_ring_info *cpr = NULL;
177         bool is_rx = false;
178         bool is_tx = false;
179         uint32_t timeout;
180
181         /* Do not send HWRM commands to firmware in error state */
182         if (bp->flags & BNXT_FLAG_FATAL_ERROR)
183                 return 0;
184
185         timeout = bp->hwrm_cmd_timeout;
186
187         /* Update the message length for backing store config for new FW. */
188         if (bp->fw_ver >= HWRM_VERSION_1_10_2_13 &&
189             rte_cpu_to_le_16(req->req_type) == HWRM_FUNC_BACKING_STORE_CFG)
190                 msg_len = BNXT_BACKING_STORE_CFG_LEGACY_LEN;
191
/* Short-command mode: the full request is placed in host DMA memory and only
 * a small descriptor (hwrm_short_input) is written through the mailbox.
 */
192         if (bp->flags & BNXT_FLAG_SHORT_CMD ||
193             msg_len > bp->max_req_len) {
194                 void *short_cmd_req = bp->hwrm_short_cmd_req_addr;
195
196                 memset(short_cmd_req, 0, bp->hwrm_max_ext_req_len);
197                 memcpy(short_cmd_req, req, msg_len);
198
199                 short_input.req_type = rte_cpu_to_le_16(req->req_type);
200                 short_input.signature = rte_cpu_to_le_16(
201                                         HWRM_SHORT_INPUT_SIGNATURE_SHORT_CMD);
202                 short_input.size = rte_cpu_to_le_16(msg_len);
203                 short_input.req_addr =
204                         rte_cpu_to_le_64(bp->hwrm_short_cmd_req_dma_addr);
205
206                 data = (uint32_t *)&short_input;
207                 msg_len = sizeof(short_input);
208
209                 max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
210         }
211
212         /* Write request msg to hwrm channel */
213         for (i = 0; i < msg_len; i += 4) {
214                 bar = (uint8_t *)bp->bar0 + bar_offset + i;
215                 rte_write32(*data, bar);
216                 data++;
217         }
218
219         /* Zero the rest of the request space */
220         for (; i < max_req_len; i += 4) {
221                 bar = (uint8_t *)bp->bar0 + bar_offset + i;
222                 rte_write32(0, bar);
223         }
224
225         /* Ring channel doorbell */
226         bar = (uint8_t *)bp->bar0 + mb_trigger_offset;
227         rte_write32(1, bar);
228         /*
229          * Make sure the channel doorbell ring command complete before
230          * reading the response to avoid getting stale or invalid
231          * responses.
232          */
233         rte_io_mb();
234
235         /* Check ring flush is done.
236          * This is valid only for Tx and Rx rings (including AGG rings).
237          * The Tx and Rx rings should be freed once the HW confirms all
238          * the internal buffers and BDs associated with the rings are
239          * consumed and the corresponding DMA is handled.
240          */
241         if (rte_cpu_to_le_16(req->cmpl_ring) != INVALID_HW_RING_ID) {
242                 /* Check if the TxCQ matches. If that fails check if RxCQ
243                  * matches. And if neither match, is_rx = false, is_tx = false.
244                  */
245                 cpr = bnxt_get_ring_info_by_id(bp, req->cmpl_ring,
246                                                HWRM_RING_FREE_INPUT_RING_TYPE_TX);
247                 if (cpr == NULL) {
248                         /* Not a TxCQ. Check if the RxCQ matches. */
249                         cpr =
250                         bnxt_get_ring_info_by_id(bp, req->cmpl_ring,
251                                                  HWRM_RING_FREE_INPUT_RING_TYPE_RX);
252                         if (cpr != NULL)
253                                 is_rx = true;
254                 } else {
255                         is_tx = true;
256                 }
257         }
258
259         /* Poll for the valid bit */
260         for (i = 0; i < timeout; i++) {
261                 int done;
262
/* On the final iteration (i == timeout - 1) the flush check force-completes
 * so a missing HWRM DONE cannot wedge the ring free forever.
 */
263                 done = bnxt_check_cq_hwrm_done(cpr, is_tx, is_rx,
264                                                i == timeout - 1);
265                 /* Sanity check on the resp->resp_len */
266                 rte_io_rmb();
267                 if (resp->resp_len && resp->resp_len <= bp->max_resp_len) {
268                         /* Last byte of resp contains the valid key */
269                         valid = (uint8_t *)resp + resp->resp_len - 1;
270                         if (*valid == HWRM_RESP_VALID_KEY && done)
271                                 break;
272                 }
273                 rte_delay_us(1);
274         }
275
276         if (i >= timeout) {
277                 /* Suppress VER_GET timeout messages during reset recovery */
278                 if (bp->flags & BNXT_FLAG_FW_RESET &&
279                     rte_cpu_to_le_16(req->req_type) == HWRM_VER_GET)
280                         return -ETIMEDOUT;
281
282                 PMD_DRV_LOG(ERR,
283                             "Error(timeout) sending msg 0x%04x, seq_id %d\n",
284                             req->req_type, req->seq_id);
285                 return -ETIMEDOUT;
286         }
287         return 0;
288 }
289
290 /*
291  * HWRM_PREP() should be used to prepare *ALL* HWRM commands. It grabs the
292  * spinlock, and does initial processing.
293  *
294  * HWRM_CHECK_RESULT() returns errors on failure and may not be used.  It
295  * releases the spinlock only if it returns. If the regular int return codes
296  * are not used by the function, HWRM_CHECK_RESULT() should not be used
297  * directly, rather it should be copied and modified to suit the function.
298  *
299  * HWRM_UNLOCK() must be called after all response processing is completed.
300  */
/* Serializes all HWRM commands via bp->hwrm_lock, zeroes the shared response
 * buffer, and fills the common request header.  The sequence number comes
 * from the per-mailbox counter (KonG vs ChiMP).  Expands to `return -EACCES`
 * when the response buffer has been freed, so it may only be used inside
 * functions returning int, and every success path must later HWRM_UNLOCK().
 */
301 #define HWRM_PREP(req, type, kong) do { \
302         rte_spinlock_lock(&bp->hwrm_lock); \
303         if (bp->hwrm_cmd_resp_addr == NULL) { \
304                 rte_spinlock_unlock(&bp->hwrm_lock); \
305                 return -EACCES; \
306         } \
307         memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
308         (req)->req_type = rte_cpu_to_le_16(type); \
309         (req)->cmpl_ring = rte_cpu_to_le_16(-1); \
310         (req)->seq_id = kong ? rte_cpu_to_le_16(bp->kong_cmd_seq++) :\
311                 rte_cpu_to_le_16(bp->chimp_cmd_seq++); \
312         (req)->target_id = rte_cpu_to_le_16(0xffff); \
313         (req)->resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr); \
314 } while (0)
315
/* Like HWRM_CHECK_RESULT() but without logging and without mapping firmware
 * error codes to errno values: on failure it unlocks bp->hwrm_lock and
 * returns from the enclosing function with the transport rc or the raw
 * (positive) FW error_code.  Relies on `rc` and `resp` being in scope.
 */
316 #define HWRM_CHECK_RESULT_SILENT() do {\
317         if (rc) { \
318                 rte_spinlock_unlock(&bp->hwrm_lock); \
319                 return rc; \
320         } \
321         if (resp->error_code) { \
322                 rc = rte_le_to_cpu_16(resp->error_code); \
323                 rte_spinlock_unlock(&bp->hwrm_lock); \
324                 return rc; \
325         } \
326 } while (0)
327
/* Validates the outcome of bnxt_hwrm_send_message(): on a transport error or
 * a non-zero FW error_code it logs, releases bp->hwrm_lock, and returns from
 * the enclosing function with a negative errno (any unmapped positive code
 * becomes -EIO).  On success the lock stays held so the caller can read the
 * response buffer before calling HWRM_UNLOCK().  Relies on `rc` and `resp`
 * being in scope.
 */
328 #define HWRM_CHECK_RESULT() do {\
329         if (rc) { \
330                 PMD_DRV_LOG(ERR, "failed rc:%d\n", rc); \
331                 rte_spinlock_unlock(&bp->hwrm_lock); \
332                 if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
333                         rc = -EACCES; \
334                 else if (rc == HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR) \
335                         rc = -ENOSPC; \
336                 else if (rc == HWRM_ERR_CODE_INVALID_PARAMS) \
337                         rc = -EINVAL; \
338                 else if (rc == HWRM_ERR_CODE_CMD_NOT_SUPPORTED) \
339                         rc = -ENOTSUP; \
340                 else if (rc == HWRM_ERR_CODE_HOT_RESET_PROGRESS) \
341                         rc = -EAGAIN; \
342                 else if (rc > 0) \
343                         rc = -EIO; \
344                 return rc; \
345         } \
346         if (resp->error_code) { \
347                 rc = rte_le_to_cpu_16(resp->error_code); \
348                 if (resp->resp_len >= 16) { \
349                         struct hwrm_err_output *tmp_hwrm_err_op = \
350                                                 (void *)resp; \
351                         PMD_DRV_LOG(ERR, \
352                                 "error %d:%d:%08x:%04x\n", \
353                                 rc, tmp_hwrm_err_op->cmd_err, \
354                                 rte_le_to_cpu_32(\
355                                         tmp_hwrm_err_op->opaque_0), \
356                                 rte_le_to_cpu_16(\
357                                         tmp_hwrm_err_op->opaque_1)); \
358                 } else { \
359                         PMD_DRV_LOG(ERR, "error %d\n", rc); \
360                 } \
361                 rte_spinlock_unlock(&bp->hwrm_lock); \
362                 if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
363                         rc = -EACCES; \
364                 else if (rc == HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR) \
365                         rc = -ENOSPC; \
366                 else if (rc == HWRM_ERR_CODE_INVALID_PARAMS) \
367                         rc = -EINVAL; \
368                 else if (rc == HWRM_ERR_CODE_CMD_NOT_SUPPORTED) \
369                         rc = -ENOTSUP; \
370                 else if (rc == HWRM_ERR_CODE_HOT_RESET_PROGRESS) \
371                         rc = -EAGAIN; \
372                 else if (rc > 0) \
373                         rc = -EIO; \
374                 return rc; \
375         } \
376 } while (0)
377
/* Releases the lock taken by HWRM_PREP(); the shared response buffer must
 * not be read after this point -- it is reused by the next HWRM command.
 */
378 #define HWRM_UNLOCK()           rte_spinlock_unlock(&bp->hwrm_lock)
379
380 int bnxt_hwrm_tf_message_direct(struct bnxt *bp,
381                                 bool use_kong_mb,
382                                 uint16_t msg_type,
383                                 void *msg,
384                                 uint32_t msg_len,
385                                 void *resp_msg,
386                                 uint32_t resp_len)
387 {
388         int rc = 0;
389         bool mailbox = BNXT_USE_CHIMP_MB;
390         struct input *req = msg;
391         struct output *resp = bp->hwrm_cmd_resp_addr;
392
393         if (use_kong_mb)
394                 mailbox = BNXT_USE_KONG(bp);
395
396         HWRM_PREP(req, msg_type, mailbox);
397
398         rc = bnxt_hwrm_send_message(bp, req, msg_len, mailbox);
399
400         HWRM_CHECK_RESULT();
401
402         if (resp_msg)
403                 memcpy(resp_msg, resp, resp_len);
404
405         HWRM_UNLOCK();
406
407         return rc;
408 }
409
410 int bnxt_hwrm_tf_message_tunneled(struct bnxt *bp,
411                                   bool use_kong_mb,
412                                   uint16_t tf_type,
413                                   uint16_t tf_subtype,
414                                   uint32_t *tf_response_code,
415                                   void *msg,
416                                   uint32_t msg_len,
417                                   void *response,
418                                   uint32_t response_len)
419 {
420         int rc = 0;
421         struct hwrm_cfa_tflib_input req = { .req_type = 0 };
422         struct hwrm_cfa_tflib_output *resp = bp->hwrm_cmd_resp_addr;
423         bool mailbox = BNXT_USE_CHIMP_MB;
424
425         if (msg_len > sizeof(req.tf_req))
426                 return -ENOMEM;
427
428         if (use_kong_mb)
429                 mailbox = BNXT_USE_KONG(bp);
430
431         HWRM_PREP(&req, HWRM_TF, mailbox);
432         /* Build request using the user supplied request payload.
433          * TLV request size is checked at build time against HWRM
434          * request max size, thus no checking required.
435          */
436         req.tf_type = tf_type;
437         req.tf_subtype = tf_subtype;
438         memcpy(req.tf_req, msg, msg_len);
439
440         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), mailbox);
441         HWRM_CHECK_RESULT();
442
443         /* Copy the resp to user provided response buffer */
444         if (response != NULL)
445                 /* Post process response data. We need to copy only
446                  * the 'payload' as the HWRM data structure really is
447                  * HWRM header + msg header + payload and the TFLIB
448                  * only provided a payload place holder.
449                  */
450                 if (response_len != 0) {
451                         memcpy(response,
452                                resp->tf_resp,
453                                response_len);
454                 }
455
456         /* Extract the internal tflib response code */
457         *tf_response_code = resp->tf_resp_code;
458         HWRM_UNLOCK();
459
460         return rc;
461 }
462
463 int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
464 {
465         int rc = 0;
466         struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
467         struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
468
469         HWRM_PREP(&req, HWRM_CFA_L2_SET_RX_MASK, BNXT_USE_CHIMP_MB);
470         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
471         req.mask = 0;
472
473         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
474
475         HWRM_CHECK_RESULT();
476         HWRM_UNLOCK();
477
478         return rc;
479 }
480
481 int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
482                                  struct bnxt_vnic_info *vnic,
483                                  uint16_t vlan_count,
484                                  struct bnxt_vlan_table_entry *vlan_table)
485 {
486         int rc = 0;
487         struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
488         struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
489         uint32_t mask = 0;
490
491         if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
492                 return rc;
493
494         HWRM_PREP(&req, HWRM_CFA_L2_SET_RX_MASK, BNXT_USE_CHIMP_MB);
495         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
496
497         if (vnic->flags & BNXT_VNIC_INFO_BCAST)
498                 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST;
499         if (vnic->flags & BNXT_VNIC_INFO_UNTAGGED)
500                 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN;
501
502         if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
503                 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
504
505         if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI) {
506                 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
507         } else if (vnic->flags & BNXT_VNIC_INFO_MCAST) {
508                 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
509                 req.num_mc_entries = rte_cpu_to_le_32(vnic->mc_addr_cnt);
510                 req.mc_tbl_addr = rte_cpu_to_le_64(vnic->mc_list_dma_addr);
511         }
512         if (vlan_table) {
513                 if (!(mask & HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN))
514                         mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
515                 req.vlan_tag_tbl_addr =
516                         rte_cpu_to_le_64(rte_malloc_virt2iova(vlan_table));
517                 req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count);
518         }
519         req.mask = rte_cpu_to_le_32(mask);
520
521         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
522
523         HWRM_CHECK_RESULT();
524         HWRM_UNLOCK();
525
526         return rc;
527 }
528
529 int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
530                         uint16_t vlan_count,
531                         struct bnxt_vlan_antispoof_table_entry *vlan_table)
532 {
533         int rc = 0;
534         struct hwrm_cfa_vlan_antispoof_cfg_input req = {.req_type = 0 };
535         struct hwrm_cfa_vlan_antispoof_cfg_output *resp =
536                                                 bp->hwrm_cmd_resp_addr;
537
538         /*
539          * Older HWRM versions did not support this command, and the set_rx_mask
540          * list was used for anti-spoof. In 1.8.0, the TX path configuration was
541          * removed from set_rx_mask call, and this command was added.
542          *
543          * This command is also present from 1.7.8.11 and higher,
544          * as well as 1.7.8.0
545          */
546         if (bp->fw_ver < ((1 << 24) | (8 << 16))) {
547                 if (bp->fw_ver != ((1 << 24) | (7 << 16) | (8 << 8))) {
548                         if (bp->fw_ver < ((1 << 24) | (7 << 16) | (8 << 8) |
549                                         (11)))
550                                 return 0;
551                 }
552         }
553         HWRM_PREP(&req, HWRM_CFA_VLAN_ANTISPOOF_CFG, BNXT_USE_CHIMP_MB);
554         req.fid = rte_cpu_to_le_16(fid);
555
556         req.vlan_tag_mask_tbl_addr =
557                 rte_cpu_to_le_64(rte_malloc_virt2iova(vlan_table));
558         req.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count);
559
560         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
561
562         HWRM_CHECK_RESULT();
563         HWRM_UNLOCK();
564
565         return rc;
566 }
567
568 int bnxt_hwrm_clear_l2_filter(struct bnxt *bp,
569                              struct bnxt_filter_info *filter)
570 {
571         int rc = 0;
572         struct bnxt_filter_info *l2_filter = filter;
573         struct bnxt_vnic_info *vnic = NULL;
574         struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
575         struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;
576
577         if (filter->fw_l2_filter_id == UINT64_MAX)
578                 return 0;
579
580         if (filter->matching_l2_fltr_ptr)
581                 l2_filter = filter->matching_l2_fltr_ptr;
582
583         PMD_DRV_LOG(DEBUG, "filter: %p l2_filter: %p ref_cnt: %d\n",
584                     filter, l2_filter, l2_filter->l2_ref_cnt);
585
586         if (l2_filter->l2_ref_cnt == 0)
587                 return 0;
588
589         if (l2_filter->l2_ref_cnt > 0)
590                 l2_filter->l2_ref_cnt--;
591
592         if (l2_filter->l2_ref_cnt > 0)
593                 return 0;
594
595         HWRM_PREP(&req, HWRM_CFA_L2_FILTER_FREE, BNXT_USE_CHIMP_MB);
596
597         req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
598
599         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
600
601         HWRM_CHECK_RESULT();
602         HWRM_UNLOCK();
603
604         filter->fw_l2_filter_id = UINT64_MAX;
605         if (l2_filter->l2_ref_cnt == 0) {
606                 vnic = l2_filter->vnic;
607                 if (vnic) {
608                         STAILQ_REMOVE(&vnic->filter, l2_filter,
609                                       bnxt_filter_info, next);
610                         bnxt_free_filter(bp, l2_filter);
611                 }
612         }
613
614         return 0;
615 }
616
617 int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
618                          uint16_t dst_id,
619                          struct bnxt_filter_info *filter)
620 {
621         int rc = 0;
622         struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
623         struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
624         struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
625         const struct rte_eth_vmdq_rx_conf *conf =
626                     &dev_conf->rx_adv_conf.vmdq_rx_conf;
627         uint32_t enables = 0;
628         uint16_t j = dst_id - 1;
629
630         //TODO: Is there a better way to add VLANs to each VNIC in case of VMDQ
631         if ((dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) &&
632             conf->pool_map[j].pools & (1UL << j)) {
633                 PMD_DRV_LOG(DEBUG,
634                         "Add vlan %u to vmdq pool %u\n",
635                         conf->pool_map[j].vlan_id, j);
636
637                 filter->l2_ivlan = conf->pool_map[j].vlan_id;
638                 filter->enables |=
639                         HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN |
640                         HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK;
641         }
642
643         if (filter->fw_l2_filter_id != UINT64_MAX)
644                 bnxt_hwrm_clear_l2_filter(bp, filter);
645
646         HWRM_PREP(&req, HWRM_CFA_L2_FILTER_ALLOC, BNXT_USE_CHIMP_MB);
647
648         /* PMD does not support XDP and RoCE */
649         filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_XDP_DISABLE |
650                         HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_TRAFFIC_L2;
651         req.flags = rte_cpu_to_le_32(filter->flags);
652
653         enables = filter->enables |
654               HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
655         req.dst_id = rte_cpu_to_le_16(dst_id);
656
657         if (enables &
658             HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
659                 memcpy(req.l2_addr, filter->l2_addr,
660                        RTE_ETHER_ADDR_LEN);
661         if (enables &
662             HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
663                 memcpy(req.l2_addr_mask, filter->l2_addr_mask,
664                        RTE_ETHER_ADDR_LEN);
665         if (enables &
666             HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
667                 req.l2_ovlan = filter->l2_ovlan;
668         if (enables &
669             HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN)
670                 req.l2_ivlan = filter->l2_ivlan;
671         if (enables &
672             HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
673                 req.l2_ovlan_mask = filter->l2_ovlan_mask;
674         if (enables &
675             HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK)
676                 req.l2_ivlan_mask = filter->l2_ivlan_mask;
677         if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID)
678                 req.src_id = rte_cpu_to_le_32(filter->src_id);
679         if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE)
680                 req.src_type = filter->src_type;
681         if (filter->pri_hint) {
682                 req.pri_hint = filter->pri_hint;
683                 req.l2_filter_id_hint =
684                         rte_cpu_to_le_64(filter->l2_filter_id_hint);
685         }
686
687         req.enables = rte_cpu_to_le_32(enables);
688
689         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
690
691         HWRM_CHECK_RESULT();
692
693         filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);
694         filter->flow_id = rte_le_to_cpu_32(resp->flow_id);
695         HWRM_UNLOCK();
696
697         filter->l2_ref_cnt++;
698
699         return rc;
700 }
701
702 int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
703 {
704         struct hwrm_port_mac_cfg_input req = {.req_type = 0};
705         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
706         uint32_t flags = 0;
707         int rc;
708
709         if (!ptp)
710                 return 0;
711
712         HWRM_PREP(&req, HWRM_PORT_MAC_CFG, BNXT_USE_CHIMP_MB);
713
714         if (ptp->rx_filter)
715                 flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_ENABLE;
716         else
717                 flags |=
718                         HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_DISABLE;
719         if (ptp->tx_tstamp_en)
720                 flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_ENABLE;
721         else
722                 flags |=
723                         HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_DISABLE;
724         req.flags = rte_cpu_to_le_32(flags);
725         req.enables = rte_cpu_to_le_32
726                 (HWRM_PORT_MAC_CFG_INPUT_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE);
727         req.rx_ts_capture_ptp_msg_type = rte_cpu_to_le_16(ptp->rxctl);
728
729         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
730         HWRM_UNLOCK();
731
732         return rc;
733 }
734
735 static int bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
736 {
737         int rc = 0;
738         struct hwrm_port_mac_ptp_qcfg_input req = {.req_type = 0};
739         struct hwrm_port_mac_ptp_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
740         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
741
742         if (ptp)
743                 return 0;
744
745         HWRM_PREP(&req, HWRM_PORT_MAC_PTP_QCFG, BNXT_USE_CHIMP_MB);
746
747         req.port_id = rte_cpu_to_le_16(bp->pf->port_id);
748
749         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
750
751         HWRM_CHECK_RESULT();
752
753         if (!BNXT_CHIP_P5(bp) &&
754             !(resp->flags & HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_DIRECT_ACCESS))
755                 return 0;
756
757         if (resp->flags & HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_ONE_STEP_TX_TS)
758                 bp->flags |= BNXT_FLAG_FW_CAP_ONE_STEP_TX_TS;
759
760         ptp = rte_zmalloc("ptp_cfg", sizeof(*ptp), 0);
761         if (!ptp)
762                 return -ENOMEM;
763
764         if (!BNXT_CHIP_P5(bp)) {
765                 ptp->rx_regs[BNXT_PTP_RX_TS_L] =
766                         rte_le_to_cpu_32(resp->rx_ts_reg_off_lower);
767                 ptp->rx_regs[BNXT_PTP_RX_TS_H] =
768                         rte_le_to_cpu_32(resp->rx_ts_reg_off_upper);
769                 ptp->rx_regs[BNXT_PTP_RX_SEQ] =
770                         rte_le_to_cpu_32(resp->rx_ts_reg_off_seq_id);
771                 ptp->rx_regs[BNXT_PTP_RX_FIFO] =
772                         rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo);
773                 ptp->rx_regs[BNXT_PTP_RX_FIFO_ADV] =
774                         rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo_adv);
775                 ptp->tx_regs[BNXT_PTP_TX_TS_L] =
776                         rte_le_to_cpu_32(resp->tx_ts_reg_off_lower);
777                 ptp->tx_regs[BNXT_PTP_TX_TS_H] =
778                         rte_le_to_cpu_32(resp->tx_ts_reg_off_upper);
779                 ptp->tx_regs[BNXT_PTP_TX_SEQ] =
780                         rte_le_to_cpu_32(resp->tx_ts_reg_off_seq_id);
781                 ptp->tx_regs[BNXT_PTP_TX_FIFO] =
782                         rte_le_to_cpu_32(resp->tx_ts_reg_off_fifo);
783         }
784
785         ptp->bp = bp;
786         bp->ptp_cfg = ptp;
787
788         return 0;
789 }
790
791 void bnxt_hwrm_free_vf_info(struct bnxt *bp)
792 {
793         int i;
794
795         for (i = 0; i < bp->pf->max_vfs; i++) {
796                 rte_free(bp->pf->vf_info[i].vlan_table);
797                 bp->pf->vf_info[i].vlan_table = NULL;
798                 rte_free(bp->pf->vf_info[i].vlan_as_table);
799                 bp->pf->vf_info[i].vlan_as_table = NULL;
800         }
801         rte_free(bp->pf->vf_info);
802         bp->pf->vf_info = NULL;
803 }
804
/* Query firmware function capabilities (HWRM_FUNC_QCAPS) and cache the
 * reported resource limits and capability flags in @bp.  On a PF this
 * also (re)allocates the per-VF info table when the configured VF count
 * has changed.
 *
 * Returns 0 on success, a negative errno on HWRM or allocation failure
 * (HWRM_CHECK_RESULT() returns early on a firmware error).
 */
static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_func_qcaps_input req = {.req_type = 0 };
	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
	uint16_t new_max_vfs;
	uint32_t flags;
	int i;

	HWRM_PREP(&req, HWRM_FUNC_QCAPS, BNXT_USE_CHIMP_MB);

	/* fid 0xffff queries the capabilities of the calling function. */
	req.fid = rte_cpu_to_le_16(0xffff);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

	bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
	/* Snapshot flags now; resp may be overwritten by nested HWRM calls. */
	flags = rte_le_to_cpu_32(resp->flags);
	if (BNXT_PF(bp)) {
		bp->pf->port_id = resp->port_id;
		bp->pf->first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
		bp->pf->total_vfs = rte_le_to_cpu_16(resp->max_vfs);
		new_max_vfs = bp->pdev->max_vfs;
		/* Rebuild the per-VF table if the VF count changed. */
		if (new_max_vfs != bp->pf->max_vfs) {
			if (bp->pf->vf_info)
				bnxt_hwrm_free_vf_info(bp);
			bp->pf->vf_info = rte_zmalloc("bnxt_vf_info",
			    sizeof(bp->pf->vf_info[0]) * new_max_vfs, 0);
			if (bp->pf->vf_info == NULL) {
				PMD_DRV_LOG(ERR, "Alloc vf info fail\n");
				HWRM_UNLOCK();
				return -ENOMEM;
			}
			bp->pf->max_vfs = new_max_vfs;
			for (i = 0; i < new_max_vfs; i++) {
				bp->pf->vf_info[i].fid =
					bp->pf->first_vf_id + i;
				/* Page-sized, page-aligned VLAN bitmap;
				 * locked into memory as firmware DMAs it.
				 */
				bp->pf->vf_info[i].vlan_table =
					rte_zmalloc("VF VLAN table",
						    getpagesize(),
						    getpagesize());
				if (bp->pf->vf_info[i].vlan_table == NULL)
					PMD_DRV_LOG(ERR,
					"Fail to alloc VLAN table for VF %d\n",
					i);
				else
					rte_mem_lock_page(
						bp->pf->vf_info[i].vlan_table);
				bp->pf->vf_info[i].vlan_as_table =
					rte_zmalloc("VF VLAN AS table",
						    getpagesize(),
						    getpagesize());
				if (bp->pf->vf_info[i].vlan_as_table == NULL)
					PMD_DRV_LOG(ERR,
					"Alloc VLAN AS table for VF %d fail\n",
					i);
				else
					rte_mem_lock_page(
					      bp->pf->vf_info[i].vlan_as_table);
				STAILQ_INIT(&bp->pf->vf_info[i].filter);
			}
		}
	}

	bp->fw_fid = rte_le_to_cpu_32(resp->fid);
	/* Adopt the firmware-provided MAC only if it is non-zero. */
	if (!bnxt_check_zero_bytes(resp->mac_address, RTE_ETHER_ADDR_LEN)) {
		bp->flags |= BNXT_FLAG_DFLT_MAC_SET;
		memcpy(bp->mac_addr, &resp->mac_address, RTE_ETHER_ADDR_LEN);
	} else {
		bp->flags &= ~BNXT_FLAG_DFLT_MAC_SET;
	}
	bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
	bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
	bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
	bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
	bp->first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
	bp->max_rx_em_flows = rte_le_to_cpu_16(resp->max_rx_em_flows);
	bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
	/* On non-P5 chips without VFs, EM flow entries add L2 contexts. */
	if (!BNXT_CHIP_P5(bp) && !bp->pdev->max_vfs)
		bp->max_l2_ctx += bp->max_rx_em_flows;
	/* TODO: For now, do not support VMDq/RFS on VFs. */
	if (BNXT_PF(bp)) {
		if (bp->pf->max_vfs)
			bp->max_vnics = 1;
		else
			bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
	} else {
		bp->max_vnics = 1;
	}
	PMD_DRV_LOG(DEBUG, "Max l2_cntxts is %d vnics is %d\n",
		    bp->max_l2_ctx, bp->max_vnics);
	bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
	if (BNXT_PF(bp)) {
		bp->pf->total_vnics = rte_le_to_cpu_16(resp->max_vnics);
		if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED) {
			bp->flags |= BNXT_FLAG_PTP_SUPPORTED;
			PMD_DRV_LOG(DEBUG, "PTP SUPPORTED\n");
			/* Drop the HWRM lock so the nested PTP_QCFG request
			 * can take it.  NOTE(review): on this path the
			 * unconditional HWRM_UNLOCK() at the end of this
			 * function releases the lock a second time - confirm
			 * that is safe with the lock implementation.
			 */
			HWRM_UNLOCK();
			bnxt_hwrm_ptp_qcfg(bp);
		}
	}

	/* Translate remaining capability bits into driver flags. */
	if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT_STATS_SUPPORTED)
		bp->flags |= BNXT_FLAG_EXT_STATS_SUPPORTED;

	if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ERROR_RECOVERY_CAPABLE) {
		bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY;
		PMD_DRV_LOG(DEBUG, "Adapter Error recovery SUPPORTED\n");
	}

	if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ERR_RECOVER_RELOAD)
		bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;

	if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_HOT_RESET_CAPABLE)
		bp->fw_cap |= BNXT_FW_CAP_HOT_RESET;

	if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_LINK_ADMIN_STATUS_SUPPORTED)
		bp->fw_cap |= BNXT_FW_CAP_LINK_ADMIN;

	HWRM_UNLOCK();

	return rc;
}
929
930 int bnxt_hwrm_func_qcaps(struct bnxt *bp)
931 {
932         int rc;
933
934         rc = __bnxt_hwrm_func_qcaps(bp);
935         if (!rc && bp->hwrm_spec_code >= HWRM_SPEC_CODE_1_8_3) {
936                 rc = bnxt_alloc_ctx_mem(bp);
937                 if (rc)
938                         return rc;
939
940                 /* On older FW,
941                  * bnxt_hwrm_func_resc_qcaps can fail and cause init failure.
942                  * But the error can be ignored. Return success.
943                  */
944                 rc = bnxt_hwrm_func_resc_qcaps(bp);
945                 if (!rc)
946                         bp->flags |= BNXT_FLAG_NEW_RM;
947         }
948
949         return 0;
950 }
951
/* VNIC cap covers capability of all VNICs. So no need to pass vnic_id */
/* Query VNIC capabilities (HWRM_VNIC_QCAPS) and record the supported
 * features in bp->vnic_cap_flags; also caches the TPA v2 aggregation
 * limit.  Returns 0 on success, negative errno on HWRM failure.
 */
int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
{
	int rc = 0;
	uint32_t flags;
	struct hwrm_vnic_qcaps_input req = {.req_type = 0 };
	struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(&req, HWRM_VNIC_QCAPS, BNXT_USE_CHIMP_MB);

	/* 0xffff targets the calling function. */
	req.target_id = rte_cpu_to_le_16(0xffff);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

	flags = rte_le_to_cpu_32(resp->flags);

	if (flags & HWRM_VNIC_QCAPS_OUTPUT_FLAGS_COS_ASSIGNMENT_CAP) {
		bp->vnic_cap_flags |= BNXT_VNIC_CAP_COS_CLASSIFY;
		PMD_DRV_LOG(INFO, "CoS assignment capability enabled\n");
	}

	if (flags & HWRM_VNIC_QCAPS_OUTPUT_FLAGS_OUTERMOST_RSS_CAP)
		bp->vnic_cap_flags |= BNXT_VNIC_CAP_OUTER_RSS;

	if (flags & HWRM_VNIC_QCAPS_OUTPUT_FLAGS_RX_CMPL_V2_CAP)
		bp->vnic_cap_flags |= BNXT_VNIC_CAP_RX_CMPL_V2;

	/* Maximum aggregations supported for TPA v2. */
	bp->max_tpa_v2 = rte_le_to_cpu_16(resp->max_aggs_supported);

	HWRM_UNLOCK();

	return rc;
}
987
/* Issue HWRM_FUNC_RESET to reset this function's state in firmware.
 * No optional request fields are enabled (enables = 0).
 * Returns 0 on success, negative errno on HWRM failure.
 */
int bnxt_hwrm_func_reset(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_func_reset_input req = {.req_type = 0 };
	struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(&req, HWRM_FUNC_RESET, BNXT_USE_CHIMP_MB);

	req.enables = rte_cpu_to_le_32(0);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
1005
/* Register the driver with firmware (HWRM_FUNC_DRV_RGTR): advertises
 * driver version, capability flags (hot reset / error recovery /
 * master), the VF request forwarding mask (PF only), and the async
 * completion events the driver wants forwarded.  Sets
 * BNXT_FLAG_REGISTERED on success; a no-op if already registered.
 *
 * Returns 0 on success, negative errno on HWRM failure.
 */
int bnxt_hwrm_func_driver_register(struct bnxt *bp)
{
	int rc;
	uint32_t flags = 0;
	struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
	struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;

	if (bp->flags & BNXT_FLAG_REGISTERED)
		return 0;

	if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
		flags = HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_HOT_RESET_SUPPORT;
	if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
		flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_ERROR_RECOVERY_SUPPORT;

	/* PFs and trusted VFs should indicate the support of the
	 * Master capability on non Stingray platform
	 */
	if ((BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) && !BNXT_STINGRAY(bp))
		flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_MASTER_SUPPORT;

	HWRM_PREP(&req, HWRM_FUNC_DRV_RGTR, BNXT_USE_CHIMP_MB);
	req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
			HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
	/* Report the DPDK release as the driver version. */
	req.ver_maj = RTE_VER_YEAR;
	req.ver_min = RTE_VER_MONTH;
	req.ver_upd = RTE_VER_MINOR;

	if (BNXT_PF(bp)) {
		/* Tell firmware which VF HWRM commands to forward to the PF. */
		req.enables |= rte_cpu_to_le_32(
			HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_REQ_FWD);
		memcpy(req.vf_req_fwd, bp->pf->vf_req_fwd,
		       RTE_MIN(sizeof(req.vf_req_fwd),
			       sizeof(bp->pf->vf_req_fwd)));
	}

	req.flags = rte_cpu_to_le_32(flags);

	/* Bitmaps of async completion event IDs to be forwarded to us;
	 * async_event_fwd[0] covers IDs 0-31, [1] 32-63, [2] 64-95.
	 */
	req.async_event_fwd[0] |=
		rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_LINK_STATUS_CHANGE |
				 ASYNC_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED |
				 ASYNC_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE |
				 ASYNC_CMPL_EVENT_ID_LINK_SPEED_CHANGE |
				 ASYNC_CMPL_EVENT_ID_RESET_NOTIFY);
	if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
		req.async_event_fwd[0] |=
			rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_ERROR_RECOVERY);
	req.async_event_fwd[1] |=
		rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_PF_DRVR_UNLOAD |
				 ASYNC_CMPL_EVENT_ID_VF_CFG_CHANGE);
	if (BNXT_PF(bp))
		req.async_event_fwd[1] |=
			rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_DBG_NOTIFICATION);

	if (BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))
		req.async_event_fwd[1] |=
		rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_DEFAULT_VNIC_CHANGE);

	req.async_event_fwd[2] |=
		rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_ECHO_REQUEST);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

	/* Firmware echoes back its own capability flags. */
	flags = rte_le_to_cpu_32(resp->flags);
	if (flags & HWRM_FUNC_DRV_RGTR_OUTPUT_FLAGS_IF_CHANGE_SUPPORTED)
		bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;

	HWRM_UNLOCK();

	bp->flags |= BNXT_FLAG_REGISTERED;

	return rc;
}
1081
1082 int bnxt_hwrm_check_vf_rings(struct bnxt *bp)
1083 {
1084         if (!(BNXT_VF(bp) && (bp->flags & BNXT_FLAG_NEW_RM)))
1085                 return 0;
1086
1087         return bnxt_hwrm_func_reserve_vf_resc(bp, true);
1088 }
1089
1090 int bnxt_hwrm_func_reserve_vf_resc(struct bnxt *bp, bool test)
1091 {
1092         int rc;
1093         uint32_t flags = 0;
1094         uint32_t enables;
1095         struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1096         struct hwrm_func_vf_cfg_input req = {0};
1097
1098         HWRM_PREP(&req, HWRM_FUNC_VF_CFG, BNXT_USE_CHIMP_MB);
1099
1100         enables = HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RX_RINGS  |
1101                   HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_TX_RINGS   |
1102                   HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_STAT_CTXS  |
1103                   HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
1104                   HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS;
1105
1106         if (BNXT_HAS_RING_GRPS(bp)) {
1107                 enables |= HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS;
1108                 req.num_hw_ring_grps = rte_cpu_to_le_16(bp->rx_nr_rings);
1109         }
1110
1111         req.num_tx_rings = rte_cpu_to_le_16(bp->tx_nr_rings);
1112         req.num_rx_rings = rte_cpu_to_le_16(bp->rx_nr_rings *
1113                                             AGG_RING_MULTIPLIER);
1114         req.num_stat_ctxs = rte_cpu_to_le_16(bp->rx_nr_rings + bp->tx_nr_rings);
1115         req.num_cmpl_rings = rte_cpu_to_le_16(bp->rx_nr_rings +
1116                                               bp->tx_nr_rings +
1117                                               BNXT_NUM_ASYNC_CPR(bp));
1118         req.num_vnics = rte_cpu_to_le_16(bp->rx_nr_rings);
1119         if (bp->vf_resv_strategy ==
1120             HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
1121                 enables |= HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS |
1122                            HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_L2_CTXS |
1123                            HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS;
1124                 req.num_rsscos_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_RSS_CTX);
1125                 req.num_l2_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_L2_CTX);
1126                 req.num_vnics = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_VNIC);
1127         } else if (bp->vf_resv_strategy ==
1128                    HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MAXIMAL) {
1129                 enables |= HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS;
1130                 req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
1131         }
1132
1133         if (test)
1134                 flags = HWRM_FUNC_VF_CFG_INPUT_FLAGS_TX_ASSETS_TEST |
1135                         HWRM_FUNC_VF_CFG_INPUT_FLAGS_RX_ASSETS_TEST |
1136                         HWRM_FUNC_VF_CFG_INPUT_FLAGS_CMPL_ASSETS_TEST |
1137                         HWRM_FUNC_VF_CFG_INPUT_FLAGS_RING_GRP_ASSETS_TEST |
1138                         HWRM_FUNC_VF_CFG_INPUT_FLAGS_STAT_CTX_ASSETS_TEST |
1139                         HWRM_FUNC_VF_CFG_INPUT_FLAGS_VNIC_ASSETS_TEST;
1140
1141         if (test && BNXT_HAS_RING_GRPS(bp))
1142                 flags |= HWRM_FUNC_VF_CFG_INPUT_FLAGS_RING_GRP_ASSETS_TEST;
1143
1144         req.flags = rte_cpu_to_le_32(flags);
1145         req.enables |= rte_cpu_to_le_32(enables);
1146
1147         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1148
1149         if (test)
1150                 HWRM_CHECK_RESULT_SILENT();
1151         else
1152                 HWRM_CHECK_RESULT();
1153
1154         HWRM_UNLOCK();
1155         return rc;
1156 }
1157
/* Query resource-manager capabilities (HWRM_FUNC_RESOURCE_QCAPS) and
 * overwrite the limits in @bp with the RM-provided values.  Failures are
 * reported silently because older firmware does not implement this
 * command (the caller treats failure as "no new RM").
 *
 * Returns 0 on success, negative errno on HWRM failure.
 */
int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp)
{
	int rc;
	struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_func_resource_qcaps_input req = {0};

	HWRM_PREP(&req, HWRM_FUNC_RESOURCE_QCAPS, BNXT_USE_CHIMP_MB);
	/* fid 0xffff queries the calling function. */
	req.fid = rte_cpu_to_le_16(0xffff);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT_SILENT();

	bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
	bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
	bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
	bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
	bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
	/* func_resource_qcaps does not return max_rx_em_flows.
	 * So use the value provided by func_qcaps.
	 */
	bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
	if (!BNXT_CHIP_P5(bp) && !bp->pdev->max_vfs)
		bp->max_l2_ctx += bp->max_rx_em_flows;
	bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
	bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
	bp->max_nq_rings = rte_le_to_cpu_16(resp->max_msix);
	bp->vf_resv_strategy = rte_le_to_cpu_16(resp->vf_reservation_strategy);
	/* Clamp unknown strategies to MAXIMAL. */
	if (bp->vf_resv_strategy >
	    HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC)
		bp->vf_resv_strategy =
		HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_MAXIMAL;

	HWRM_UNLOCK();
	return rc;
}
1194
/* Query firmware/interface version (HWRM_VER_GET), negotiate command
 * timeout and buffer sizes, and (re)allocate the HWRM response and
 * short-command DMA buffers accordingly.  Also records capability bits
 * (short command, Kong mailbox, advanced flow management/counters).
 *
 * @timeout: initial HWRM command timeout in microseconds, used until the
 *           firmware-provided default is read from the response.
 *
 * Returns 0 on success, -EINVAL on version/length mismatch, -ENOMEM on
 * allocation failure, or a negative errno on HWRM failure.
 */
int bnxt_hwrm_ver_get(struct bnxt *bp, uint32_t timeout)
{
	int rc = 0;
	struct hwrm_ver_get_input req = {.req_type = 0 };
	struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t fw_version;
	uint16_t max_resp_len;
	char type[RTE_MEMZONE_NAMESIZE];
	uint32_t dev_caps_cfg;

	bp->max_req_len = HWRM_MAX_REQ_LEN;
	bp->hwrm_cmd_timeout = timeout;
	HWRM_PREP(&req, HWRM_VER_GET, BNXT_USE_CHIMP_MB);

	req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
	req.hwrm_intf_min = HWRM_VERSION_MINOR;
	req.hwrm_intf_upd = HWRM_VERSION_UPDATE;

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	/* During FW reset the command may legitimately fail; stay quiet. */
	if (bp->flags & BNXT_FLAG_FW_RESET)
		HWRM_CHECK_RESULT_SILENT();
	else
		HWRM_CHECK_RESULT();

	PMD_DRV_LOG(INFO, "%d.%d.%d:%d.%d.%d.%d\n",
		resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
		resp->hwrm_intf_upd_8b, resp->hwrm_fw_maj_8b,
		resp->hwrm_fw_min_8b, resp->hwrm_fw_bld_8b,
		resp->hwrm_fw_rsvd_8b);
	/* Pack maj.min.bld.rsvd into one 32-bit firmware version. */
	bp->fw_ver = (resp->hwrm_fw_maj_8b << 24) |
		     (resp->hwrm_fw_min_8b << 16) |
		     (resp->hwrm_fw_bld_8b << 8) |
		     resp->hwrm_fw_rsvd_8b;
	PMD_DRV_LOG(INFO, "Driver HWRM version: %d.%d.%d\n",
		HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);

	/* Interface spec version, compared against HWRM_SPEC_CODE_* consts. */
	fw_version = resp->hwrm_intf_maj_8b << 16;
	fw_version |= resp->hwrm_intf_min_8b << 8;
	fw_version |= resp->hwrm_intf_upd_8b;
	bp->hwrm_spec_code = fw_version;

	/* def_req_timeout value is in milliseconds */
	bp->hwrm_cmd_timeout = rte_le_to_cpu_16(resp->def_req_timeout);
	/* convert timeout to usec */
	bp->hwrm_cmd_timeout *= 1000;
	if (!bp->hwrm_cmd_timeout)
		bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;

	if (resp->hwrm_intf_maj_8b != HWRM_VERSION_MAJOR) {
		PMD_DRV_LOG(ERR, "Unsupported firmware API version\n");
		rc = -EINVAL;
		goto error;
	}

	if (bp->max_req_len > resp->max_req_win_len) {
		/* NOTE(review): this path records -EINVAL but deliberately
		 * falls through (no goto error), so buffer setup below still
		 * runs and -EINVAL is returned at the end - confirm intended.
		 */
		PMD_DRV_LOG(ERR, "Unsupported request length\n");
		rc = -EINVAL;
	}

	bp->chip_num = rte_le_to_cpu_16(resp->chip_num);

	bp->max_req_len = rte_le_to_cpu_16(resp->max_req_win_len);
	bp->hwrm_max_ext_req_len = rte_le_to_cpu_16(resp->max_ext_req_len);
	if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
		bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;

	max_resp_len = rte_le_to_cpu_16(resp->max_resp_len);
	dev_caps_cfg = rte_le_to_cpu_32(resp->dev_caps_cfg);

	/* Reallocate the response buffer if firmware's maximum differs.
	 * Note: `resp` points into the buffer being replaced; it is not
	 * dereferenced again after this point.
	 */
	if (bp->max_resp_len != max_resp_len) {
		sprintf(type, "bnxt_hwrm_" PCI_PRI_FMT,
			bp->pdev->addr.domain, bp->pdev->addr.bus,
			bp->pdev->addr.devid, bp->pdev->addr.function);

		rte_free(bp->hwrm_cmd_resp_addr);

		bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
		if (bp->hwrm_cmd_resp_addr == NULL) {
			rc = -ENOMEM;
			goto error;
		}
		bp->hwrm_cmd_resp_dma_addr =
			rte_malloc_virt2iova(bp->hwrm_cmd_resp_addr);
		if (bp->hwrm_cmd_resp_dma_addr == RTE_BAD_IOVA) {
			PMD_DRV_LOG(ERR,
			"Unable to map response buffer to physical memory.\n");
			rc = -ENOMEM;
			goto error;
		}
		bp->max_resp_len = max_resp_len;
	}

	if ((dev_caps_cfg &
		HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
	    (dev_caps_cfg &
	     HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) {
		PMD_DRV_LOG(DEBUG, "Short command supported\n");
		bp->flags |= BNXT_FLAG_SHORT_CMD;
	}

	/* Allocate the short-command request buffer when firmware requires
	 * short commands or supports extended-length requests.
	 */
	if (((dev_caps_cfg &
	      HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
	     (dev_caps_cfg &
	      HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) ||
	    bp->hwrm_max_ext_req_len > HWRM_MAX_REQ_LEN) {
		sprintf(type, "bnxt_hwrm_short_" PCI_PRI_FMT,
			bp->pdev->addr.domain, bp->pdev->addr.bus,
			bp->pdev->addr.devid, bp->pdev->addr.function);

		rte_free(bp->hwrm_short_cmd_req_addr);

		bp->hwrm_short_cmd_req_addr =
				rte_malloc(type, bp->hwrm_max_ext_req_len, 0);
		if (bp->hwrm_short_cmd_req_addr == NULL) {
			rc = -ENOMEM;
			goto error;
		}
		bp->hwrm_short_cmd_req_dma_addr =
			rte_malloc_virt2iova(bp->hwrm_short_cmd_req_addr);
		if (bp->hwrm_short_cmd_req_dma_addr == RTE_BAD_IOVA) {
			rte_free(bp->hwrm_short_cmd_req_addr);
			PMD_DRV_LOG(ERR,
				"Unable to map buffer to physical memory.\n");
			rc = -ENOMEM;
			goto error;
		}
	}
	if (dev_caps_cfg &
	    HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED) {
		bp->flags |= BNXT_FLAG_KONG_MB_EN;
		PMD_DRV_LOG(DEBUG, "Kong mailbox channel enabled\n");
	}
	if (dev_caps_cfg &
	    HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
		PMD_DRV_LOG(DEBUG, "FW supports Trusted VFs\n");
	if (dev_caps_cfg &
	    HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED) {
		bp->fw_cap |= BNXT_FW_CAP_ADV_FLOW_MGMT;
		PMD_DRV_LOG(DEBUG, "FW supports advanced flow management\n");
	}

	if (dev_caps_cfg &
	    HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_ADV_FLOW_COUNTERS_SUPPORTED) {
		PMD_DRV_LOG(DEBUG, "FW supports advanced flow counters\n");
		bp->fw_cap |= BNXT_FW_CAP_ADV_FLOW_COUNTERS;
	}

error:
	HWRM_UNLOCK();
	return rc;
}
1347
/* Unregister the driver from firmware (HWRM_FUNC_DRV_UNRGTR).
 * A no-op if the driver never registered.
 *
 * @flags: passed through verbatim to the request's flags field; caller
 *         is expected to supply them already in little-endian order.
 *
 * Returns 0 on success, negative errno on HWRM failure.
 */
int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
{
	int rc;
	struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
	struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;

	if (!(bp->flags & BNXT_FLAG_REGISTERED))
		return 0;

	HWRM_PREP(&req, HWRM_FUNC_DRV_UNRGTR, BNXT_USE_CHIMP_MB);
	req.flags = flags;

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
1367
1368 static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
1369 {
1370         int rc = 0;
1371         struct hwrm_port_phy_cfg_input req = {0};
1372         struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1373         uint32_t enables = 0;
1374
1375         HWRM_PREP(&req, HWRM_PORT_PHY_CFG, BNXT_USE_CHIMP_MB);
1376
1377         if (conf->link_up) {
1378                 /* Setting Fixed Speed. But AutoNeg is ON, So disable it */
1379                 if (bp->link_info->auto_mode && conf->link_speed) {
1380                         req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE;
1381                         PMD_DRV_LOG(DEBUG, "Disabling AutoNeg\n");
1382                 }
1383
1384                 req.flags = rte_cpu_to_le_32(conf->phy_flags);
1385                 /*
1386                  * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
1387                  * any auto mode, even "none".
1388                  */
1389                 if (!conf->link_speed) {
1390                         /* No speeds specified. Enable AutoNeg - all speeds */
1391                         enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
1392                         req.auto_mode =
1393                                 HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS;
1394                 } else {
1395                         if (bp->link_info->link_signal_mode) {
1396                                 enables |=
1397                                 HWRM_PORT_PHY_CFG_IN_EN_FORCE_PAM4_LINK_SPEED;
1398                                 req.force_pam4_link_speed =
1399                                         rte_cpu_to_le_16(conf->link_speed);
1400                         } else {
1401                                 req.force_link_speed =
1402                                         rte_cpu_to_le_16(conf->link_speed);
1403                         }
1404                 }
1405                 /* AutoNeg - Advertise speeds specified. */
1406                 if (conf->auto_link_speed_mask &&
1407                     !(conf->phy_flags & HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE)) {
1408                         req.auto_mode =
1409                                 HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
1410                         req.auto_link_speed_mask =
1411                                 conf->auto_link_speed_mask;
1412                         if (conf->auto_pam4_link_speeds) {
1413                                 enables |=
1414                                 HWRM_PORT_PHY_CFG_IN_EN_AUTO_PAM4_LINK_SPD_MASK;
1415                                 req.auto_link_pam4_speed_mask =
1416                                         conf->auto_pam4_link_speeds;
1417                         } else {
1418                                 enables |=
1419                                 HWRM_PORT_PHY_CFG_IN_EN_AUTO_LINK_SPEED_MASK;
1420                         }
1421                 }
1422                 if (conf->auto_link_speed &&
1423                 !(conf->phy_flags & HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE))
1424                         enables |=
1425                                 HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED;
1426
1427                 req.auto_duplex = conf->duplex;
1428                 enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
1429                 req.auto_pause = conf->auto_pause;
1430                 req.force_pause = conf->force_pause;
1431                 /* Set force_pause if there is no auto or if there is a force */
1432                 if (req.auto_pause && !req.force_pause)
1433                         enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
1434                 else
1435                         enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;
1436
1437                 req.enables = rte_cpu_to_le_32(enables);
1438         } else {
1439                 req.flags =
1440                 rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN);
1441                 PMD_DRV_LOG(INFO, "Force Link Down\n");
1442         }
1443
1444         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1445
1446         HWRM_CHECK_RESULT();
1447         HWRM_UNLOCK();
1448
1449         PMD_DRV_LOG(DEBUG, "Port %u: Unregistered with fw\n",
1450                     bp->eth_dev->data->port_id);
1451         return rc;
1452 }
1453
/* Query the current PHY/link state (HWRM_PORT_PHY_QCFG) and copy the
 * reported fields (speed, duplex, pause, auto-neg, PAM4 parameters, PHY
 * version) into @link_info.
 *
 * Returns 0 on success, negative errno on HWRM failure.
 */
static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
				   struct bnxt_link_info *link_info)
{
	int rc = 0;
	struct hwrm_port_phy_qcfg_input req = {0};
	struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(&req, HWRM_PORT_PHY_QCFG, BNXT_USE_CHIMP_MB);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

	link_info->phy_link_status = resp->link;
	/* Link is "up" only when firmware reports a full link. */
	link_info->link_up =
		(link_info->phy_link_status ==
		 HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) ? 1 : 0;
	link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
	link_info->duplex = resp->duplex_cfg;
	link_info->pause = resp->pause;
	link_info->auto_pause = resp->auto_pause;
	link_info->force_pause = resp->force_pause;
	link_info->auto_mode = resp->auto_mode;
	link_info->phy_type = resp->phy_type;
	link_info->media_type = resp->media_type;

	link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
	link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
	link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
	link_info->force_link_speed = rte_le_to_cpu_16(resp->force_link_speed);
	link_info->phy_ver[0] = resp->phy_maj;
	link_info->phy_ver[1] = resp->phy_min;
	link_info->phy_ver[2] = resp->phy_bld;
	link_info->link_signal_mode =
		rte_le_to_cpu_16(resp->active_fec_signal_mode);
	link_info->force_pam4_link_speed =
			rte_le_to_cpu_16(resp->force_pam4_link_speed);
	link_info->support_pam4_speeds =
			rte_le_to_cpu_16(resp->support_pam4_speeds);
	link_info->auto_pam4_link_speeds =
			rte_le_to_cpu_16(resp->auto_pam4_link_speed_mask);
	HWRM_UNLOCK();

	PMD_DRV_LOG(DEBUG, "Link Speed:%d,Auto:%d:%x:%x,Support:%x,Force:%x\n",
		    link_info->link_speed, link_info->auto_mode,
		    link_info->auto_link_speed, link_info->auto_link_speed_mask,
		    link_info->support_speeds, link_info->force_link_speed);
	PMD_DRV_LOG(DEBUG, "Link Signal:%d,PAM::Auto:%x,Support:%x,Force:%x\n",
		    link_info->link_signal_mode,
		    link_info->auto_pam4_link_speeds,
		    link_info->support_pam4_speeds,
		    link_info->force_pam4_link_speed);
	return rc;
}
1508
/* Query PHY capabilities (HWRM_PORT_PHY_QCAPS): record the port count and
 * the speeds supported in autoneg mode (NRZ and PAM4) in bp->link_info.
 * Skipped for non-trusted VFs (presumably the command is PF/trusted-VF
 * only -- confirm against firmware spec).
 * Returns 0 on success or the HWRM error code (via HWRM_CHECK_RESULT).
 */
1509 int bnxt_hwrm_port_phy_qcaps(struct bnxt *bp)
1510 {
1511         int rc = 0;
1512         struct hwrm_port_phy_qcaps_input req = {0};
1513         struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
1514         struct bnxt_link_info *link_info = bp->link_info;
1515
1516         if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp))
1517                 return 0;
1518
1519         HWRM_PREP(&req, HWRM_PORT_PHY_QCAPS, BNXT_USE_CHIMP_MB);
1520
1521         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1522
1523         HWRM_CHECK_RESULT();
1524
1525         bp->port_cnt = resp->port_cnt;
             /* Only overwrite cached capability masks when firmware
              * actually reports them (non-zero). */
1526         if (resp->supported_speeds_auto_mode)
1527                 link_info->support_auto_speeds =
1528                         rte_le_to_cpu_16(resp->supported_speeds_auto_mode);
1529         if (resp->supported_pam4_speeds_auto_mode)
1530                 link_info->support_pam4_auto_speeds =
1531                         rte_le_to_cpu_16(resp->supported_pam4_speeds_auto_mode);
1532
1533         HWRM_UNLOCK();
1534
1535         return 0;
1536 }
1537
1538 static bool bnxt_find_lossy_profile(struct bnxt *bp)
1539 {
1540         int i = 0;
1541
1542         for (i = BNXT_COS_QUEUE_COUNT - 1; i >= 0; i--) {
1543                 if (bp->tx_cos_queue[i].profile ==
1544                     HWRM_QUEUE_SERVICE_PROFILE_LOSSY) {
1545                         bp->tx_cosq_id[0] = bp->tx_cos_queue[i].id;
1546                         return true;
1547                 }
1548         }
1549         return false;
1550 }
1551
/* Fallback used when no lossy Tx CoS profile exists: walk the Tx CoS
 * queue table from the highest index down and pick the first entry whose
 * profile and id are not "unknown", recording its id as the first Tx CoS
 * queue id.  Leaves tx_cosq_id[0] untouched if no entry qualifies.
 * NOTE(review): the queue *id* is compared against the service-profile
 * UNKNOWN constant (HWRM_QUEUE_SERVICE_PROFILE_UNKNOWN); this matches the
 * code as written but the constant choice looks odd -- confirm intent.
 */
1552 static void bnxt_find_first_valid_profile(struct bnxt *bp)
1553 {
1554         int i = 0;
1555
1556         for (i = BNXT_COS_QUEUE_COUNT - 1; i >= 0; i--) {
1557                 if (bp->tx_cos_queue[i].profile !=
1558                     HWRM_QUEUE_SERVICE_PROFILE_UNKNOWN &&
1559                     bp->tx_cos_queue[i].id !=
1560                     HWRM_QUEUE_SERVICE_PROFILE_UNKNOWN) {
1561                         bp->tx_cosq_id[0] = bp->tx_cos_queue[i].id;
1562                         break;
1563                 }
1564         }
1565 }
1566
/* Query CoS queue configuration (HWRM_QUEUE_QPORTCFG), first for the Tx
 * path and then -- by re-entering at get_rx_info -- for the Rx path.
 * Fills the per-queue id/profile tables, selects the Tx CoS queue id(s)
 * the driver will use, and records the maximum configurable (and
 * lossless) queue counts.
 * Returns 0 on success or the HWRM error code (via HWRM_CHECK_RESULT).
 */
1567 int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
1568 {
1569         int rc = 0;
1570         struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
1571         struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
1572         uint32_t dir = HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX;
1573         int i;
1574
1575 get_rx_info:
1576         HWRM_PREP(&req, HWRM_QUEUE_QPORTCFG, BNXT_USE_CHIMP_MB);
1577
1578         req.flags = rte_cpu_to_le_32(dir);
1579         /* HWRM Version >= 1.9.1 only if COS Classification is not required. */
1580         if (bp->hwrm_spec_code >= HWRM_VERSION_1_9_1 &&
1581             !(bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY))
1582                 req.drv_qmap_cap =
1583                         HWRM_QUEUE_QPORTCFG_INPUT_DRV_QMAP_CAP_ENABLED;
1584         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1585
1586         HWRM_CHECK_RESULT();
1587
             /* Unrolled copy of the eight per-queue id/profile pairs for
              * the direction just queried. */
1588         if (dir == HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX) {
1589                 GET_TX_QUEUE_INFO(0);
1590                 GET_TX_QUEUE_INFO(1);
1591                 GET_TX_QUEUE_INFO(2);
1592                 GET_TX_QUEUE_INFO(3);
1593                 GET_TX_QUEUE_INFO(4);
1594                 GET_TX_QUEUE_INFO(5);
1595                 GET_TX_QUEUE_INFO(6);
1596                 GET_TX_QUEUE_INFO(7);
1597         } else  {
1598                 GET_RX_QUEUE_INFO(0);
1599                 GET_RX_QUEUE_INFO(1);
1600                 GET_RX_QUEUE_INFO(2);
1601                 GET_RX_QUEUE_INFO(3);
1602                 GET_RX_QUEUE_INFO(4);
1603                 GET_RX_QUEUE_INFO(5);
1604                 GET_RX_QUEUE_INFO(6);
1605                 GET_RX_QUEUE_INFO(7);
1606         }
1607
1608         HWRM_UNLOCK();
1609
             /* The Tx CoS selection below only applies to the Tx pass. */
1610         if (dir == HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX)
1611                 goto done;
1612
1613         if (bp->hwrm_spec_code < HWRM_VERSION_1_9_1) {
1614                 bp->tx_cosq_id[0] = bp->tx_cos_queue[0].id;
1615         } else {
1616                 int j;
1617
1618                 /* iterate and find the COSq profile to use for Tx */
1619                 if (bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY) {
1620                         for (j = 0, i = 0; i < BNXT_COS_QUEUE_COUNT; i++) {
1621                                 if (bp->tx_cos_queue[i].id != 0xff)
1622                                         bp->tx_cosq_id[j++] =
1623                                                 bp->tx_cos_queue[i].id;
1624                         }
1625                 } else {
1626                         /* When CoS classification is disabled, for normal NIC
1627                          * operations, ideally we should look to use LOSSY.
1628                          * If not found, fallback to the first valid profile
1629                          */
1630                         if (!bnxt_find_lossy_profile(bp))
1631                                 bnxt_find_first_valid_profile(bp);
1632
1633                 }
1634         }
1635
1636         bp->max_tc = resp->max_configurable_queues;
1637         bp->max_lltc = resp->max_configurable_lossless_queues;
1638         if (bp->max_tc > BNXT_MAX_QUEUE)
1639                 bp->max_tc = BNXT_MAX_QUEUE;
1640         bp->max_q = bp->max_tc;
1641
             /* Tx pass done; repeat the query for the Rx path. */
1642         if (dir == HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX) {
1643                 dir = HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX;
1644                 goto get_rx_info;
1645         }
1646
1647 done:
1648         return rc;
1649 }
1650
/* Allocate a hardware ring via HWRM_RING_ALLOC and store the firmware
 * ring id in ring->fw_ring_id.
 *
 * @param ring          ring descriptor providing the BD area DMA address
 *                      and ring size
 * @param ring_type     HWRM_RING_ALLOC_INPUT_RING_TYPE_* (Tx, Rx, L2
 *                      completion, NQ or Rx aggregation)
 * @param map_index     logical index associated with the ring's doorbell
 * @param stats_ctx_id  stats context id, or INVALID_STATS_CTX_ID for none
 * @param cmpl_ring_id  completion ring id (for a completion ring on NQ-
 *                      capable chips this is the NQ ring id instead)
 * @param tx_cosq_id    CoS queue id; used for Tx rings only
 *
 * @return 0 on success, -EINVAL for an unknown ring type, or the
 *         firmware/transport error code on failure.
 */
1651 int bnxt_hwrm_ring_alloc(struct bnxt *bp,
1652                          struct bnxt_ring *ring,
1653                          uint32_t ring_type, uint32_t map_index,
1654                          uint32_t stats_ctx_id, uint32_t cmpl_ring_id,
1655                          uint16_t tx_cosq_id)
1656 {
1657         int rc = 0;
1658         uint32_t enables = 0;
1659         struct hwrm_ring_alloc_input req = {.req_type = 0 };
1660         struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1661         struct rte_mempool *mb_pool;
1662         uint16_t rx_buf_size;
1663
1664         HWRM_PREP(&req, HWRM_RING_ALLOC, BNXT_USE_CHIMP_MB);
1665
1666         req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
1667         req.fbo = rte_cpu_to_le_32(0);
1668         /* Association of ring index with doorbell index */
1669         req.logical_id = rte_cpu_to_le_16(map_index);
1670         req.length = rte_cpu_to_le_32(ring->ring_size);
1671
             /* Per-ring-type request fields and enables bits. */
1672         switch (ring_type) {
1673         case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
1674                 req.ring_type = ring_type;
1675                 req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
1676                 req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id);
1677                 req.queue_id = rte_cpu_to_le_16(tx_cosq_id);
1678                 if (stats_ctx_id != INVALID_STATS_CTX_ID)
1679                         enables |=
1680                         HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
1681                 break;
1682         case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
1683                 req.ring_type = ring_type;
1684                 req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
1685                 req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id);
1686                 if (BNXT_CHIP_P5(bp)) {
                         /* P5 wants the Rx buffer size; derive it from the
                          * first Rx queue's mempool, capped at max pkt len. */
1687                         mb_pool = bp->rx_queues[0]->mb_pool;
1688                         rx_buf_size = rte_pktmbuf_data_room_size(mb_pool) -
1689                                       RTE_PKTMBUF_HEADROOM;
1690                         rx_buf_size = RTE_MIN(BNXT_MAX_PKT_LEN, rx_buf_size);
1691                         req.rx_buf_size = rte_cpu_to_le_16(rx_buf_size);
1692                         enables |=
1693                                 HWRM_RING_ALLOC_INPUT_ENABLES_RX_BUF_SIZE_VALID;
1694                 }
1695                 if (stats_ctx_id != INVALID_STATS_CTX_ID)
1696                         enables |=
1697                                 HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
1698                 break;
1699         case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
1700                 req.ring_type = ring_type;
1701                 if (BNXT_HAS_NQ(bp)) {
1702                         /* Association of cp ring with nq */
1703                         req.nq_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
1704                         enables |=
1705                                 HWRM_RING_ALLOC_INPUT_ENABLES_NQ_RING_ID_VALID;
1706                 }
1707                 req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
1708                 break;
1709         case HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ:
1710                 req.ring_type = ring_type;
1711                 req.page_size = BNXT_PAGE_SHFT;
1712                 req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
1713                 break;
1714         case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG:
1715                 req.ring_type = ring_type;
1716                 req.rx_ring_id = rte_cpu_to_le_16(ring->fw_rx_ring_id);
1717
1718                 mb_pool = bp->rx_queues[0]->mb_pool;
1719                 rx_buf_size = rte_pktmbuf_data_room_size(mb_pool) -
1720                               RTE_PKTMBUF_HEADROOM;
1721                 rx_buf_size = RTE_MIN(BNXT_MAX_PKT_LEN, rx_buf_size);
1722                 req.rx_buf_size = rte_cpu_to_le_16(rx_buf_size);
1723
1724                 req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id);
1725                 enables |= HWRM_RING_ALLOC_INPUT_ENABLES_RX_RING_ID_VALID |
1726                            HWRM_RING_ALLOC_INPUT_ENABLES_RX_BUF_SIZE_VALID |
1727                            HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
1728                 break;
1729         default:
1730                 PMD_DRV_LOG(ERR, "hwrm alloc invalid ring type %d\n",
1731                         ring_type);
1732                 HWRM_UNLOCK();
1733                 return -EINVAL;
1734         }
1735         req.enables = rte_cpu_to_le_32(enables);
1736
1737         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1738
             /* Error handling is done by hand (not HWRM_CHECK_RESULT) so
              * the log message can name the failing ring type. */
1739         if (rc || resp->error_code) {
1740                 if (rc == 0 && resp->error_code)
1741                         rc = rte_le_to_cpu_16(resp->error_code);
1742                 switch (ring_type) {
1743                 case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
1744                         PMD_DRV_LOG(ERR,
1745                                 "hwrm_ring_alloc cp failed. rc:%d\n", rc);
1746                         HWRM_UNLOCK();
1747                         return rc;
1748                 case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
1749                         PMD_DRV_LOG(ERR,
1750                                     "hwrm_ring_alloc rx failed. rc:%d\n", rc);
1751                         HWRM_UNLOCK();
1752                         return rc;
1753                 case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG:
1754                         PMD_DRV_LOG(ERR,
1755                                     "hwrm_ring_alloc rx agg failed. rc:%d\n",
1756                                     rc);
1757                         HWRM_UNLOCK();
1758                         return rc;
1759                 case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
1760                         PMD_DRV_LOG(ERR,
1761                                     "hwrm_ring_alloc tx failed. rc:%d\n", rc);
1762                         HWRM_UNLOCK();
1763                         return rc;
1764                 case HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ:
1765                         PMD_DRV_LOG(ERR,
1766                                     "hwrm_ring_alloc nq failed. rc:%d\n", rc);
1767                         HWRM_UNLOCK();
1768                         return rc;
1769                 default:
1770                         PMD_DRV_LOG(ERR, "Invalid ring. rc:%d\n", rc);
1771                         HWRM_UNLOCK();
1772                         return rc;
1773                 }
1774         }
1775
1776         ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
1777         HWRM_UNLOCK();
1778         return rc;
1779 }
1780
/* Free a hardware ring via HWRM_RING_FREE.  cp_ring_id names the
 * completion ring on which firmware signals completion of the ring
 * flush.  The firmware error code is inspected explicitly (rather than
 * through HWRM_CHECK_RESULT) so that a failed free/flush is logged per
 * ring type and reported to the caller.
 *
 * @return 0 on success, otherwise the transport error or the firmware
 *         error code.
 */
1781 int bnxt_hwrm_ring_free(struct bnxt *bp,
1782                         struct bnxt_ring *ring, uint32_t ring_type,
1783                         uint16_t cp_ring_id)
1784 {
1785         int rc;
1786         struct hwrm_ring_free_input req = {.req_type = 0 };
1787         struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
1788
1789         HWRM_PREP(&req, HWRM_RING_FREE, BNXT_USE_CHIMP_MB);
1790
1791         req.ring_type = ring_type;
1792         req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);
1793         req.cmpl_ring = rte_cpu_to_le_16(cp_ring_id);
1794
1795         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1796
1797         if (rc || resp->error_code) {
                 /* Prefer the firmware-reported code when the transport
                  * itself succeeded. */
1798                 if (rc == 0 && resp->error_code)
1799                         rc = rte_le_to_cpu_16(resp->error_code);
1800                 HWRM_UNLOCK();
1801
1802                 switch (ring_type) {
1803                 case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
1804                         PMD_DRV_LOG(ERR, "hwrm_ring_free cp failed. rc:%d\n",
1805                                 rc);
1806                         return rc;
1807                 case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
1808                         PMD_DRV_LOG(ERR, "hwrm_ring_free rx failed. rc:%d\n",
1809                                 rc);
1810                         return rc;
1811                 case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
1812                         PMD_DRV_LOG(ERR, "hwrm_ring_free tx failed. rc:%d\n",
1813                                 rc);
1814                         return rc;
1815                 case HWRM_RING_FREE_INPUT_RING_TYPE_NQ:
1816                         PMD_DRV_LOG(ERR,
1817                                     "hwrm_ring_free nq failed. rc:%d\n", rc);
1818                         return rc;
1819                 case HWRM_RING_FREE_INPUT_RING_TYPE_RX_AGG:
1820                         PMD_DRV_LOG(ERR,
1821                                     "hwrm_ring_free agg failed. rc:%d\n", rc);
1822                         return rc;
1823                 default:
1824                         PMD_DRV_LOG(ERR, "Invalid ring, rc:%d\n", rc);
1825                         return rc;
1826                 }
1827         }
1828         HWRM_UNLOCK();
1829         return 0;
1830 }
1831
/* Allocate a ring group (HWRM_RING_GRP_ALLOC) from the completion, Rx,
 * aggregation ring ids and stats context cached in bp->grp_info[idx],
 * and store the resulting firmware group id back in grp_info[idx].
 * Returns 0 on success or the HWRM error code (via HWRM_CHECK_RESULT).
 */
1832 int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
1833 {
1834         int rc = 0;
1835         struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
1836         struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1837
1838         HWRM_PREP(&req, HWRM_RING_GRP_ALLOC, BNXT_USE_CHIMP_MB);
1839
1840         req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
1841         req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
1842         req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
1843         req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);
1844
1845         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1846
1847         HWRM_CHECK_RESULT();
1848
1849         bp->grp_info[idx].fw_grp_id = rte_le_to_cpu_16(resp->ring_group_id);
1850
1851         HWRM_UNLOCK();
1852
1853         return rc;
1854 }
1855
/* Free the ring group identified by bp->grp_info[idx].fw_grp_id
 * (HWRM_RING_GRP_FREE) and invalidate the cached group id.
 * Returns 0 on success or the HWRM error code (via HWRM_CHECK_RESULT).
 */
1856 int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
1857 {
1858         int rc;
1859         struct hwrm_ring_grp_free_input req = {.req_type = 0 };
1860         struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;
1861
1862         HWRM_PREP(&req, HWRM_RING_GRP_FREE, BNXT_USE_CHIMP_MB);
1863
1864         req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);
1865
1866         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1867
1868         HWRM_CHECK_RESULT();
1869         HWRM_UNLOCK();
1870
             /* Mark the slot free so later teardown passes skip it. */
1871         bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
1872         return rc;
1873 }
1874
/* Clear the hardware statistics of cpr's stats context
 * (HWRM_STAT_CTX_CLR_STATS).  A no-op (returns 0) when the context was
 * never allocated (id == HWRM_NA_SIGNATURE).
 * Returns 0 on success or the HWRM error code (via HWRM_CHECK_RESULT).
 */
1875 int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
1876 {
1877         int rc = 0;
1878         struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
1879         struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
1880
1881         if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
1882                 return rc;
1883
1884         HWRM_PREP(&req, HWRM_STAT_CTX_CLR_STATS, BNXT_USE_CHIMP_MB);
1885
1886         req.stat_ctx_id = rte_cpu_to_le_32(cpr->hw_stats_ctx_id);
1887
1888         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1889
1890         HWRM_CHECK_RESULT();
1891         HWRM_UNLOCK();
1892
1893         return rc;
1894 }
1895
/* Allocate a statistics context (HWRM_STAT_CTX_ALLOC) backed by the DMA
 * area at cpr->hw_stats_map, with periodic DMA updates disabled
 * (update_period_ms = 0), and store the context id in
 * cpr->hw_stats_ctx_id.
 * Returns 0 on success or the HWRM error code (via HWRM_CHECK_RESULT).
 */
1896 int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1897                                 unsigned int idx __rte_unused)
1898 {
1899         int rc;
1900         struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
1901         struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1902
1903         HWRM_PREP(&req, HWRM_STAT_CTX_ALLOC, BNXT_USE_CHIMP_MB);
1904
1905         req.update_period_ms = rte_cpu_to_le_32(0);
1906
1907         req.stats_dma_addr = rte_cpu_to_le_64(cpr->hw_stats_map);
1908
1909         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1910
1911         HWRM_CHECK_RESULT();
1912
1913         cpr->hw_stats_ctx_id = rte_le_to_cpu_32(resp->stat_ctx_id);
1914
1915         HWRM_UNLOCK();
1916
1917         return rc;
1918 }
1919
/* Free the statistics context held in cpr->hw_stats_ctx_id
 * (HWRM_STAT_CTX_FREE).
 * NOTE(review): the cached id is not reset to HWRM_NA_SIGNATURE here --
 * presumably the caller does that; confirm against call sites.
 * Returns 0 on success or the HWRM error code (via HWRM_CHECK_RESULT).
 */
1920 int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1921                                 unsigned int idx __rte_unused)
1922 {
1923         int rc;
1924         struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
1925         struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;
1926
1927         HWRM_PREP(&req, HWRM_STAT_CTX_FREE, BNXT_USE_CHIMP_MB);
1928
1929         req.stat_ctx_id = rte_cpu_to_le_32(cpr->hw_stats_ctx_id);
1930
1931         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1932
1933         HWRM_CHECK_RESULT();
1934         HWRM_UNLOCK();
1935
1936         return rc;
1937 }
1938
/* Allocate a VNIC (HWRM_VNIC_ALLOC).  On chips that use ring groups, the
 * vnic's [start_grp_id, end_grp_id) range is first mapped to firmware
 * ring-group ids and the RSS/CoS/LB rule handles are initialized to NA.
 * The firmware VNIC id is stored in vnic->fw_vnic_id and the MRU is
 * derived from the current device MTU.
 * Returns 0 on success or the HWRM error code (via HWRM_CHECK_RESULT).
 */
1939 int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1940 {
1941         int rc = 0, i, j;
1942         struct hwrm_vnic_alloc_input req = { 0 };
1943         struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1944
1945         if (!BNXT_HAS_RING_GRPS(bp))
1946                 goto skip_ring_grps;
1947
1948         /* map ring groups to this vnic */
1949         PMD_DRV_LOG(DEBUG, "Alloc VNIC. Start %x, End %x\n",
1950                 vnic->start_grp_id, vnic->end_grp_id);
1951         for (i = vnic->start_grp_id, j = 0; i < vnic->end_grp_id; i++, j++)
1952                 vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
1953
1954         vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
1955         vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
1956         vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
1957         vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
1958
1959 skip_ring_grps:
1960         vnic->mru = BNXT_VNIC_MRU(bp->eth_dev->data->mtu);
1961         HWRM_PREP(&req, HWRM_VNIC_ALLOC, BNXT_USE_CHIMP_MB);
1962
1963         if (vnic->func_default)
1964                 req.flags =
1965                         rte_cpu_to_le_32(HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT);
1966         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1967
1968         HWRM_CHECK_RESULT();
1969
1970         vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
1971         HWRM_UNLOCK();
1972         PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
1973         return rc;
1974 }
1975
/* Query the VNIC's buffer placement configuration
 * (HWRM_VNIC_PLCMODES_QCFG) into pmode: flags, jumbo threshold and
 * header/data split offset/threshold.  The dflt_vnic flag is masked out
 * because the companion _cfg command has no such bit, allowing pmode to
 * be replayed via bnxt_hwrm_vnic_plcmodes_cfg().
 * Returns 0 on success or the HWRM error code (via HWRM_CHECK_RESULT).
 */
1976 static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
1977                                         struct bnxt_vnic_info *vnic,
1978                                         struct bnxt_plcmodes_cfg *pmode)
1979 {
1980         int rc = 0;
1981         struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 };
1982         struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1983
1984         HWRM_PREP(&req, HWRM_VNIC_PLCMODES_QCFG, BNXT_USE_CHIMP_MB);
1985
1986         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1987
1988         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1989
1990         HWRM_CHECK_RESULT();
1991
1992         pmode->flags = rte_le_to_cpu_32(resp->flags);
1993         /* dflt_vnic bit doesn't exist in the _cfg command */
1994         pmode->flags &= ~(HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC);
1995         pmode->jumbo_thresh = rte_le_to_cpu_16(resp->jumbo_thresh);
1996         pmode->hds_offset = rte_le_to_cpu_16(resp->hds_offset);
1997         pmode->hds_threshold = rte_le_to_cpu_16(resp->hds_threshold);
1998
1999         HWRM_UNLOCK();
2000
2001         return rc;
2002 }
2003
/* Apply the buffer placement configuration in pmode to the VNIC
 * (HWRM_VNIC_PLCMODES_CFG), enabling the jumbo-threshold and HDS
 * offset/threshold fields.  A no-op (returns 0) when the VNIC has no
 * firmware id yet.
 * Returns 0 on success or the HWRM error code (via HWRM_CHECK_RESULT).
 */
2004 static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
2005                                        struct bnxt_vnic_info *vnic,
2006                                        struct bnxt_plcmodes_cfg *pmode)
2007 {
2008         int rc = 0;
2009         struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
2010         struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2011
2012         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
2013                 PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
2014                 return rc;
2015         }
2016
2017         HWRM_PREP(&req, HWRM_VNIC_PLCMODES_CFG, BNXT_USE_CHIMP_MB);
2018
2019         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2020         req.flags = rte_cpu_to_le_32(pmode->flags);
2021         req.jumbo_thresh = rte_cpu_to_le_16(pmode->jumbo_thresh);
2022         req.hds_offset = rte_cpu_to_le_16(pmode->hds_offset);
2023         req.hds_threshold = rte_cpu_to_le_16(pmode->hds_threshold);
2024         req.enables = rte_cpu_to_le_32(
2025             HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID |
2026             HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID |
2027             HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID
2028         );
2029
2030         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2031
2032         HWRM_CHECK_RESULT();
2033         HWRM_UNLOCK();
2034
2035         return rc;
2036 }
2037
/* Configure the VNIC (HWRM_VNIC_CFG): default ring/ring group, MRU,
 * rule handles and flag bits (default VNIC, VLAN strip, BD stall, RSS
 * default CR).  The placement modes are queried first and re-applied
 * afterwards so they survive the reconfiguration.  On P5 chips the
 * default Rx/completion ring ids are programmed directly instead of a
 * ring group.  A no-op (returns 0) when the VNIC has no firmware id.
 * Returns 0 on success or the HWRM error code.
 */
2038 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2039 {
2040         int rc = 0;
2041         struct hwrm_vnic_cfg_input req = {.req_type = 0 };
2042         struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2043         struct bnxt_plcmodes_cfg pmodes = { 0 };
2044         uint32_t ctx_enable_flag = 0;
2045         uint32_t enables = 0;
2046
2047         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
2048                 PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
2049                 return rc;
2050         }
2051
             /* Snapshot placement modes; restored after the cfg below. */
2052         rc = bnxt_hwrm_vnic_plcmodes_qcfg(bp, vnic, &pmodes);
2053         if (rc)
2054                 return rc;
2055
2056         HWRM_PREP(&req, HWRM_VNIC_CFG, BNXT_USE_CHIMP_MB);
2057
2058         if (BNXT_CHIP_P5(bp)) {
2059                 int dflt_rxq = vnic->start_grp_id;
2060                 struct bnxt_rx_ring_info *rxr;
2061                 struct bnxt_cp_ring_info *cpr;
2062                 struct bnxt_rx_queue *rxq;
2063                 int i;
2064
2065                 /*
2066                  * The first active receive ring is used as the VNIC
2067                  * default receive ring. If there are no active receive
2068                  * rings (all corresponding receive queues are stopped),
2069                  * the first receive ring is used.
2070                  */
2071                 for (i = vnic->start_grp_id; i < vnic->end_grp_id; i++) {
2072                         rxq = bp->eth_dev->data->rx_queues[i];
2073                         if (rxq->rx_started) {
2074                                 dflt_rxq = i;
2075                                 break;
2076                         }
2077                 }
2078
2079                 rxq = bp->eth_dev->data->rx_queues[dflt_rxq];
2080                 rxr = rxq->rx_ring;
2081                 cpr = rxq->cp_ring;
2082
2083                 req.default_rx_ring_id =
2084                         rte_cpu_to_le_16(rxr->rx_ring_struct->fw_ring_id);
2085                 req.default_cmpl_ring_id =
2086                         rte_cpu_to_le_16(cpr->cp_ring_struct->fw_ring_id);
2087                 enables = HWRM_VNIC_CFG_INPUT_ENABLES_DEFAULT_RX_RING_ID |
2088                           HWRM_VNIC_CFG_INPUT_ENABLES_DEFAULT_CMPL_RING_ID;
2089                 if (bp->vnic_cap_flags & BNXT_VNIC_CAP_RX_CMPL_V2) {
2090                         enables |= HWRM_VNIC_CFG_INPUT_ENABLES_RX_CSUM_V2_MODE;
2091                         req.rx_csum_v2_mode =
2092                                 HWRM_VNIC_CFG_INPUT_RX_CSUM_V2_MODE_ALL_OK;
2093                 }
2094                 goto config_mru;
2095         }
2096
2097         /* Only RSS support for now TBD: COS & LB */
2098         enables = HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP;
2099         if (vnic->lb_rule != 0xffff)
2100                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE;
2101         if (vnic->cos_rule != 0xffff)
2102                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE;
2103         if (vnic->rss_rule != (uint16_t)HWRM_NA_SIGNATURE) {
2104                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_MRU;
2105                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
2106         }
2107         if (bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY) {
2108                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_QUEUE_ID;
2109                 req.queue_id = rte_cpu_to_le_16(vnic->cos_queue_id);
2110         }
2111
2112         enables |= ctx_enable_flag;
2113         req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);
2114         req.rss_rule = rte_cpu_to_le_16(vnic->rss_rule);
2115         req.cos_rule = rte_cpu_to_le_16(vnic->cos_rule);
2116         req.lb_rule = rte_cpu_to_le_16(vnic->lb_rule);
2117
2118 config_mru:
2119         req.enables = rte_cpu_to_le_32(enables);
2120         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2121         req.mru = rte_cpu_to_le_16(vnic->mru);
2122         /* Configure default VNIC only once. */
2123         if (vnic->func_default && !(bp->flags & BNXT_FLAG_DFLT_VNIC_SET)) {
2124                 req.flags |=
2125                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
2126                 bp->flags |= BNXT_FLAG_DFLT_VNIC_SET;
2127         }
2128         if (vnic->vlan_strip)
2129                 req.flags |=
2130                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
2131         if (vnic->bd_stall)
2132                 req.flags |=
2133                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
2134         if (vnic->rss_dflt_cr)
                 /* NOTE(review): a VNIC_QCFG *output* flag constant is used
                  * in this VNIC_CFG request -- presumably it shares the
                  * value of the corresponding CFG input flag; confirm. */
2135                 req.flags |= rte_cpu_to_le_32(
2136                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE);
2137
2138         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2139
2140         HWRM_CHECK_RESULT();
2141         HWRM_UNLOCK();
2142
2143         rc = bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes);
2144
2145         return rc;
2146 }
2147
/* Query the VNIC's current configuration (HWRM_VNIC_QCFG) on behalf of
 * the function identified by fw_vf_id, caching the ring group, rule
 * handles, MRU and flag bits back into vnic.  A no-op (returns 0) when
 * the VNIC has no firmware id.
 * Returns 0 on success or the HWRM error code (via HWRM_CHECK_RESULT).
 */
2148 int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
2149                 int16_t fw_vf_id)
2150 {
2151         int rc = 0;
2152         struct hwrm_vnic_qcfg_input req = {.req_type = 0 };
2153         struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2154
2155         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
2156                 PMD_DRV_LOG(DEBUG, "VNIC QCFG ID %d\n", vnic->fw_vnic_id);
2157                 return rc;
2158         }
2159         HWRM_PREP(&req, HWRM_VNIC_QCFG, BNXT_USE_CHIMP_MB);
2160
2161         req.enables =
2162                 rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID);
2163         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2164         req.vf_id = rte_cpu_to_le_16(fw_vf_id);
2165
2166         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2167
2168         HWRM_CHECK_RESULT();
2169
2170         vnic->dflt_ring_grp = rte_le_to_cpu_16(resp->dflt_ring_grp);
2171         vnic->rss_rule = rte_le_to_cpu_16(resp->rss_rule);
2172         vnic->cos_rule = rte_le_to_cpu_16(resp->cos_rule);
2173         vnic->lb_rule = rte_le_to_cpu_16(resp->lb_rule);
2174         vnic->mru = rte_le_to_cpu_16(resp->mru);
             /* Decode individual flag bits into the vnic bool fields. */
2175         vnic->func_default = rte_le_to_cpu_32(
2176                         resp->flags) & HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT;
2177         vnic->vlan_strip = rte_le_to_cpu_32(resp->flags) &
2178                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE;
2179         vnic->bd_stall = rte_le_to_cpu_32(resp->flags) &
2180                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE;
2181         vnic->rss_dflt_cr = rte_le_to_cpu_32(resp->flags) &
2182                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE;
2183
2184         HWRM_UNLOCK();
2185
2186         return rc;
2187 }
2188
/* Allocate an RSS/CoS/LB context (HWRM_VNIC_RSS_COS_LB_CTX_ALLOC).
 * On chips without ring groups the id is stored per-index in
 * vnic->fw_grp_ids[ctx_idx]; otherwise only index 0 is kept, as the
 * vnic's rss_rule.
 * Returns 0 on success or the HWRM error code (via HWRM_CHECK_RESULT).
 */
2189 int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp,
2190                              struct bnxt_vnic_info *vnic, uint16_t ctx_idx)
2191 {
2192         int rc = 0;
2193         uint16_t ctx_id;
2194         struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
2195         struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
2196                                                 bp->hwrm_cmd_resp_addr;
2197
2198         HWRM_PREP(&req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, BNXT_USE_CHIMP_MB);
2199
2200         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2201         HWRM_CHECK_RESULT();
2202
2203         ctx_id = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
2204         if (!BNXT_HAS_RING_GRPS(bp))
2205                 vnic->fw_grp_ids[ctx_idx] = ctx_id;
2206         else if (ctx_idx == 0)
2207                 vnic->rss_rule = ctx_id;
2208
2209         HWRM_UNLOCK();
2210
2211         return rc;
2212 }
2213
/* Free a single RSS/CoS/LB context id (HWRM_VNIC_RSS_COS_LB_CTX_FREE).
 * Here ctx_idx is the firmware context id itself, not an array index.
 * A no-op (returns 0) when the id is HWRM_NA_SIGNATURE (never allocated).
 * Returns 0 on success or the HWRM error code (via HWRM_CHECK_RESULT).
 */
2214 static
2215 int _bnxt_hwrm_vnic_ctx_free(struct bnxt *bp,
2216                              struct bnxt_vnic_info *vnic, uint16_t ctx_idx)
2217 {
2218         int rc = 0;
2219         struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
2220         struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
2221                                                 bp->hwrm_cmd_resp_addr;
2222
2223         if (ctx_idx == (uint16_t)HWRM_NA_SIGNATURE) {
2224                 PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
2225                 return rc;
2226         }
2227         HWRM_PREP(&req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, BNXT_USE_CHIMP_MB);
2228
2229         req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(ctx_idx);
2230
2231         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2232
2233         HWRM_CHECK_RESULT();
2234         HWRM_UNLOCK();
2235
2236         return rc;
2237 }
2238
2239 int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2240 {
2241         int rc = 0;
2242
2243         if (BNXT_CHIP_P5(bp)) {
2244                 int j;
2245
2246                 for (j = 0; j < vnic->num_lb_ctxts; j++) {
2247                         rc = _bnxt_hwrm_vnic_ctx_free(bp,
2248                                                       vnic,
2249                                                       vnic->fw_grp_ids[j]);
2250                         vnic->fw_grp_ids[j] = INVALID_HW_RING_ID;
2251                 }
2252                 vnic->num_lb_ctxts = 0;
2253         } else {
2254                 rc = _bnxt_hwrm_vnic_ctx_free(bp, vnic, vnic->rss_rule);
2255                 vnic->rss_rule = INVALID_HW_RING_ID;
2256         }
2257
2258         return rc;
2259 }
2260
/*
 * Free the firmware VNIC backing 'vnic' via HWRM_VNIC_FREE.
 * A no-op when the VNIC was never allocated (fw_vnic_id invalid).
 * On success the cached id is invalidated and, if this was the function
 * default VNIC, BNXT_FLAG_DFLT_VNIC_SET is cleared so the default VNIC
 * can be configured again later.
 * Returns 0 on success or a negative errno on HWRM failure.
 */
int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_free_input req = {.req_type = 0 };
	struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;

	if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
		PMD_DRV_LOG(DEBUG, "VNIC FREE ID %x\n", vnic->fw_vnic_id);
		return rc;
	}

	HWRM_PREP(&req, HWRM_VNIC_FREE, BNXT_USE_CHIMP_MB);

	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	vnic->fw_vnic_id = INVALID_HW_RING_ID;
	/* Configure default VNIC again if necessary. */
	if (vnic->func_default && (bp->flags & BNXT_FLAG_DFLT_VNIC_SET))
		bp->flags &= ~BNXT_FLAG_DFLT_VNIC_SET;

	return rc;
}
2288
/*
 * P5 variant of RSS configuration: one HWRM_VNIC_RSS_CFG call per
 * allocated context, each covering its own slice of the RSS indirection
 * table (offset i * HW_HASH_INDEX_SIZE into the shared DMA table) and its
 * own ring-table pair index / context id.
 * Stops and returns the error of the first failing call; 0 on success.
 */
static int
bnxt_hwrm_vnic_rss_cfg_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int i;
	int rc = 0;
	int nr_ctxs = vnic->num_lb_ctxts;
	struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;

	for (i = 0; i < nr_ctxs; i++) {
		HWRM_PREP(&req, HWRM_VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);

		req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
		req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
		req.hash_mode_flags = vnic->hash_mode;

		req.hash_key_tbl_addr =
			rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);

		/* Each context programs its own window of the table. */
		req.ring_grp_tbl_addr =
			rte_cpu_to_le_64(vnic->rss_table_dma_addr +
					 i * HW_HASH_INDEX_SIZE);
		req.ring_table_pair_index = i;
		req.rss_ctx_idx = rte_cpu_to_le_16(vnic->fw_grp_ids[i]);

		rc = bnxt_hwrm_send_message(bp, &req, sizeof(req),
					    BNXT_USE_CHIMP_MB);

		HWRM_CHECK_RESULT();
		HWRM_UNLOCK();
	}

	return rc;
}
2323
/*
 * Program the VNIC's RSS configuration (hash type/mode, key and
 * indirection table) via HWRM_VNIC_RSS_CFG.
 * A no-op (returns 0) when no RSS table was allocated; delegates to the
 * per-context P5 variant on P5 chips.
 * Returns 0 on success or a negative errno on HWRM failure.
 */
int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
			   struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;

	if (!vnic->rss_table)
		return 0;

	if (BNXT_CHIP_P5(bp))
		return bnxt_hwrm_vnic_rss_cfg_p5(bp, vnic);

	HWRM_PREP(&req, HWRM_VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);

	req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
	req.hash_mode_flags = vnic->hash_mode;

	req.ring_grp_tbl_addr =
	    rte_cpu_to_le_64(vnic->rss_table_dma_addr);
	req.hash_key_tbl_addr =
	    rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
	req.rss_ctx_idx = rte_cpu_to_le_16(vnic->rss_rule);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
2356
/*
 * Configure the VNIC's buffer placement mode (jumbo placement) and the
 * jumbo threshold.  The threshold is derived from the mbuf data room of
 * RX queue 0 minus the headroom, capped at BNXT_MAX_PKT_LEN.
 * A no-op when the VNIC was never allocated.
 *
 * NOTE(review): assumes rx_queues[0] exists and its pool's data room
 * exceeds RTE_PKTMBUF_HEADROOM, otherwise the uint16_t subtraction wraps
 * — confirm pool setup guarantees this.
 */
int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
			struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	uint16_t size;

	if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
		PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
		return rc;
	}

	HWRM_PREP(&req, HWRM_VNIC_PLCMODES_CFG, BNXT_USE_CHIMP_MB);

	req.flags = rte_cpu_to_le_32(
			HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);

	req.enables = rte_cpu_to_le_32(
		HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID);

	/* Packets larger than the usable mbuf space use jumbo placement. */
	size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
	size -= RTE_PKTMBUF_HEADROOM;
	size = RTE_MIN(BNXT_MAX_PKT_LEN, size);

	req.jumbo_thresh = rte_cpu_to_le_16(size);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
2392
/*
 * Enable or disable TPA (hardware LRO) on the VNIC via HWRM_VNIC_TPA_CFG.
 *
 * Returns -ENOTSUP on P5 chips without TPA v2 support (logged only when
 * the caller actually tried to enable LRO), 0 silently when the VNIC was
 * never allocated, otherwise the HWRM status.
 * When disabling, only the vnic_id is sent — firmware turns TPA off
 * because no TPA flags are set.
 */
int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
			struct bnxt_vnic_info *vnic, bool enable)
{
	int rc = 0;
	struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;

	if (BNXT_CHIP_P5(bp) && !bp->max_tpa_v2) {
		if (enable)
			PMD_DRV_LOG(ERR, "No HW support for LRO\n");
		return -ENOTSUP;
	}

	if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
		PMD_DRV_LOG(DEBUG, "Invalid vNIC ID\n");
		return 0;
	}

	HWRM_PREP(&req, HWRM_VNIC_TPA_CFG, BNXT_USE_CHIMP_MB);

	if (enable) {
		req.enables = rte_cpu_to_le_32(
				HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
				HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS |
				HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN);
		req.flags = rte_cpu_to_le_32(
				HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
				HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
				HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE |
				HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO |
				HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
			HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ);
		req.max_aggs = rte_cpu_to_le_16(BNXT_TPA_MAX_AGGS(bp));
		req.max_agg_segs = rte_cpu_to_le_16(BNXT_TPA_MAX_SEGS(bp));
		/* Minimum aggregated packet length before TPA kicks in. */
		req.min_agg_len = rte_cpu_to_le_32(512);
	}
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
2438
/*
 * Program the default MAC address of VF 'vf' through HWRM_FUNC_CFG and
 * clear the VF's random_mac flag on success.
 *
 * NOTE(review): the request fields are populated before HWRM_PREP,
 * unlike every other function in this file — this relies on HWRM_PREP
 * not zeroing the request structure; confirm against the macro in
 * bnxt_hwrm.h.
 */
int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
{
	struct hwrm_func_cfg_input req = {0};
	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	req.flags = rte_cpu_to_le_32(bp->pf->vf_info[vf].func_cfg_flags);
	req.enables = rte_cpu_to_le_32(
			HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
	memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
	req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);

	HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	bp->pf->vf_info[vf].random_mac = false;

	return rc;
}
2461
/*
 * Query function statistics for 'fid' and, when 'dropped' is non-NULL,
 * return only the TX dropped-packet counter from the response.
 * Returns 0 on success or a negative errno on HWRM failure.
 */
int bnxt_hwrm_func_qstats_tx_drop(struct bnxt *bp, uint16_t fid,
				  uint64_t *dropped)
{
	int rc = 0;
	struct hwrm_func_qstats_input req = {.req_type = 0};
	struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(&req, HWRM_FUNC_QSTATS, BNXT_USE_CHIMP_MB);

	req.fid = rte_cpu_to_le_16(fid);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

	if (dropped)
		*dropped = rte_le_to_cpu_64(resp->tx_drop_pkts);

	HWRM_UNLOCK();

	return rc;
}
2484
/*
 * Query function statistics for 'fid'.
 *
 * Both output parameters are optional:
 *  - func_qstats: raw copy of the full HWRM response.
 *  - stats: aggregated into rte_eth_stats (unicast + multicast +
 *    broadcast summed per direction; rx_discard -> imissed,
 *    rx_drop -> ierrors, tx_discard -> oerrors).
 *
 * Returns 0 on success or a negative errno on HWRM failure.
 */
int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
			  struct rte_eth_stats *stats,
			  struct hwrm_func_qstats_output *func_qstats)
{
	int rc = 0;
	struct hwrm_func_qstats_input req = {.req_type = 0};
	struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(&req, HWRM_FUNC_QSTATS, BNXT_USE_CHIMP_MB);

	req.fid = rte_cpu_to_le_16(fid);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	if (func_qstats)
		memcpy(func_qstats, resp,
		       sizeof(struct hwrm_func_qstats_output));

	if (!stats)
		goto exit;

	stats->ipackets = rte_le_to_cpu_64(resp->rx_ucast_pkts);
	stats->ipackets += rte_le_to_cpu_64(resp->rx_mcast_pkts);
	stats->ipackets += rte_le_to_cpu_64(resp->rx_bcast_pkts);
	stats->ibytes = rte_le_to_cpu_64(resp->rx_ucast_bytes);
	stats->ibytes += rte_le_to_cpu_64(resp->rx_mcast_bytes);
	stats->ibytes += rte_le_to_cpu_64(resp->rx_bcast_bytes);

	stats->opackets = rte_le_to_cpu_64(resp->tx_ucast_pkts);
	stats->opackets += rte_le_to_cpu_64(resp->tx_mcast_pkts);
	stats->opackets += rte_le_to_cpu_64(resp->tx_bcast_pkts);
	stats->obytes = rte_le_to_cpu_64(resp->tx_ucast_bytes);
	stats->obytes += rte_le_to_cpu_64(resp->tx_mcast_bytes);
	stats->obytes += rte_le_to_cpu_64(resp->tx_bcast_bytes);

	stats->imissed = rte_le_to_cpu_64(resp->rx_discard_pkts);
	stats->ierrors = rte_le_to_cpu_64(resp->rx_drop_pkts);
	stats->oerrors = rte_le_to_cpu_64(resp->tx_discard_pkts);

exit:
	HWRM_UNLOCK();

	return rc;
}
2530
/*
 * Clear the hardware statistics of function 'fid' via HWRM_FUNC_CLR_STATS.
 * Returns 0 on success or a negative errno on HWRM failure.
 */
int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid)
{
	int rc = 0;
	struct hwrm_func_clr_stats_input req = {.req_type = 0};
	struct hwrm_func_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(&req, HWRM_FUNC_CLR_STATS, BNXT_USE_CHIMP_MB);

	req.fid = rte_cpu_to_le_16(fid);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
2548
2549 int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
2550 {
2551         unsigned int i;
2552         int rc = 0;
2553
2554         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
2555                 struct bnxt_tx_queue *txq;
2556                 struct bnxt_rx_queue *rxq;
2557                 struct bnxt_cp_ring_info *cpr;
2558
2559                 if (i >= bp->rx_cp_nr_rings) {
2560                         txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
2561                         cpr = txq->cp_ring;
2562                 } else {
2563                         rxq = bp->rx_queues[i];
2564                         cpr = rxq->cp_ring;
2565                 }
2566
2567                 rc = bnxt_hwrm_stat_clear(bp, cpr);
2568                 if (rc)
2569                         return rc;
2570         }
2571         return 0;
2572 }
2573
/*
 * Free every allocated statistics context (RX completion rings first,
 * then TX) and invalidate the cached ids.
 * For RX rings with ring groups the grp_info fw_stats_ctx entry is
 * invalidated unconditionally, even when no context was allocated.
 * Stops and returns the error of the first failing HWRM free; 0 on
 * success.
 */
static int
bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
{
	int rc;
	unsigned int i;
	struct bnxt_cp_ring_info *cpr;

	for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {

		if (i >= bp->rx_cp_nr_rings) {
			cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
		} else {
			cpr = bp->rx_queues[i]->cp_ring;
			if (BNXT_HAS_RING_GRPS(bp))
				bp->grp_info[i].fw_stats_ctx = -1;
		}
		if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
			rc = bnxt_hwrm_stat_ctx_free(bp, cpr, i);
			/* Invalidate the cached id even on failure so the
			 * context is not freed twice.
			 */
			cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
			if (rc)
				return rc;
		}
	}
	return 0;
}
2599
2600 int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
2601 {
2602         unsigned int i;
2603         int rc = 0;
2604
2605         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
2606                 struct bnxt_tx_queue *txq;
2607                 struct bnxt_rx_queue *rxq;
2608                 struct bnxt_cp_ring_info *cpr;
2609
2610                 if (i >= bp->rx_cp_nr_rings) {
2611                         txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
2612                         cpr = txq->cp_ring;
2613                 } else {
2614                         rxq = bp->rx_queues[i];
2615                         cpr = rxq->cp_ring;
2616                 }
2617
2618                 rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, i);
2619
2620                 if (rc)
2621                         return rc;
2622         }
2623         return rc;
2624 }
2625
2626 static int
2627 bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
2628 {
2629         uint16_t idx;
2630         uint32_t rc = 0;
2631
2632         if (!BNXT_HAS_RING_GRPS(bp))
2633                 return 0;
2634
2635         for (idx = 0; idx < bp->rx_cp_nr_rings; idx++) {
2636
2637                 if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID)
2638                         continue;
2639
2640                 rc = bnxt_hwrm_ring_grp_free(bp, idx);
2641
2642                 if (rc)
2643                         return rc;
2644         }
2645         return rc;
2646 }
2647
/*
 * Free the hardware NQ (notification queue) ring backing 'cpr' and reset
 * the driver-side state: descriptors zeroed, raw consumer index and
 * valid bit cleared, cached fw ring id invalidated.
 */
void bnxt_free_nq_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
{
	struct bnxt_ring *cp_ring = cpr->cp_ring_struct;

	bnxt_hwrm_ring_free(bp, cp_ring,
			    HWRM_RING_FREE_INPUT_RING_TYPE_NQ,
			    INVALID_HW_RING_ID);
	cp_ring->fw_ring_id = INVALID_HW_RING_ID;
	memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
				     sizeof(*cpr->cp_desc_ring));
	cpr->cp_raw_cons = 0;
	cpr->valid = 0;
}
2661
/*
 * Free the hardware L2 completion ring backing 'cpr' and reset the
 * driver-side state: descriptors zeroed, raw consumer index and valid
 * bit cleared, cached fw ring id invalidated.
 */
void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
{
	struct bnxt_ring *cp_ring = cpr->cp_ring_struct;

	bnxt_hwrm_ring_free(bp, cp_ring,
			HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL,
			INVALID_HW_RING_ID);
	cp_ring->fw_ring_id = INVALID_HW_RING_ID;
	memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
			sizeof(*cpr->cp_desc_ring));
	cpr->cp_raw_cons = 0;
	cpr->valid = 0;
}
2675
2676 void bnxt_free_hwrm_rx_ring(struct bnxt *bp, int queue_index)
2677 {
2678         struct bnxt_rx_queue *rxq = bp->rx_queues[queue_index];
2679         struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
2680         struct bnxt_ring *ring = rxr->rx_ring_struct;
2681         struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
2682
2683         if (ring->fw_ring_id != INVALID_HW_RING_ID) {
2684                 bnxt_hwrm_ring_free(bp, ring,
2685                                     HWRM_RING_FREE_INPUT_RING_TYPE_RX,
2686                                     cpr->cp_ring_struct->fw_ring_id);
2687                 ring->fw_ring_id = INVALID_HW_RING_ID;
2688                 if (BNXT_HAS_RING_GRPS(bp))
2689                         bp->grp_info[queue_index].rx_fw_ring_id =
2690                                                         INVALID_HW_RING_ID;
2691         }
2692         ring = rxr->ag_ring_struct;
2693         if (ring->fw_ring_id != INVALID_HW_RING_ID) {
2694                 bnxt_hwrm_ring_free(bp, ring,
2695                                     BNXT_CHIP_P5(bp) ?
2696                                     HWRM_RING_FREE_INPUT_RING_TYPE_RX_AGG :
2697                                     HWRM_RING_FREE_INPUT_RING_TYPE_RX,
2698                                     cpr->cp_ring_struct->fw_ring_id);
2699                 if (BNXT_HAS_RING_GRPS(bp))
2700                         bp->grp_info[queue_index].ag_fw_ring_id =
2701                                                         INVALID_HW_RING_ID;
2702         }
2703         if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
2704                 bnxt_free_cp_ring(bp, cpr);
2705
2706         if (BNXT_HAS_RING_GRPS(bp))
2707                 bp->grp_info[queue_index].cp_fw_ring_id = INVALID_HW_RING_ID;
2708 }
2709
/*
 * Free every firmware TX and RX ring.  For each TX queue the TX ring is
 * freed, its descriptor and buffer rings zeroed and producer/consumer
 * indices reset, then the completion ring is released.  RX queues are
 * delegated to bnxt_free_hwrm_rx_ring().  Always returns 0.
 */
static int
bnxt_free_all_hwrm_rings(struct bnxt *bp)
{
	unsigned int i;

	for (i = 0; i < bp->tx_cp_nr_rings; i++) {
		struct bnxt_tx_queue *txq = bp->tx_queues[i];
		struct bnxt_tx_ring_info *txr = txq->tx_ring;
		struct bnxt_ring *ring = txr->tx_ring_struct;
		struct bnxt_cp_ring_info *cpr = txq->cp_ring;

		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			bnxt_hwrm_ring_free(bp, ring,
					HWRM_RING_FREE_INPUT_RING_TYPE_TX,
					cpr->cp_ring_struct->fw_ring_id);
			ring->fw_ring_id = INVALID_HW_RING_ID;
			memset(txr->tx_desc_ring, 0,
					txr->tx_ring_struct->ring_size *
					sizeof(*txr->tx_desc_ring));
			memset(txr->tx_buf_ring, 0,
					txr->tx_ring_struct->ring_size *
					sizeof(*txr->tx_buf_ring));
			txr->tx_raw_prod = 0;
			txr->tx_raw_cons = 0;
		}
		if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
			bnxt_free_cp_ring(bp, cpr);
			cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
		}
	}

	for (i = 0; i < bp->rx_cp_nr_rings; i++)
		bnxt_free_hwrm_rx_ring(bp, i);

	return 0;
}
2746
2747 int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
2748 {
2749         uint16_t i;
2750         uint32_t rc = 0;
2751
2752         if (!BNXT_HAS_RING_GRPS(bp))
2753                 return 0;
2754
2755         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
2756                 rc = bnxt_hwrm_ring_grp_alloc(bp, i);
2757                 if (rc)
2758                         return rc;
2759         }
2760         return rc;
2761 }
2762
2763 /*
2764  * HWRM utility functions
2765  */
2766
2767 void bnxt_free_hwrm_resources(struct bnxt *bp)
2768 {
2769         /* Release memzone */
2770         rte_free(bp->hwrm_cmd_resp_addr);
2771         rte_free(bp->hwrm_short_cmd_req_addr);
2772         bp->hwrm_cmd_resp_addr = NULL;
2773         bp->hwrm_short_cmd_req_addr = NULL;
2774         bp->hwrm_cmd_resp_dma_addr = 0;
2775         bp->hwrm_short_cmd_req_dma_addr = 0;
2776 }
2777
2778 int bnxt_alloc_hwrm_resources(struct bnxt *bp)
2779 {
2780         struct rte_pci_device *pdev = bp->pdev;
2781         char type[RTE_MEMZONE_NAMESIZE];
2782
2783         sprintf(type, "bnxt_hwrm_" PCI_PRI_FMT, pdev->addr.domain,
2784                 pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
2785         bp->max_resp_len = HWRM_MAX_RESP_LEN;
2786         bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
2787         if (bp->hwrm_cmd_resp_addr == NULL)
2788                 return -ENOMEM;
2789         bp->hwrm_cmd_resp_dma_addr =
2790                 rte_malloc_virt2iova(bp->hwrm_cmd_resp_addr);
2791         if (bp->hwrm_cmd_resp_dma_addr == RTE_BAD_IOVA) {
2792                 PMD_DRV_LOG(ERR,
2793                         "unable to map response address to physical memory\n");
2794                 return -ENOMEM;
2795         }
2796         rte_spinlock_init(&bp->hwrm_lock);
2797
2798         return 0;
2799 }
2800
2801 int
2802 bnxt_clear_one_vnic_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
2803 {
2804         int rc = 0;
2805
2806         if (filter->filter_type == HWRM_CFA_EM_FILTER) {
2807                 rc = bnxt_hwrm_clear_em_filter(bp, filter);
2808                 if (rc)
2809                         return rc;
2810         } else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) {
2811                 rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
2812                 if (rc)
2813                         return rc;
2814         }
2815
2816         rc = bnxt_hwrm_clear_l2_filter(bp, filter);
2817         return rc;
2818 }
2819
2820 static int
2821 bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2822 {
2823         struct bnxt_filter_info *filter;
2824         int rc = 0;
2825
2826         STAILQ_FOREACH(filter, &vnic->filter, next) {
2827                 rc = bnxt_clear_one_vnic_filter(bp, filter);
2828                 STAILQ_REMOVE(&vnic->filter, filter, bnxt_filter_info, next);
2829                 bnxt_free_filter(bp, filter);
2830         }
2831         return rc;
2832 }
2833
/*
 * Tear down every rte_flow attached to the VNIC: clear the flow's filter
 * from hardware, unlink the flow and free the flow object.  The filter
 * object itself is not freed here.
 * Returns the status of the last hardware clear (0 when the list is
 * empty).
 */
static int
bnxt_clear_hwrm_vnic_flows(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	struct bnxt_filter_info *filter;
	struct rte_flow *flow;
	int rc = 0;

	/* Pop from the head so freeing the current flow is safe. */
	while (!STAILQ_EMPTY(&vnic->flow_list)) {
		flow = STAILQ_FIRST(&vnic->flow_list);
		filter = flow->filter;
		PMD_DRV_LOG(DEBUG, "filter type %d\n", filter->filter_type);
		rc = bnxt_clear_one_vnic_filter(bp, filter);

		STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
		rte_free(flow);
	}
	return rc;
}
2852
2853 int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2854 {
2855         struct bnxt_filter_info *filter;
2856         int rc = 0;
2857
2858         STAILQ_FOREACH(filter, &vnic->filter, next) {
2859                 if (filter->filter_type == HWRM_CFA_EM_FILTER)
2860                         rc = bnxt_hwrm_set_em_filter(bp, filter->dst_id,
2861                                                      filter);
2862                 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
2863                         rc = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id,
2864                                                          filter);
2865                 else
2866                         rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
2867                                                      filter);
2868                 if (rc)
2869                         break;
2870         }
2871         return rc;
2872 }
2873
/*
 * Free the firmware VXLAN and Geneve tunnel destination ports, if any
 * were allocated (non-zero reference counts).
 */
static void
bnxt_free_tunnel_ports(struct bnxt *bp)
{
	if (bp->vxlan_port_cnt)
		bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id,
			HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN);

	if (bp->geneve_port_cnt)
		bnxt_hwrm_tunnel_dst_port_free(bp, bp->geneve_fw_dst_port_id,
			HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE);
}
2885
/*
 * Release every HWRM-managed resource: per-VNIC flows, filters, RSS
 * contexts, TPA config and the VNIC itself, followed by rings, ring
 * groups, statistics contexts and tunnel ports.
 * A no-op when no VNIC array was ever allocated.
 */
void bnxt_free_all_hwrm_resources(struct bnxt *bp)
{
	int i;

	if (bp->vnic_info == NULL)
		return;

	/*
	 * Cleanup VNICs in reverse order, to make sure the L2 filter
	 * from vnic0 is last to be cleaned up.
	 */
	for (i = bp->max_vnics - 1; i >= 0; i--) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

		/* Skip VNICs that were never allocated in firmware. */
		if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
			continue;

		bnxt_clear_hwrm_vnic_flows(bp, vnic);

		bnxt_clear_hwrm_vnic_filters(bp, vnic);

		bnxt_hwrm_vnic_ctx_free(bp, vnic);

		bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);

		bnxt_hwrm_vnic_free(bp, vnic);

		rte_free(vnic->fw_grp_ids);
	}
	/* Ring resources */
	bnxt_free_all_hwrm_rings(bp);
	bnxt_free_all_hwrm_ring_grps(bp);
	bnxt_free_all_hwrm_stat_ctxs(bp);
	bnxt_free_tunnel_ports(bp);
}
2921
2922 static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
2923 {
2924         uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
2925
2926         if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
2927                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
2928
2929         switch (conf_link_speed) {
2930         case ETH_LINK_SPEED_10M_HD:
2931         case ETH_LINK_SPEED_100M_HD:
2932                 /* FALLTHROUGH */
2933                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
2934         }
2935         return hw_link_duplex;
2936 }
2937
/* Return 1 (autoneg) when no explicit link configuration was given. */
static uint16_t bnxt_check_eth_link_autoneg(uint32_t conf_link)
{
	return conf_link == 0 ? 1 : 0;
}
2942
/*
 * Translate a DPDK link-speed setting into the HWRM PHY-config speed
 * encoding.  For 50G and 100G, 'pam4_link' selects the PAM4 variant;
 * 200G is PAM4-only.  Autoneg passes through unchanged, and an
 * unsupported value is logged and falls back to 0 (AUTO).
 */
static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed,
					  uint16_t pam4_link)
{
	uint16_t eth_link_speed = 0;

	if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
		return ETH_LINK_SPEED_AUTONEG;

	switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
	case ETH_LINK_SPEED_100M:
	case ETH_LINK_SPEED_100M_HD:
		/* FALLTHROUGH */
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
		break;
	case ETH_LINK_SPEED_1G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
		break;
	case ETH_LINK_SPEED_2_5G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
		break;
	case ETH_LINK_SPEED_10G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
		break;
	case ETH_LINK_SPEED_20G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
		break;
	case ETH_LINK_SPEED_25G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
		break;
	case ETH_LINK_SPEED_40G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
		break;
	case ETH_LINK_SPEED_50G:
		eth_link_speed = pam4_link ?
			HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_50GB :
			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
		break;
	case ETH_LINK_SPEED_100G:
		eth_link_speed = pam4_link ?
			HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_100GB :
			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB;
		break;
	case ETH_LINK_SPEED_200G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_200GB;
		break;
	default:
		PMD_DRV_LOG(ERR,
			"Unsupported link speed %d; default to AUTO\n",
			conf_link_speed);
		break;
	}
	return eth_link_speed;
}
3004
/* All link speeds this PMD can request, used as the fallback advertise
 * mask when autonegotiation is configured without a firmware-reported
 * support mask.
 */
#define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
		ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
		ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
		ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G | \
		ETH_LINK_SPEED_100G | ETH_LINK_SPEED_200G)
3010
3011 static int bnxt_validate_link_speed(struct bnxt *bp)
3012 {
3013         uint32_t link_speed = bp->eth_dev->data->dev_conf.link_speeds;
3014         uint16_t port_id = bp->eth_dev->data->port_id;
3015         uint32_t link_speed_capa;
3016         uint32_t one_speed;
3017
3018         if (link_speed == ETH_LINK_SPEED_AUTONEG)
3019                 return 0;
3020
3021         link_speed_capa = bnxt_get_speed_capabilities(bp);
3022
3023         if (link_speed & ETH_LINK_SPEED_FIXED) {
3024                 one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
3025
3026                 if (one_speed & (one_speed - 1)) {
3027                         PMD_DRV_LOG(ERR,
3028                                 "Invalid advertised speeds (%u) for port %u\n",
3029                                 link_speed, port_id);
3030                         return -EINVAL;
3031                 }
3032                 if ((one_speed & link_speed_capa) != one_speed) {
3033                         PMD_DRV_LOG(ERR,
3034                                 "Unsupported advertised speed (%u) for port %u\n",
3035                                 link_speed, port_id);
3036                         return -EINVAL;
3037                 }
3038         } else {
3039                 if (!(link_speed & link_speed_capa)) {
3040                         PMD_DRV_LOG(ERR,
3041                                 "Unsupported advertised speeds (%u) for port %u\n",
3042                                 link_speed, port_id);
3043                         return -EINVAL;
3044                 }
3045         }
3046         return 0;
3047 }
3048
3049 static uint16_t
3050 bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed)
3051 {
3052         uint16_t ret = 0;
3053
3054         if (link_speed == ETH_LINK_SPEED_AUTONEG) {
3055                 if (bp->link_info->support_speeds)
3056                         return bp->link_info->support_speeds;
3057                 link_speed = BNXT_SUPPORTED_SPEEDS;
3058         }
3059
3060         if (link_speed & ETH_LINK_SPEED_100M)
3061                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
3062         if (link_speed & ETH_LINK_SPEED_100M_HD)
3063                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
3064         if (link_speed & ETH_LINK_SPEED_1G)
3065                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
3066         if (link_speed & ETH_LINK_SPEED_2_5G)
3067                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
3068         if (link_speed & ETH_LINK_SPEED_10G)
3069                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
3070         if (link_speed & ETH_LINK_SPEED_20G)
3071                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
3072         if (link_speed & ETH_LINK_SPEED_25G)
3073                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
3074         if (link_speed & ETH_LINK_SPEED_40G)
3075                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
3076         if (link_speed & ETH_LINK_SPEED_50G)
3077                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
3078         if (link_speed & ETH_LINK_SPEED_100G)
3079                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100GB;
3080         if (link_speed & ETH_LINK_SPEED_200G)
3081                 ret |= HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_200GB;
3082         return ret;
3083 }
3084
3085 static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
3086 {
3087         uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;
3088
3089         switch (hw_link_speed) {
3090         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
3091                 eth_link_speed = ETH_SPEED_NUM_100M;
3092                 break;
3093         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
3094                 eth_link_speed = ETH_SPEED_NUM_1G;
3095                 break;
3096         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
3097                 eth_link_speed = ETH_SPEED_NUM_2_5G;
3098                 break;
3099         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
3100                 eth_link_speed = ETH_SPEED_NUM_10G;
3101                 break;
3102         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
3103                 eth_link_speed = ETH_SPEED_NUM_20G;
3104                 break;
3105         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
3106                 eth_link_speed = ETH_SPEED_NUM_25G;
3107                 break;
3108         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
3109                 eth_link_speed = ETH_SPEED_NUM_40G;
3110                 break;
3111         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
3112                 eth_link_speed = ETH_SPEED_NUM_50G;
3113                 break;
3114         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB:
3115                 eth_link_speed = ETH_SPEED_NUM_100G;
3116                 break;
3117         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_200GB:
3118                 eth_link_speed = ETH_SPEED_NUM_200G;
3119                 break;
3120         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
3121         default:
3122                 PMD_DRV_LOG(ERR, "HWRM link speed %d not defined\n",
3123                         hw_link_speed);
3124                 break;
3125         }
3126         return eth_link_speed;
3127 }
3128
3129 static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
3130 {
3131         uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;
3132
3133         switch (hw_link_duplex) {
3134         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
3135         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
3136                 /* FALLTHROUGH */
3137                 eth_link_duplex = ETH_LINK_FULL_DUPLEX;
3138                 break;
3139         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
3140                 eth_link_duplex = ETH_LINK_HALF_DUPLEX;
3141                 break;
3142         default:
3143                 PMD_DRV_LOG(ERR, "HWRM link duplex %d not defined\n",
3144                         hw_link_duplex);
3145                 break;
3146         }
3147         return eth_link_duplex;
3148 }
3149
3150 int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
3151 {
3152         int rc = 0;
3153         struct bnxt_link_info *link_info = bp->link_info;
3154
3155         rc = bnxt_hwrm_port_phy_qcaps(bp);
3156         if (rc)
3157                 PMD_DRV_LOG(ERR, "Get link config failed with rc %d\n", rc);
3158
3159         rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
3160         if (rc) {
3161                 PMD_DRV_LOG(ERR, "Get link config failed with rc %d\n", rc);
3162                 goto exit;
3163         }
3164
3165         if (link_info->link_speed)
3166                 link->link_speed =
3167                         bnxt_parse_hw_link_speed(link_info->link_speed);
3168         else
3169                 link->link_speed = ETH_SPEED_NUM_NONE;
3170         link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
3171         link->link_status = link_info->link_up;
3172         link->link_autoneg = link_info->auto_mode ==
3173                 HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
3174                 ETH_LINK_FIXED : ETH_LINK_AUTONEG;
3175 exit:
3176         return rc;
3177 }
3178
/* Program the PHY according to the ethdev configuration: either restart
 * autonegotiation with an advertised-speed mask, or force a specific
 * speed.  Only a single-function PF may touch the PHY; VFs and
 * multi-function configs return 0 without doing anything.
 *
 * @param bp      driver private data
 * @param link_up false brings the link down (skips all speed selection)
 *
 * @return 0 on success, negative value on validation or HWRM failure.
 */
int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
{
	int rc = 0;
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	struct bnxt_link_info link_req;
	uint16_t speed, autoneg;

	if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp))
		return 0;

	rc = bnxt_validate_link_speed(bp);
	if (rc)
		goto error;

	memset(&link_req, 0, sizeof(link_req));
	link_req.link_up = link_up;
	/* Bringing the link down needs no speed/duplex selection. */
	if (!link_up)
		goto port_phy_cfg;

	autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
	if (BNXT_CHIP_P5(bp) &&
	    dev_conf->link_speeds == ETH_LINK_SPEED_40G) {
		/* 40G is not supported as part of media auto detect.
		 * The speed should be forced and autoneg disabled
		 * to configure 40G speed.
		 */
		PMD_DRV_LOG(INFO, "Disabling autoneg for 40G\n");
		autoneg = 0;
	}

	/* No auto speeds and no auto_pam4_link. Disable autoneg */
	if (bp->link_info->auto_link_speed == 0 &&
	    bp->link_info->link_signal_mode &&
	    bp->link_info->auto_pam4_link_speeds == 0)
		autoneg = 0;

	speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds,
					  bp->link_info->link_signal_mode);
	link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
	/* Autoneg can be done only when the FW allows.
	 * When user configures fixed speed of 40G and later changes to
	 * any other speed, auto_link_speed/force_link_speed is still set
	 * to 40G until link comes up at new speed.
	 */
	if (autoneg == 1 &&
	    !(!BNXT_CHIP_P5(bp) &&
	      (bp->link_info->auto_link_speed ||
	       bp->link_info->force_link_speed))) {
		/* Autoneg path: restart negotiation with the requested mask. */
		link_req.phy_flags |=
				HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
		link_req.auto_link_speed_mask =
			bnxt_parse_eth_link_speed_mask(bp,
						       dev_conf->link_speeds);
	} else {
		/* Forced-speed path: BASE-T media must autonegotiate and
		 * therefore cannot be forced.
		 */
		if (bp->link_info->phy_type ==
		    HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET ||
		    bp->link_info->phy_type ==
		    HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE ||
		    bp->link_info->media_type ==
		    HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP) {
			PMD_DRV_LOG(ERR, "10GBase-T devices must autoneg\n");
			return -EINVAL;
		}

		link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
		/* If user wants a particular speed try that first. */
		if (speed)
			link_req.link_speed = speed;
		else if (bp->link_info->force_pam4_link_speed)
			link_req.link_speed =
				bp->link_info->force_pam4_link_speed;
		else if (bp->link_info->auto_pam4_link_speeds)
			link_req.link_speed =
				bp->link_info->auto_pam4_link_speeds;
		else if (bp->link_info->support_pam4_speeds)
			link_req.link_speed =
				bp->link_info->support_pam4_speeds;
		else if (bp->link_info->force_link_speed)
			link_req.link_speed = bp->link_info->force_link_speed;
		else
			link_req.link_speed = bp->link_info->auto_link_speed;
		/* Auto PAM4 link speed is zero, but auto_link_speed is not
		 * zero. Use the auto_link_speed.
		 */
		if (bp->link_info->auto_link_speed != 0 &&
		    bp->link_info->auto_pam4_link_speeds == 0)
			link_req.link_speed = bp->link_info->auto_link_speed;
	}
	link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
	link_req.auto_pause = bp->link_info->auto_pause;
	link_req.force_pause = bp->link_info->force_pause;

port_phy_cfg:
	rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
	if (rc) {
		PMD_DRV_LOG(ERR,
			"Set link config failed with rc %d\n", rc);
	}

error:
	return rc;
}
3281
3282 /* JIRA 22088 */
3283 int bnxt_hwrm_func_qcfg(struct bnxt *bp, uint16_t *mtu)
3284 {
3285         struct hwrm_func_qcfg_input req = {0};
3286         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3287         uint16_t flags;
3288         int rc = 0;
3289         bp->func_svif = BNXT_SVIF_INVALID;
3290         uint16_t svif_info;
3291
3292         HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
3293         req.fid = rte_cpu_to_le_16(0xffff);
3294
3295         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3296
3297         HWRM_CHECK_RESULT();
3298
3299         /* Hard Coded.. 0xfff VLAN ID mask */
3300         bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
3301
3302         svif_info = rte_le_to_cpu_16(resp->svif_info);
3303         if (svif_info & HWRM_FUNC_QCFG_OUTPUT_SVIF_INFO_SVIF_VALID)
3304                 bp->func_svif = svif_info &
3305                                      HWRM_FUNC_QCFG_OUTPUT_SVIF_INFO_SVIF_MASK;
3306
3307         flags = rte_le_to_cpu_16(resp->flags);
3308         if (BNXT_PF(bp) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_HOST))
3309                 bp->flags |= BNXT_FLAG_MULTI_HOST;
3310
3311         if (BNXT_VF(bp) &&
3312             !BNXT_VF_IS_TRUSTED(bp) &&
3313             (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_TRUSTED_VF)) {
3314                 bp->flags |= BNXT_FLAG_TRUSTED_VF_EN;
3315                 PMD_DRV_LOG(INFO, "Trusted VF cap enabled\n");
3316         } else if (BNXT_VF(bp) &&
3317                    BNXT_VF_IS_TRUSTED(bp) &&
3318                    !(flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_TRUSTED_VF)) {
3319                 bp->flags &= ~BNXT_FLAG_TRUSTED_VF_EN;
3320                 PMD_DRV_LOG(INFO, "Trusted VF cap disabled\n");
3321         }
3322
3323         if (mtu)
3324                 *mtu = rte_le_to_cpu_16(resp->mtu);
3325
3326         switch (resp->port_partition_type) {
3327         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
3328         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
3329         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
3330                 /* FALLTHROUGH */
3331                 bp->flags |= BNXT_FLAG_NPAR_PF;
3332                 break;
3333         default:
3334                 bp->flags &= ~BNXT_FLAG_NPAR_PF;
3335                 break;
3336         }
3337
3338         bp->legacy_db_size =
3339                 rte_le_to_cpu_16(resp->legacy_l2_db_size_kb) * 1024;
3340
3341         HWRM_UNLOCK();
3342
3343         return rc;
3344 }
3345
/* Query the parent PF's configuration on behalf of a trusted VF
 * (fid 0xfffe) and cache its MAC address, default VNIC id, fid and
 * port id in bp->parent.
 *
 * No-op (returns 0) for a non-trusted VF; returns -EINVAL if
 * bp->parent was never allocated; 0 on success.
 */
int bnxt_hwrm_parent_pf_qcfg(struct bnxt *bp)
{
	struct hwrm_func_qcfg_input req = {0};
	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	if (!BNXT_VF_IS_TRUSTED(bp))
		return 0;

	if (!bp->parent)
		return -EINVAL;

	/* Invalidate the cached fid in case the query below fails. */
	bp->parent->fid = BNXT_PF_FID_INVALID;

	HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);

	req.fid = rte_cpu_to_le_16(0xfffe); /* Request parent PF information. */

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

	memcpy(bp->parent->mac_addr, resp->mac_address, RTE_ETHER_ADDR_LEN);
	bp->parent->vnic = rte_le_to_cpu_16(resp->dflt_vnic_id);
	bp->parent->fid = rte_le_to_cpu_16(resp->fid);
	bp->parent->port_id = rte_le_to_cpu_16(resp->port_id);

	/* FIXME: Temporary workaround - remove when firmware issue is fixed. */
	if (bp->parent->vnic == 0) {
		PMD_DRV_LOG(ERR, "Error: parent VNIC unavailable.\n");
		/* Use hard-coded values appropriate for current Wh+ fw. */
		if (bp->parent->fid == 2)
			bp->parent->vnic = 0x100;
		else
			bp->parent->vnic = 1;
	}

	HWRM_UNLOCK();

	return 0;
}
3387
/* Look up the default VNIC id and (if valid) the SVIF of an arbitrary
 * function identified by @fid.
 *
 * @param fid     function id to query
 * @param vnic_id if non-NULL, receives the function's default VNIC id
 * @param svif    if non-NULL, receives the SVIF only when firmware
 *                marks it valid; otherwise left untouched
 *
 * @return 0 on success, negative value on HWRM failure.
 */
int bnxt_hwrm_get_dflt_vnic_svif(struct bnxt *bp, uint16_t fid,
				 uint16_t *vnic_id, uint16_t *svif)
{
	struct hwrm_func_qcfg_input req = {0};
	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	uint16_t svif_info;
	int rc = 0;

	HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
	req.fid = rte_cpu_to_le_16(fid);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

	if (vnic_id)
		*vnic_id = rte_le_to_cpu_16(resp->dflt_vnic_id);

	svif_info = rte_le_to_cpu_16(resp->svif_info);
	if (svif && (svif_info & HWRM_FUNC_QCFG_OUTPUT_SVIF_INFO_SVIF_VALID))
		*svif = svif_info & HWRM_FUNC_QCFG_OUTPUT_SVIF_INFO_SVIF_MASK;

	HWRM_UNLOCK();

	return rc;
}
3414
/* Query the port-level SVIF and cache it in bp->port_svif.
 *
 * bp->port_svif is reset to BNXT_SVIF_INVALID up front; it stays
 * invalid on a non-trusted VF (which may not issue this query) or if
 * firmware does not mark the port SVIF valid.  HWRM errors are
 * swallowed silently (HWRM_CHECK_RESULT_SILENT) and 0 is returned in
 * all cases.
 */
int bnxt_hwrm_port_mac_qcfg(struct bnxt *bp)
{
	struct hwrm_port_mac_qcfg_input req = {0};
	struct hwrm_port_mac_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	uint16_t port_svif_info;
	int rc;

	bp->port_svif = BNXT_SVIF_INVALID;

	if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp))
		return 0;

	HWRM_PREP(&req, HWRM_PORT_MAC_QCFG, BNXT_USE_CHIMP_MB);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT_SILENT();

	port_svif_info = rte_le_to_cpu_16(resp->port_svif_info);
	if (port_svif_info &
	    HWRM_PORT_MAC_QCFG_OUTPUT_PORT_SVIF_INFO_PORT_SVIF_VALID)
		bp->port_svif = port_svif_info &
			HWRM_PORT_MAC_QCFG_OUTPUT_PORT_SVIF_INFO_PORT_SVIF_MASK;

	HWRM_UNLOCK();

	return 0;
}
3443
/* Configure the PF's own resource allocation (fid 0xffff) from the
 * values in @pf_resc: MTU/MRU, ring, context, VNIC and (depending on
 * chip family) ring-group or MSI-X counts.
 *
 * Note the request is populated before HWRM_PREP; PREP fills in only
 * the message header fields, so the payload set here is preserved.
 *
 * @return 0 on success, negative value on HWRM failure.
 */
static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp,
				 struct bnxt_pf_resource_info *pf_resc)
{
	struct hwrm_func_cfg_input req = {0};
	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t enables;
	int rc;

	enables = HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
		  HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
		  HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
		  HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
		  HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
		  HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
		  HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
		  HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
		  HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS;

	/* Older chips use HW ring groups; newer (NQ-capable) chips take an
	 * MSI-X count instead.
	 */
	if (BNXT_HAS_RING_GRPS(bp)) {
		enables |= HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS;
		req.num_hw_ring_grps =
			rte_cpu_to_le_16(pf_resc->num_hw_ring_grps);
	} else if (BNXT_HAS_NQ(bp)) {
		enables |= HWRM_FUNC_CFG_INPUT_ENABLES_NUM_MSIX;
		req.num_msix = rte_cpu_to_le_16(bp->max_nq_rings);
	}

	req.flags = rte_cpu_to_le_32(bp->pf->func_cfg_flags);
	req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);
	req.mru = rte_cpu_to_le_16(BNXT_VNIC_MRU(bp->eth_dev->data->mtu));
	req.num_rsscos_ctxs = rte_cpu_to_le_16(pf_resc->num_rsscos_ctxs);
	req.num_stat_ctxs = rte_cpu_to_le_16(pf_resc->num_stat_ctxs);
	req.num_cmpl_rings = rte_cpu_to_le_16(pf_resc->num_cp_rings);
	req.num_tx_rings = rte_cpu_to_le_16(pf_resc->num_tx_rings);
	req.num_rx_rings = rte_cpu_to_le_16(pf_resc->num_rx_rings);
	req.num_l2_ctxs = rte_cpu_to_le_16(pf_resc->num_l2_ctxs);
	req.num_vnics = rte_cpu_to_le_16(bp->max_vnics);
	req.fid = rte_cpu_to_le_16(0xffff);
	req.enables = rte_cpu_to_le_32(enables);

	HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
3493
3494 /* min values are the guaranteed resources and max values are subject
3495  * to availability. The strategy for now is to keep both min & max
3496  * values the same.
3497  */
3498 static void
3499 bnxt_fill_vf_func_cfg_req_new(struct bnxt *bp,
3500                               struct hwrm_func_vf_resource_cfg_input *req,
3501                               int num_vfs)
3502 {
3503         req->max_rsscos_ctx = rte_cpu_to_le_16(bp->max_rsscos_ctx /
3504                                                (num_vfs + 1));
3505         req->min_rsscos_ctx = req->max_rsscos_ctx;
3506         req->max_stat_ctx = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
3507         req->min_stat_ctx = req->max_stat_ctx;
3508         req->max_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings /
3509                                                (num_vfs + 1));
3510         req->min_cmpl_rings = req->max_cmpl_rings;
3511         req->max_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
3512         req->min_tx_rings = req->max_tx_rings;
3513         req->max_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
3514         req->min_rx_rings = req->max_rx_rings;
3515         req->max_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
3516         req->min_l2_ctxs = req->max_l2_ctxs;
3517         /* TODO: For now, do not support VMDq/RFS on VFs. */
3518         req->max_vnics = rte_cpu_to_le_16(1);
3519         req->min_vnics = req->max_vnics;
3520         req->max_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
3521                                                  (num_vfs + 1));
3522         req->min_hw_ring_grps = req->max_hw_ring_grps;
3523         req->flags =
3524          rte_cpu_to_le_16(HWRM_FUNC_VF_RESOURCE_CFG_INPUT_FLAGS_MIN_GUARANTEED);
3525 }
3526
3527 static void
3528 bnxt_fill_vf_func_cfg_req_old(struct bnxt *bp,
3529                               struct hwrm_func_cfg_input *req,
3530                               int num_vfs)
3531 {
3532         req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
3533                         HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
3534                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
3535                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
3536                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
3537                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
3538                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
3539                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
3540                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
3541                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
3542
3543         req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
3544                                     RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE *
3545                                     BNXT_NUM_VLANS);
3546         req->mru = rte_cpu_to_le_16(BNXT_VNIC_MRU(bp->eth_dev->data->mtu));
3547         req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx /
3548                                                 (num_vfs + 1));
3549         req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
3550         req->num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings /
3551                                                (num_vfs + 1));
3552         req->num_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
3553         req->num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
3554         req->num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
3555         /* TODO: For now, do not support VMDq/RFS on VFs. */
3556         req->num_vnics = rte_cpu_to_le_16(1);
3557         req->num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
3558                                                  (num_vfs + 1));
3559 }
3560
3561 /* Update the port wide resource values based on how many resources
3562  * got allocated to the VF.
3563  */
static int bnxt_update_max_resources(struct bnxt *bp,
				     int vf)
{
	struct hwrm_func_qcfg_input req = {0};
	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	/* Get the actual allocated values now */
	HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
	req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
	HWRM_CHECK_RESULT();

	/* Subtract what firmware actually granted the VF from the pool
	 * remaining for the PF and the not-yet-configured VFs.
	 */
	bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->alloc_rsscos_ctx);
	bp->max_stat_ctx -= rte_le_to_cpu_16(resp->alloc_stat_ctx);
	bp->max_cp_rings -= rte_le_to_cpu_16(resp->alloc_cmpl_rings);
	bp->max_tx_rings -= rte_le_to_cpu_16(resp->alloc_tx_rings);
	bp->max_rx_rings -= rte_le_to_cpu_16(resp->alloc_rx_rings);
	bp->max_l2_ctx -= rte_le_to_cpu_16(resp->alloc_l2_ctx);
	bp->max_ring_grps -= rte_le_to_cpu_16(resp->alloc_hw_ring_grps);

	HWRM_UNLOCK();

	/* HWRM failures return early via HWRM_CHECK_RESULT. */
	return 0;
}
3589
3590 /* Update the PF resource values based on how many resources
3591  * got allocated to it.
3592  */
static int bnxt_update_max_resources_pf_only(struct bnxt *bp)
{
	struct hwrm_func_qcfg_input req = {0};
	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	/* Get the actual allocated values now */
	HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
	req.fid = rte_cpu_to_le_16(0xffff);	/* 0xffff = query self */
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
	HWRM_CHECK_RESULT();

	/* With no VFs, firmware's allocation to the PF *is* the new
	 * resource ceiling; overwrite (not subtract) the cached maxima.
	 */
	bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->alloc_rsscos_ctx);
	bp->max_stat_ctx = rte_le_to_cpu_16(resp->alloc_stat_ctx);
	bp->max_cp_rings = rte_le_to_cpu_16(resp->alloc_cmpl_rings);
	bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
	bp->max_rx_rings = rte_le_to_cpu_16(resp->alloc_rx_rings);
	bp->max_l2_ctx = rte_le_to_cpu_16(resp->alloc_l2_ctx);
	bp->max_ring_grps = rte_le_to_cpu_16(resp->alloc_hw_ring_grps);
	bp->max_vnics = rte_le_to_cpu_16(resp->alloc_vnics);

	HWRM_UNLOCK();

	/* HWRM failures return early via HWRM_CHECK_RESULT. */
	return 0;
}
3618
/* Return the VLAN currently configured on VF @vf.
 *
 * NOTE: the return value is overloaded — a non-negative value is the
 * VLAN id read from the response; a negative value is an HWRM error
 * (returned early by HWRM_CHECK_RESULT).
 */
int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
{
	struct hwrm_func_qcfg_input req = {0};
	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	/* Check for zero MAC address */
	HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
	req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
	HWRM_CHECK_RESULT();
	rc = rte_le_to_cpu_16(resp->vlan);

	HWRM_UNLOCK();

	return rc;
}
3636
/* Read the PF's firmware-allocated resource counts (fid 0xffff = self)
 * into @pf_resc, and cache the EVB mode in bp->pf.
 *
 * @return 0 on success, negative value on HWRM failure.
 */
static int bnxt_query_pf_resources(struct bnxt *bp,
				   struct bnxt_pf_resource_info *pf_resc)
{
	struct hwrm_func_qcfg_input req = {0};
	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	/* And copy the allocated numbers into the pf struct */
	HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
	req.fid = rte_cpu_to_le_16(0xffff);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
	HWRM_CHECK_RESULT();

	pf_resc->num_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
	pf_resc->num_rsscos_ctxs = rte_le_to_cpu_16(resp->alloc_rsscos_ctx);
	pf_resc->num_stat_ctxs = rte_le_to_cpu_16(resp->alloc_stat_ctx);
	pf_resc->num_cp_rings = rte_le_to_cpu_16(resp->alloc_cmpl_rings);
	pf_resc->num_rx_rings = rte_le_to_cpu_16(resp->alloc_rx_rings);
	pf_resc->num_l2_ctxs = rte_le_to_cpu_16(resp->alloc_l2_ctx);
	pf_resc->num_hw_ring_grps = rte_le_to_cpu_32(resp->alloc_hw_ring_grps);
	bp->pf->evb_mode = resp->evb_mode;

	HWRM_UNLOCK();

	return rc;
}
3663
3664 static void
3665 bnxt_calculate_pf_resources(struct bnxt *bp,
3666                             struct bnxt_pf_resource_info *pf_resc,
3667                             int num_vfs)
3668 {
3669         if (!num_vfs) {
3670                 pf_resc->num_rsscos_ctxs = bp->max_rsscos_ctx;
3671                 pf_resc->num_stat_ctxs = bp->max_stat_ctx;
3672                 pf_resc->num_cp_rings = bp->max_cp_rings;
3673                 pf_resc->num_tx_rings = bp->max_tx_rings;
3674                 pf_resc->num_rx_rings = bp->max_rx_rings;
3675                 pf_resc->num_l2_ctxs = bp->max_l2_ctx;
3676                 pf_resc->num_hw_ring_grps = bp->max_ring_grps;
3677
3678                 return;
3679         }
3680
3681         pf_resc->num_rsscos_ctxs = bp->max_rsscos_ctx / (num_vfs + 1) +
3682                                    bp->max_rsscos_ctx % (num_vfs + 1);
3683         pf_resc->num_stat_ctxs = bp->max_stat_ctx / (num_vfs + 1) +
3684                                  bp->max_stat_ctx % (num_vfs + 1);
3685         pf_resc->num_cp_rings = bp->max_cp_rings / (num_vfs + 1) +
3686                                 bp->max_cp_rings % (num_vfs + 1);
3687         pf_resc->num_tx_rings = bp->max_tx_rings / (num_vfs + 1) +
3688                                 bp->max_tx_rings % (num_vfs + 1);
3689         pf_resc->num_rx_rings = bp->max_rx_rings / (num_vfs + 1) +
3690                                 bp->max_rx_rings % (num_vfs + 1);
3691         pf_resc->num_l2_ctxs = bp->max_l2_ctx / (num_vfs + 1) +
3692                                bp->max_l2_ctx % (num_vfs + 1);
3693         pf_resc->num_hw_ring_grps = bp->max_ring_grps / (num_vfs + 1) +
3694                                     bp->max_ring_grps % (num_vfs + 1);
3695 }
3696
3697 int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
3698 {
3699         struct bnxt_pf_resource_info pf_resc = { 0 };
3700         int rc;
3701
3702         if (!BNXT_PF(bp)) {
3703                 PMD_DRV_LOG(ERR, "Attempt to allcoate VFs on a VF!\n");
3704                 return -EINVAL;
3705         }
3706
3707         rc = bnxt_hwrm_func_qcaps(bp);
3708         if (rc)
3709                 return rc;
3710
3711         bnxt_calculate_pf_resources(bp, &pf_resc, 0);
3712
3713         bp->pf->func_cfg_flags &=
3714                 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
3715                   HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
3716         bp->pf->func_cfg_flags |=
3717                 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
3718
3719         rc = bnxt_hwrm_pf_func_cfg(bp, &pf_resc);
3720         if (rc)
3721                 return rc;
3722
3723         rc = bnxt_update_max_resources_pf_only(bp);
3724
3725         return rc;
3726 }
3727
3728 static int
3729 bnxt_configure_vf_req_buf(struct bnxt *bp, int num_vfs)
3730 {
3731         size_t req_buf_sz, sz;
3732         int i, rc;
3733
3734         req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;
3735         bp->pf->vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
3736                 page_roundup(num_vfs * HWRM_MAX_REQ_LEN));
3737         if (bp->pf->vf_req_buf == NULL) {
3738                 return -ENOMEM;
3739         }
3740
3741         for (sz = 0; sz < req_buf_sz; sz += getpagesize())
3742                 rte_mem_lock_page(((char *)bp->pf->vf_req_buf) + sz);
3743
3744         for (i = 0; i < num_vfs; i++)
3745                 bp->pf->vf_info[i].req_buf = ((char *)bp->pf->vf_req_buf) +
3746                                              (i * HWRM_MAX_REQ_LEN);
3747
3748         rc = bnxt_hwrm_func_buf_rgtr(bp, num_vfs);
3749         if (rc)
3750                 rte_free(bp->pf->vf_req_buf);
3751
3752         return rc;
3753 }
3754
/* Configure each VF's guaranteed resources via FUNC_VF_RESOURCE_CFG,
 * stopping at the first VF that firmware rejects.  For every VF that
 * succeeds: shrink the port-wide resource pool, bump active_vfs and
 * clear the VF's stats.
 *
 * NOTE(review): this always returns 0, even when the loop breaks on a
 * failed VF — callers appear to rely on bp->pf->active_vfs instead of
 * the return code to see how many VFs came up; confirm before changing.
 */
static int
bnxt_process_vf_resc_config_new(struct bnxt *bp, int num_vfs)
{
	struct hwrm_func_vf_resource_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_func_vf_resource_cfg_input req = {0};
	int i, rc = 0;

	/* The same equal-share request body is reused for every VF;
	 * only vf_id changes per iteration.
	 */
	bnxt_fill_vf_func_cfg_req_new(bp, &req, num_vfs);
	bp->pf->active_vfs = 0;
	for (i = 0; i < num_vfs; i++) {
		HWRM_PREP(&req, HWRM_FUNC_VF_RESOURCE_CFG, BNXT_USE_CHIMP_MB);
		req.vf_id = rte_cpu_to_le_16(bp->pf->vf_info[i].fid);
		rc = bnxt_hwrm_send_message(bp,
					    &req,
					    sizeof(req),
					    BNXT_USE_CHIMP_MB);
		if (rc || resp->error_code) {
			PMD_DRV_LOG(ERR,
				"Failed to initialize VF %d\n", i);
			PMD_DRV_LOG(ERR,
				"Not all VFs available. (%d, %d)\n",
				rc, resp->error_code);
			HWRM_UNLOCK();

			/* If the first VF configuration itself fails,
			 * unregister the vf_fwd_request buffer.
			 */
			if (i == 0)
				bnxt_hwrm_func_buf_unrgtr(bp);
			break;
		}
		HWRM_UNLOCK();

		/* Update the max resource values based on the resource values
		 * allocated to the VF.
		 */
		bnxt_update_max_resources(bp, i);
		bp->pf->active_vfs++;
		bnxt_hwrm_func_clr_stats(bp, bp->pf->vf_info[i].fid);
	}

	return 0;
}
3798
/*
 * Configure per-VF resources via legacy HWRM_FUNC_CFG (pre-NEW_RM
 * firmware). Same contract as bnxt_process_vf_resc_config_new():
 * stops at the first failing VF, counts successes in active_vfs,
 * and always returns 0.
 */
static int
bnxt_process_vf_resc_config_old(struct bnxt *bp, int num_vfs)
{
	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_func_cfg_input req = {0};
	int i, rc;

	bnxt_fill_vf_func_cfg_req_old(bp, &req, num_vfs);

	bp->pf->active_vfs = 0;
	for (i = 0; i < num_vfs; i++) {
		HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
		req.flags = rte_cpu_to_le_32(bp->pf->vf_info[i].func_cfg_flags);
		req.fid = rte_cpu_to_le_16(bp->pf->vf_info[i].fid);
		rc = bnxt_hwrm_send_message(bp,
					    &req,
					    sizeof(req),
					    BNXT_USE_CHIMP_MB);

		/* Clear enable flag for next pass */
		req.enables &= ~rte_cpu_to_le_32(
				HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);

		if (rc || resp->error_code) {
			PMD_DRV_LOG(ERR,
				"Failed to initialize VF %d\n", i);
			PMD_DRV_LOG(ERR,
				"Not all VFs available. (%d, %d)\n",
				rc, resp->error_code);
			HWRM_UNLOCK();

			/* If the first VF configuration itself fails,
			 * unregister the vf_fwd_request buffer.
			 */
			if (i == 0)
				bnxt_hwrm_func_buf_unrgtr(bp);
			break;
		}

		HWRM_UNLOCK();

		/* Update the max resource values based on the resource values
		 * allocated to the VF.
		 */
		bnxt_update_max_resources(bp, i);
		bp->pf->active_vfs++;
		bnxt_hwrm_func_clr_stats(bp, bp->pf->vf_info[i].fid);
	}

	/* NOTE(review): deliberately returns 0 even on partial failure. */
	return 0;
}
3850
3851 static void
3852 bnxt_configure_vf_resources(struct bnxt *bp, int num_vfs)
3853 {
3854         if (bp->flags & BNXT_FLAG_NEW_RM)
3855                 bnxt_process_vf_resc_config_new(bp, num_vfs);
3856         else
3857                 bnxt_process_vf_resc_config_old(bp, num_vfs);
3858 }
3859
3860 static void
3861 bnxt_update_pf_resources(struct bnxt *bp,
3862                          struct bnxt_pf_resource_info *pf_resc)
3863 {
3864         bp->max_rsscos_ctx = pf_resc->num_rsscos_ctxs;
3865         bp->max_stat_ctx = pf_resc->num_stat_ctxs;
3866         bp->max_cp_rings = pf_resc->num_cp_rings;
3867         bp->max_tx_rings = pf_resc->num_tx_rings;
3868         bp->max_rx_rings = pf_resc->num_rx_rings;
3869         bp->max_ring_grps = pf_resc->num_hw_ring_grps;
3870 }
3871
3872 static int32_t
3873 bnxt_configure_pf_resources(struct bnxt *bp,
3874                             struct bnxt_pf_resource_info *pf_resc)
3875 {
3876         /*
3877          * We're using STD_TX_RING_MODE here which will limit the TX
3878          * rings. This will allow QoS to function properly. Not setting this
3879          * will cause PF rings to break bandwidth settings.
3880          */
3881         bp->pf->func_cfg_flags &=
3882                 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
3883                   HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
3884         bp->pf->func_cfg_flags |=
3885                 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
3886         return bnxt_hwrm_pf_func_cfg(bp, pf_resc);
3887 }
3888
3889 int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
3890 {
3891         struct bnxt_pf_resource_info pf_resc = { 0 };
3892         int rc;
3893
3894         if (!BNXT_PF(bp)) {
3895                 PMD_DRV_LOG(ERR, "Attempt to allocate VFs on a VF!\n");
3896                 return -EINVAL;
3897         }
3898
3899         rc = bnxt_hwrm_func_qcaps(bp);
3900         if (rc)
3901                 return rc;
3902
3903         bnxt_calculate_pf_resources(bp, &pf_resc, num_vfs);
3904
3905         rc = bnxt_configure_pf_resources(bp, &pf_resc);
3906         if (rc)
3907                 return rc;
3908
3909         rc = bnxt_query_pf_resources(bp, &pf_resc);
3910         if (rc)
3911                 return rc;
3912
3913         /*
3914          * Now, create and register a buffer to hold forwarded VF requests
3915          */
3916         rc = bnxt_configure_vf_req_buf(bp, num_vfs);
3917         if (rc)
3918                 return rc;
3919
3920         bnxt_configure_vf_resources(bp, num_vfs);
3921
3922         bnxt_update_pf_resources(bp, &pf_resc);
3923
3924         return 0;
3925 }
3926
/*
 * Program the PF's EVB (Edge Virtual Bridging) mode, taken from
 * bp->pf->evb_mode, into the firmware via HWRM_FUNC_CFG.
 * fid 0xffff addresses the calling function itself.
 */
int bnxt_hwrm_pf_evb_mode(struct bnxt *bp)
{
	struct hwrm_func_cfg_input req = {0};
	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);

	req.fid = rte_cpu_to_le_16(0xffff);
	req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE);
	req.evb_mode = bp->pf->evb_mode;

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
3945
/*
 * Allocate a tunnel destination UDP port (VXLAN or Geneve) in the
 * firmware and cache both the firmware-assigned port id and the
 * host-order port number on struct bnxt.
 *
 * @param port         UDP port in host byte order (sent big-endian,
 *                     i.e. network order, per the HWRM spec)
 * @param tunnel_type  HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_*
 */
int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
				uint8_t tunnel_type)
{
	struct hwrm_tunnel_dst_port_alloc_input req = {0};
	struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
	int rc = 0;

	HWRM_PREP(&req, HWRM_TUNNEL_DST_PORT_ALLOC, BNXT_USE_CHIMP_MB);
	req.tunnel_type = tunnel_type;
	req.tunnel_dst_port_val = rte_cpu_to_be_16(port);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
	HWRM_CHECK_RESULT();

	/* Cache the firmware port id for the later _free() call. */
	switch (tunnel_type) {
	case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN:
		bp->vxlan_fw_dst_port_id =
			rte_le_to_cpu_16(resp->tunnel_dst_port_id);
		bp->vxlan_port = port;
		break;
	case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE:
		bp->geneve_fw_dst_port_id =
			rte_le_to_cpu_16(resp->tunnel_dst_port_id);
		bp->geneve_port = port;
		break;
	default:
		break;
	}

	HWRM_UNLOCK();

	return rc;
}
3978
/*
 * Free a previously allocated tunnel destination port in the firmware
 * and clear the corresponding cached port/refcount on struct bnxt.
 *
 * NOTE(review): 'port' is byte-swapped with rte_cpu_to_be_16() before
 * being placed in tunnel_dst_port_id — callers are expected to pass a
 * value for which this produces the firmware id; verify against the
 * call sites.
 */
int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
				uint8_t tunnel_type)
{
	struct hwrm_tunnel_dst_port_free_input req = {0};
	struct hwrm_tunnel_dst_port_free_output *resp = bp->hwrm_cmd_resp_addr;
	int rc = 0;

	HWRM_PREP(&req, HWRM_TUNNEL_DST_PORT_FREE, BNXT_USE_CHIMP_MB);

	req.tunnel_type = tunnel_type;
	req.tunnel_dst_port_id = rte_cpu_to_be_16(port);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	if (tunnel_type ==
	    HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN) {
		bp->vxlan_port = 0;
		bp->vxlan_port_cnt = 0;
	}

	if (tunnel_type ==
	    HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE) {
		bp->geneve_port = 0;
		bp->geneve_port_cnt = 0;
	}

	return rc;
}
4009
/*
 * Send HWRM_FUNC_CFG for the given VF with only the supplied flags set
 * (no 'enables' fields) — used to toggle per-VF behavior flags.
 */
int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf,
					uint32_t flags)
{
	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_func_cfg_input req = {0};
	int rc;

	HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);

	req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
	req.flags = rte_cpu_to_le_32(flags);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
4028
4029 void vf_vnic_set_rxmask_cb(struct bnxt_vnic_info *vnic, void *flagp)
4030 {
4031         uint32_t *flag = flagp;
4032
4033         vnic->flags = *flag;
4034 }
4035
/* Program the VNIC RX mask with an empty VLAN table (count 0, no table). */
int bnxt_set_rx_mask_no_vlan(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc;

	rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);

	return rc;
}
4040
/*
 * Register the VF request-forwarding buffer with the firmware
 * (HWRM_FUNC_BUF_RGTR). The buffer is described as a single page
 * whose size covers num_vfs slots of HWRM_MAX_REQ_LEN bytes each;
 * req_buf_len is the per-request slot length.
 *
 * Returns 0 on success, -ENOMEM if the buffer's IOVA cannot be
 * resolved, or the HWRM error.
 */
int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp, int num_vfs)
{
	struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
	int rc;

	HWRM_PREP(&req, HWRM_FUNC_BUF_RGTR, BNXT_USE_CHIMP_MB);

	req.req_buf_num_pages = rte_cpu_to_le_16(1);
	/* log2 of the whole buffer size, rounded up by page_getenum(). */
	req.req_buf_page_size =
		rte_cpu_to_le_16(page_getenum(num_vfs * HWRM_MAX_REQ_LEN));
	req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
	req.req_buf_page_addr0 =
		rte_cpu_to_le_64(rte_malloc_virt2iova(bp->pf->vf_req_buf));
	if (req.req_buf_page_addr0 == RTE_BAD_IOVA) {
		PMD_DRV_LOG(ERR,
			"unable to map buffer address to physical memory\n");
		HWRM_UNLOCK();
		return -ENOMEM;
	}

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
4069
/*
 * Unregister the VF request-forwarding buffer (HWRM_FUNC_BUF_UNRGTR).
 * No-op (returns 0) unless this is a PF with SR-IOV VFs possible.
 */
int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 };
	struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;

	if (!(BNXT_PF(bp) && bp->pdev->max_vfs))
		return 0;

	HWRM_PREP(&req, HWRM_FUNC_BUF_UNRGTR, BNXT_USE_CHIMP_MB);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
4088
/*
 * PF variant: point the firmware's async event completion ring at this
 * port's async completion ring via HWRM_FUNC_CFG (fid 0xffff = self).
 */
int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
{
	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_func_cfg_input req = {0};
	int rc;

	HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);

	req.fid = rte_cpu_to_le_16(0xffff);
	req.flags = rte_cpu_to_le_32(bp->pf->func_cfg_flags);
	req.enables = rte_cpu_to_le_32(
			HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
	req.async_event_cr = rte_cpu_to_le_16(
			bp->async_cp_ring->cp_ring_struct->fw_ring_id);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
4110
/*
 * VF variant: same as bnxt_hwrm_func_cfg_def_cp() but via
 * HWRM_FUNC_VF_CFG, which implicitly targets the calling VF.
 */
int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
{
	struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_func_vf_cfg_input req = {0};
	int rc;

	HWRM_PREP(&req, HWRM_FUNC_VF_CFG, BNXT_USE_CHIMP_MB);

	req.enables = rte_cpu_to_le_32(
			HWRM_FUNC_VF_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
	req.async_event_cr = rte_cpu_to_le_16(
			bp->async_cp_ring->cp_ring_struct->fw_ring_id);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
4130
/*
 * Program the default VLAN for either a VF (is_vf != 0, 'vf' indexes
 * vf_info[]) or the PF itself (fid 0xffff, VLAN from bp->vlan).
 */
int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf)
{
	struct hwrm_func_cfg_input req = {0};
	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	uint16_t dflt_vlan, fid;
	uint32_t func_cfg_flags;
	int rc = 0;

	HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);

	if (is_vf) {
		dflt_vlan = bp->pf->vf_info[vf].dflt_vlan;
		fid = bp->pf->vf_info[vf].fid;
		func_cfg_flags = bp->pf->vf_info[vf].func_cfg_flags;
	} else {
		/* NOTE(review): fid is byte-swapped again when stored in
		 * req.fid below; harmless because 0xffff is symmetric
		 * under byte swapping.
		 */
		fid = rte_cpu_to_le_16(0xffff);
		func_cfg_flags = bp->pf->func_cfg_flags;
		dflt_vlan = bp->vlan;
	}

	req.flags = rte_cpu_to_le_32(func_cfg_flags);
	req.fid = rte_cpu_to_le_16(fid);
	req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
	req.dflt_vlan = rte_cpu_to_le_16(dflt_vlan);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
4163
/*
 * Configure the bandwidth cap for a VF. 'enables' selects which
 * bandwidth field(s) the firmware should apply (caller passes the
 * appropriate HWRM_FUNC_CFG_INPUT_ENABLES_* bits).
 */
int bnxt_hwrm_func_bw_cfg(struct bnxt *bp, uint16_t vf,
			uint16_t max_bw, uint16_t enables)
{
	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_func_cfg_input req = {0};
	int rc;

	HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);

	req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
	req.enables |= rte_cpu_to_le_32(enables);
	req.flags = rte_cpu_to_le_32(bp->pf->vf_info[vf].func_cfg_flags);
	req.max_bw = rte_cpu_to_le_32(max_bw);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
4184
/*
 * Push the VF's cached default VLAN (bp->pf->vf_info[vf].dflt_vlan)
 * to the firmware via HWRM_FUNC_CFG.
 */
int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf)
{
	struct hwrm_func_cfg_input req = {0};
	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	int rc = 0;

	HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);

	req.flags = rte_cpu_to_le_32(bp->pf->vf_info[vf].func_cfg_flags);
	req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
	req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
	req.dflt_vlan = rte_cpu_to_le_16(bp->pf->vf_info[vf].dflt_vlan);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
4205
/*
 * Register the async event completion ring with the firmware, using
 * the PF or VF flavor of FUNC_CFG as appropriate for this function.
 */
int bnxt_hwrm_set_async_event_cr(struct bnxt *bp)
{
	return BNXT_PF(bp) ? bnxt_hwrm_func_cfg_def_cp(bp) :
			     bnxt_hwrm_vf_func_cfg_def_cp(bp);
}
4217
/*
 * Reject a forwarded VF request: send the encapsulated request back to
 * the firmware via HWRM_REJECT_FWD_RESP so the originating VF gets an
 * error completion.
 *
 * Returns -1 if the encapsulated request does not fit in the message,
 * otherwise the HWRM result.
 */
int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
			      void *encaped, size_t ec_size)
{
	int rc = 0;
	struct hwrm_reject_fwd_resp_input req = {.req_type = 0};
	struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;

	if (ec_size > sizeof(req.encap_request))
		return -1;

	HWRM_PREP(&req, HWRM_REJECT_FWD_RESP, BNXT_USE_CHIMP_MB);

	req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
	memcpy(req.encap_request, encaped, ec_size);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
4240
/*
 * Read the VF's currently configured default MAC address from the
 * firmware (HWRM_FUNC_QCFG) into @mac.
 */
int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
				       struct rte_ether_addr *mac)
{
	struct hwrm_func_qcfg_input req = {0};
	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);

	req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

	/* Copy out of the response buffer while the HWRM lock is held. */
	memcpy(mac->addr_bytes, resp->mac_address, RTE_ETHER_ADDR_LEN);

	HWRM_UNLOCK();

	return rc;
}
4261
/*
 * Execute a forwarded VF request on its behalf: wrap the encapsulated
 * request in HWRM_EXEC_FWD_RESP so the firmware runs it and responds
 * to the originating VF.
 *
 * Returns -1 if the encapsulated request does not fit in the message,
 * otherwise the HWRM result.
 */
int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
			    void *encaped, size_t ec_size)
{
	int rc = 0;
	struct hwrm_exec_fwd_resp_input req = {.req_type = 0};
	struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;

	if (ec_size > sizeof(req.encap_request))
		return -1;

	HWRM_PREP(&req, HWRM_EXEC_FWD_RESP, BNXT_USE_CHIMP_MB);

	req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
	memcpy(req.encap_request, encaped, ec_size);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
4284
/*
 * Query a statistics context (HWRM_STAT_CTX_QUERY) and fold the
 * counters into the per-queue slots of @stats.
 *
 * @param cid  firmware stat context id
 * @param idx  queue index into stats->q_* arrays
 * @param rx   non-zero: fill RX counters (ipackets/ibytes/errors);
 *             zero: fill TX counters (opackets/obytes)
 */
int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
			 struct rte_eth_stats *stats, uint8_t rx)
{
	int rc = 0;
	struct hwrm_stat_ctx_query_input req = {.req_type = 0};
	struct hwrm_stat_ctx_query_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(&req, HWRM_STAT_CTX_QUERY, BNXT_USE_CHIMP_MB);

	req.stat_ctx_id = rte_cpu_to_le_32(cid);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

	if (rx) {
		/* packets = ucast + mcast + bcast; errors = discard + error */
		stats->q_ipackets[idx] = rte_le_to_cpu_64(resp->rx_ucast_pkts);
		stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_mcast_pkts);
		stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_bcast_pkts);
		stats->q_ibytes[idx] = rte_le_to_cpu_64(resp->rx_ucast_bytes);
		stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_mcast_bytes);
		stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_bcast_bytes);
		stats->q_errors[idx] = rte_le_to_cpu_64(resp->rx_discard_pkts);
		stats->q_errors[idx] += rte_le_to_cpu_64(resp->rx_error_pkts);
	} else {
		stats->q_opackets[idx] = rte_le_to_cpu_64(resp->tx_ucast_pkts);
		stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_mcast_pkts);
		stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_bcast_pkts);
		stats->q_obytes[idx] = rte_le_to_cpu_64(resp->tx_ucast_bytes);
		stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_mcast_bytes);
		stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_bcast_bytes);
	}

	HWRM_UNLOCK();

	return rc;
}
4322
/*
 * Ask the firmware to DMA the port's TX/RX statistics blocks into the
 * host buffers previously mapped at hw_tx/rx_port_stats_map.
 */
int bnxt_hwrm_port_qstats(struct bnxt *bp)
{
	struct hwrm_port_qstats_input req = {0};
	struct hwrm_port_qstats_output *resp = bp->hwrm_cmd_resp_addr;
	struct bnxt_pf_info *pf = bp->pf;
	int rc;

	HWRM_PREP(&req, HWRM_PORT_QSTATS, BNXT_USE_CHIMP_MB);

	req.port_id = rte_cpu_to_le_16(pf->port_id);
	req.tx_stat_host_addr = rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
	req.rx_stat_host_addr = rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
4342
/*
 * Clear the port's hardware statistics. Silently returns 0 on
 * configurations where the operation is not permitted.
 */
int bnxt_hwrm_port_clr_stats(struct bnxt *bp)
{
	struct hwrm_port_clr_stats_input req = {0};
	struct hwrm_port_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
	struct bnxt_pf_info *pf = bp->pf;
	int rc;

	/* Not allowed on NS2 device, NPAR, MultiHost, VF */
	if (!(bp->flags & BNXT_FLAG_PORT_STATS) || BNXT_VF(bp) ||
	    BNXT_NPAR(bp) || BNXT_MH(bp) || BNXT_TOTAL_VFS(bp))
		return 0;

	HWRM_PREP(&req, HWRM_PORT_CLR_STATS, BNXT_USE_CHIMP_MB);

	req.port_id = rte_cpu_to_le_16(pf->port_id);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
4365
/*
 * Query LED capabilities for the port and cache them in bp->leds.
 * If any LED lacks a group id or ALT-blink capability, the whole set
 * is treated as unusable (num_leds forced to 0). No-op on VFs.
 */
int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
{
	struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_port_led_qcaps_input req = {0};
	int rc;

	if (BNXT_VF(bp))
		return 0;

	HWRM_PREP(&req, HWRM_PORT_LED_QCAPS, BNXT_USE_CHIMP_MB);
	req.port_id = bp->pf->port_id;
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

	if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
		unsigned int i;

		/* led0_id starts the packed array of per-LED records in
		 * the response; copy them into the bp->leds array.
		 */
		bp->leds->num_leds = resp->num_leds;
		memcpy(bp->leds, &resp->led0_id,
			sizeof(bp->leds[0]) * bp->leds->num_leds);
		for (i = 0; i < bp->leds->num_leds; i++) {
			struct bnxt_led_info *led = &bp->leds[i];

			uint16_t caps = led->led_state_caps;

			if (!led->led_group_id ||
				!BNXT_LED_ALT_BLINK_CAP(caps)) {
				bp->leds->num_leds = 0;
				break;
			}
		}
	}

	HWRM_UNLOCK();

	return rc;
}
4404
/*
 * Turn the port identification LEDs on (alternating blink, 500ms
 * on/off) or restore them to their default state.
 * Returns -EOPNOTSUPP when no usable LEDs were discovered or on a VF.
 */
int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on)
{
	struct hwrm_port_led_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_port_led_cfg_input req = {0};
	struct bnxt_led_cfg *led_cfg;
	uint8_t led_state = HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_DEFAULT;
	uint16_t duration = 0;
	int rc, i;

	if (!bp->leds->num_leds || BNXT_VF(bp))
		return -EOPNOTSUPP;

	HWRM_PREP(&req, HWRM_PORT_LED_CFG, BNXT_USE_CHIMP_MB);

	if (led_on) {
		led_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT;
		duration = rte_cpu_to_le_16(500);
	}
	req.port_id = bp->pf->port_id;
	req.num_leds = bp->leds->num_leds;
	/* led0_id starts the packed per-LED config records in the request. */
	led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
	for (i = 0; i < bp->leds->num_leds; i++, led_cfg++) {
		req.enables |= BNXT_LED_DFLT_ENABLES(i);
		led_cfg->led_id = bp->leds[i].led_id;
		led_cfg->led_state = led_state;
		led_cfg->led_blink_on = duration;
		led_cfg->led_blink_off = duration;
		led_cfg->led_group_id = bp->leds[i].led_group_id;
	}

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
4442
/*
 * Query the NVM directory header: number of directory entries and the
 * size in bytes of each entry.
 *
 * @param[out] entries  number of directory entries
 * @param[out] length   size of one directory entry in bytes
 */
int bnxt_hwrm_nvm_get_dir_info(struct bnxt *bp, uint32_t *entries,
			       uint32_t *length)
{
	int rc;
	struct hwrm_nvm_get_dir_info_input req = {0};
	struct hwrm_nvm_get_dir_info_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(&req, HWRM_NVM_GET_DIR_INFO, BNXT_USE_CHIMP_MB);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

	*entries = rte_le_to_cpu_32(resp->entries);
	*length = rte_le_to_cpu_32(resp->entry_length);

	HWRM_UNLOCK();
	return rc;
}
4462
4463 int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data)
4464 {
4465         int rc;
4466         uint32_t dir_entries;
4467         uint32_t entry_length;
4468         uint8_t *buf;
4469         size_t buflen;
4470         rte_iova_t dma_handle;
4471         struct hwrm_nvm_get_dir_entries_input req = {0};
4472         struct hwrm_nvm_get_dir_entries_output *resp = bp->hwrm_cmd_resp_addr;
4473
4474         rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length);
4475         if (rc != 0)
4476                 return rc;
4477
4478         *data++ = dir_entries;
4479         *data++ = entry_length;
4480         len -= 2;
4481         memset(data, 0xff, len);
4482
4483         buflen = dir_entries * entry_length;
4484         buf = rte_malloc("nvm_dir", buflen, 0);
4485         if (buf == NULL)
4486                 return -ENOMEM;
4487         dma_handle = rte_malloc_virt2iova(buf);
4488         if (dma_handle == RTE_BAD_IOVA) {
4489                 rte_free(buf);
4490                 PMD_DRV_LOG(ERR,
4491                         "unable to map response address to physical memory\n");
4492                 return -ENOMEM;
4493         }
4494         HWRM_PREP(&req, HWRM_NVM_GET_DIR_ENTRIES, BNXT_USE_CHIMP_MB);
4495         req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
4496         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4497
4498         if (rc == 0)
4499                 memcpy(data, buf, len > buflen ? buflen : len);
4500
4501         rte_free(buf);
4502         HWRM_CHECK_RESULT();
4503         HWRM_UNLOCK();
4504
4505         return rc;
4506 }
4507
4508 int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index,
4509                              uint32_t offset, uint32_t length,
4510                              uint8_t *data)
4511 {
4512         int rc;
4513         uint8_t *buf;
4514         rte_iova_t dma_handle;
4515         struct hwrm_nvm_read_input req = {0};
4516         struct hwrm_nvm_read_output *resp = bp->hwrm_cmd_resp_addr;
4517
4518         buf = rte_malloc("nvm_item", length, 0);
4519         if (!buf)
4520                 return -ENOMEM;
4521
4522         dma_handle = rte_malloc_virt2iova(buf);
4523         if (dma_handle == RTE_BAD_IOVA) {
4524                 rte_free(buf);
4525                 PMD_DRV_LOG(ERR,
4526                         "unable to map response address to physical memory\n");
4527                 return -ENOMEM;
4528         }
4529         HWRM_PREP(&req, HWRM_NVM_READ, BNXT_USE_CHIMP_MB);
4530         req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
4531         req.dir_idx = rte_cpu_to_le_16(index);
4532         req.offset = rte_cpu_to_le_32(offset);
4533         req.len = rte_cpu_to_le_32(length);
4534         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4535         if (rc == 0)
4536                 memcpy(data, buf, length);
4537
4538         rte_free(buf);
4539         HWRM_CHECK_RESULT();
4540         HWRM_UNLOCK();
4541
4542         return rc;
4543 }
4544
/*
 * Erase the NVM directory entry at 'index' (HWRM_NVM_ERASE_DIR_ENTRY).
 */
int bnxt_hwrm_erase_nvram_directory(struct bnxt *bp, uint8_t index)
{
	int rc;
	struct hwrm_nvm_erase_dir_entry_input req = {0};
	struct hwrm_nvm_erase_dir_entry_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(&req, HWRM_NVM_ERASE_DIR_ENTRY, BNXT_USE_CHIMP_MB);
	req.dir_idx = rte_cpu_to_le_16(index);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
4559
4560
/*
 * Write an item into NVM (HWRM_NVM_WRITE). The payload is copied into
 * a DMA-able bounce buffer that the firmware reads from.
 *
 * @param dir_type/dir_ordinal/dir_ext/dir_attr  directory entry
 *        identification/attributes as defined by the HWRM spec
 * @param data/data_len  payload to flash
 * @return 0 on success, -ENOMEM on allocation/mapping failure, or the
 *         HWRM error
 */
int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
			  uint16_t dir_ordinal, uint16_t dir_ext,
			  uint16_t dir_attr, const uint8_t *data,
			  size_t data_len)
{
	int rc;
	struct hwrm_nvm_write_input req = {0};
	struct hwrm_nvm_write_output *resp = bp->hwrm_cmd_resp_addr;
	rte_iova_t dma_handle;
	uint8_t *buf;

	buf = rte_malloc("nvm_write", data_len, 0);
	if (!buf)
		return -ENOMEM;

	dma_handle = rte_malloc_virt2iova(buf);
	if (dma_handle == RTE_BAD_IOVA) {
		rte_free(buf);
		PMD_DRV_LOG(ERR,
			"unable to map response address to physical memory\n");
		return -ENOMEM;
	}
	memcpy(buf, data, data_len);

	HWRM_PREP(&req, HWRM_NVM_WRITE, BNXT_USE_CHIMP_MB);

	req.dir_type = rte_cpu_to_le_16(dir_type);
	req.dir_ordinal = rte_cpu_to_le_16(dir_ordinal);
	req.dir_ext = rte_cpu_to_le_16(dir_ext);
	req.dir_attr = rte_cpu_to_le_16(dir_attr);
	req.dir_data_length = rte_cpu_to_le_32(data_len);
	req.host_src_addr = rte_cpu_to_le_64(dma_handle);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	/* Bounce buffer can go as soon as the command has completed. */
	rte_free(buf);
	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
4602
4603 static void
4604 bnxt_vnic_count(struct bnxt_vnic_info *vnic __rte_unused, void *cbdata)
4605 {
4606         uint32_t *count = cbdata;
4607
4608         *count = *count + 1;
4609 }
4610
/* Do-nothing hwrm_cb for bnxt_hwrm_func_vf_vnic_query_and_config():
 * used when the caller only wants to walk the VNICs, not program them.
 */
static int bnxt_vnic_count_hwrm_stub(struct bnxt *bp __rte_unused,
				     struct bnxt_vnic_info *vnic __rte_unused)
{
	return 0;
}
4616
4617 int bnxt_vf_vnic_count(struct bnxt *bp, uint16_t vf)
4618 {
4619         uint32_t count = 0;
4620
4621         bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf, bnxt_vnic_count,
4622             &count, bnxt_vnic_count_hwrm_stub);
4623
4624         return count;
4625 }
4626
/*
 * bnxt_hwrm_func_vf_vnic_query - fetch the VNIC id table for VF @vf.
 *
 * @vnic_ids must be a DMA-able buffer with room for bp->pf->total_vnics
 * entries; the firmware writes the table directly via vnic_id_tbl_addr.
 *
 * Returns the number of VNIC ids written, or a negative errno.
 */
static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
					uint16_t *vnic_ids)
{
	struct hwrm_func_vf_vnic_ids_query_input req = {0};
	struct hwrm_func_vf_vnic_ids_query_output *resp =
						bp->hwrm_cmd_resp_addr;
	int rc;

	/* First query all VNIC ids */
	HWRM_PREP(&req, HWRM_FUNC_VF_VNIC_IDS_QUERY, BNXT_USE_CHIMP_MB);

	/* Firmware VF ids are offset by the PF's first VF id. */
	req.vf_id = rte_cpu_to_le_16(bp->pf->first_vf_id + vf);
	req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf->total_vnics);
	req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_malloc_virt2iova(vnic_ids));

	if (req.vnic_id_tbl_addr == RTE_BAD_IOVA) {
		HWRM_UNLOCK();
		PMD_DRV_LOG(ERR,
		"unable to map VNIC ID table address to physical memory\n");
		return -ENOMEM;
	}
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
	HWRM_CHECK_RESULT();
	/* On success, repurpose rc as the count of returned VNIC ids. */
	rc = rte_le_to_cpu_32(resp->vnic_id_cnt);

	HWRM_UNLOCK();

	return rc;
}
4656
4657 /*
4658  * This function queries the VNIC IDs  for a specified VF. It then calls
4659  * the vnic_cb to update the necessary field in vnic_info with cbdata.
4660  * Then it calls the hwrm_cb function to program this new vnic configuration.
4661  */
4662 int bnxt_hwrm_func_vf_vnic_query_and_config(struct bnxt *bp, uint16_t vf,
4663         void (*vnic_cb)(struct bnxt_vnic_info *, void *), void *cbdata,
4664         int (*hwrm_cb)(struct bnxt *bp, struct bnxt_vnic_info *vnic))
4665 {
4666         struct bnxt_vnic_info vnic;
4667         int rc = 0;
4668         int i, num_vnic_ids;
4669         uint16_t *vnic_ids;
4670         size_t vnic_id_sz;
4671         size_t sz;
4672
4673         /* First query all VNIC ids */
4674         vnic_id_sz = bp->pf->total_vnics * sizeof(*vnic_ids);
4675         vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
4676                         RTE_CACHE_LINE_SIZE);
4677         if (vnic_ids == NULL)
4678                 return -ENOMEM;
4679
4680         for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
4681                 rte_mem_lock_page(((char *)vnic_ids) + sz);
4682
4683         num_vnic_ids = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
4684
4685         if (num_vnic_ids < 0)
4686                 return num_vnic_ids;
4687
4688         /* Retrieve VNIC, update bd_stall then update */
4689
4690         for (i = 0; i < num_vnic_ids; i++) {
4691                 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
4692                 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
4693                 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, bp->pf->first_vf_id + vf);
4694                 if (rc)
4695                         break;
4696                 if (vnic.mru <= 4)      /* Indicates unallocated */
4697                         continue;
4698
4699                 vnic_cb(&vnic, cbdata);
4700
4701                 rc = hwrm_cb(bp, &vnic);
4702                 if (rc)
4703                         break;
4704         }
4705
4706         rte_free(vnic_ids);
4707
4708         return rc;
4709 }
4710
/*
 * bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof - toggle VLAN anti-spoofing
 * for VF @vf.
 *
 * @on selects VALIDATE_VLAN mode; otherwise NOCHECK is programmed.
 * Returns 0 on success, negative errno on HWRM failure.
 */
int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf,
					      bool on)
{
	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_func_cfg_input req = {0};
	int rc;

	HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);

	/* Target the VF by its function id; only the antispoof field is set. */
	req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
	req.enables |= rte_cpu_to_le_32(
			HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE);
	req.vlan_antispoof_mode = on ?
		HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN :
		HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK;
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
4733
4734 int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf)
4735 {
4736         struct bnxt_vnic_info vnic;
4737         uint16_t *vnic_ids;
4738         size_t vnic_id_sz;
4739         int num_vnic_ids, i;
4740         size_t sz;
4741         int rc;
4742
4743         vnic_id_sz = bp->pf->total_vnics * sizeof(*vnic_ids);
4744         vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
4745                         RTE_CACHE_LINE_SIZE);
4746         if (vnic_ids == NULL)
4747                 return -ENOMEM;
4748
4749         for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
4750                 rte_mem_lock_page(((char *)vnic_ids) + sz);
4751
4752         rc = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
4753         if (rc <= 0)
4754                 goto exit;
4755         num_vnic_ids = rc;
4756
4757         /*
4758          * Loop through to find the default VNIC ID.
4759          * TODO: The easier way would be to obtain the resp->dflt_vnic_id
4760          * by sending the hwrm_func_qcfg command to the firmware.
4761          */
4762         for (i = 0; i < num_vnic_ids; i++) {
4763                 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
4764                 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
4765                 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic,
4766                                         bp->pf->first_vf_id + vf);
4767                 if (rc)
4768                         goto exit;
4769                 if (vnic.func_default) {
4770                         rte_free(vnic_ids);
4771                         return vnic.fw_vnic_id;
4772                 }
4773         }
4774         /* Could not find a default VNIC. */
4775         PMD_DRV_LOG(ERR, "No default VNIC\n");
4776 exit:
4777         rte_free(vnic_ids);
4778         return rc;
4779 }
4780
/*
 * bnxt_hwrm_set_em_filter - program an exact-match flow for @filter,
 * steering matching packets to @dst_id.
 *
 * A previously programmed EM filter is cleared first. Each request field
 * is populated only when its bit is set in the filter's enables mask.
 * On success the firmware filter handle is cached in
 * filter->fw_em_filter_id. Returns 0 or a negative errno.
 */
int bnxt_hwrm_set_em_filter(struct bnxt *bp,
			 uint16_t dst_id,
			 struct bnxt_filter_info *filter)
{
	int rc = 0;
	struct hwrm_cfa_em_flow_alloc_input req = {.req_type = 0 };
	struct hwrm_cfa_em_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t enables = 0;

	/* Re-programming: drop the stale firmware filter first. */
	if (filter->fw_em_filter_id != UINT64_MAX)
		bnxt_hwrm_clear_em_filter(bp, filter);

	/* EM flow commands may be routed to the KONG channel. */
	HWRM_PREP(&req, HWRM_CFA_EM_FLOW_ALLOC, BNXT_USE_KONG(bp));

	req.flags = rte_cpu_to_le_32(filter->flags);

	/* DST_ID is always supplied in addition to the filter's enables. */
	enables = filter->enables |
	      HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_ID;
	req.dst_id = rte_cpu_to_le_16(dst_id);

	if (filter->ip_addr_type) {
		req.ip_addr_type = filter->ip_addr_type;
		enables |= HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
	}
	if (enables &
	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
		req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
	if (enables &
	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_MACADDR)
		memcpy(req.src_macaddr, filter->src_macaddr,
		       RTE_ETHER_ADDR_LEN);
	if (enables &
	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_MACADDR)
		memcpy(req.dst_macaddr, filter->dst_macaddr,
		       RTE_ETHER_ADDR_LEN);
	if (enables &
	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_OVLAN_VID)
		req.ovlan_vid = filter->l2_ovlan;
	if (enables &
	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IVLAN_VID)
		req.ivlan_vid = filter->l2_ivlan;
	if (enables &
	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ETHERTYPE)
		req.ethertype = rte_cpu_to_be_16(filter->ethertype);
	if (enables &
	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
		req.ip_protocol = filter->ip_protocol;
	if (enables &
	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_IPADDR)
		req.src_ipaddr[0] = rte_cpu_to_be_32(filter->src_ipaddr[0]);
	if (enables &
	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_IPADDR)
		req.dst_ipaddr[0] = rte_cpu_to_be_32(filter->dst_ipaddr[0]);
	if (enables &
	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_PORT)
		req.src_port = rte_cpu_to_be_16(filter->src_port);
	if (enables &
	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_PORT)
		req.dst_port = rte_cpu_to_be_16(filter->dst_port);
	if (enables &
	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
		req.mirror_vnic_id = filter->mirror_vnic_id;

	req.enables = rte_cpu_to_le_32(enables);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));

	HWRM_CHECK_RESULT();

	filter->fw_em_filter_id = rte_le_to_cpu_64(resp->em_filter_id);
	HWRM_UNLOCK();

	return rc;
}
4855
/*
 * bnxt_hwrm_clear_em_filter - free an exact-match flow in firmware.
 *
 * No-op when filter->fw_em_filter_id is UINT64_MAX (nothing programmed).
 * On completion both the cached EM and L2 filter ids are invalidated.
 * Returns 0 on the fall-through path; rc is consumed by
 * HWRM_CHECK_RESULT() on firmware failure.
 */
int bnxt_hwrm_clear_em_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
{
	int rc = 0;
	struct hwrm_cfa_em_flow_free_input req = {.req_type = 0 };
	struct hwrm_cfa_em_flow_free_output *resp = bp->hwrm_cmd_resp_addr;

	if (filter->fw_em_filter_id == UINT64_MAX)
		return 0;

	/* EM flow commands may be routed to the KONG channel. */
	HWRM_PREP(&req, HWRM_CFA_EM_FLOW_FREE, BNXT_USE_KONG(bp));

	req.em_filter_id = rte_cpu_to_le_64(filter->fw_em_filter_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	/* Mark both handles invalid so a later set re-programs from scratch. */
	filter->fw_em_filter_id = UINT64_MAX;
	filter->fw_l2_filter_id = UINT64_MAX;

	return 0;
}
4879
/*
 * bnxt_hwrm_set_ntuple_filter - program an n-tuple flow for @filter,
 * steering matching packets to @dst_id.
 *
 * A previously programmed n-tuple filter is cleared first. Each request
 * field is populated only when its bit is set in the filter's enables
 * mask. On success the firmware filter handle and flow id are cached in
 * filter->fw_ntuple_filter_id / filter->flow_id.
 * Returns 0 or a negative errno.
 */
int bnxt_hwrm_set_ntuple_filter(struct bnxt *bp,
			 uint16_t dst_id,
			 struct bnxt_filter_info *filter)
{
	int rc = 0;
	struct hwrm_cfa_ntuple_filter_alloc_input req = {.req_type = 0 };
	struct hwrm_cfa_ntuple_filter_alloc_output *resp =
						bp->hwrm_cmd_resp_addr;
	uint32_t enables = 0;

	/* Re-programming: drop the stale firmware filter first. */
	if (filter->fw_ntuple_filter_id != UINT64_MAX)
		bnxt_hwrm_clear_ntuple_filter(bp, filter);

	HWRM_PREP(&req, HWRM_CFA_NTUPLE_FILTER_ALLOC, BNXT_USE_CHIMP_MB);

	req.flags = rte_cpu_to_le_32(filter->flags);

	/* DST_ID is always supplied in addition to the filter's enables. */
	enables = filter->enables |
	      HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
	req.dst_id = rte_cpu_to_le_16(dst_id);

	if (filter->ip_addr_type) {
		req.ip_addr_type = filter->ip_addr_type;
		enables |=
			HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
	}
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
		req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR)
		memcpy(req.src_macaddr, filter->src_macaddr,
		       RTE_ETHER_ADDR_LEN);
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE)
		req.ethertype = rte_cpu_to_be_16(filter->ethertype);
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
		req.ip_protocol = filter->ip_protocol;
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR)
		req.src_ipaddr[0] = rte_cpu_to_le_32(filter->src_ipaddr[0]);
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR_MASK)
		req.src_ipaddr_mask[0] =
			rte_cpu_to_le_32(filter->src_ipaddr_mask[0]);
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR)
		req.dst_ipaddr[0] = rte_cpu_to_le_32(filter->dst_ipaddr[0]);
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR_MASK)
		/*
		 * NOTE(review): dst_ipaddr_mask uses cpu_to_be_32 while the
		 * other IPv4 fields in this request use cpu_to_le_32 —
		 * confirm against how the mask is stored by the flow layer.
		 */
		req.dst_ipaddr_mask[0] =
			rte_cpu_to_be_32(filter->dst_ipaddr_mask[0]);
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT)
		req.src_port = rte_cpu_to_le_16(filter->src_port);
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT_MASK)
		req.src_port_mask = rte_cpu_to_le_16(filter->src_port_mask);
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT)
		req.dst_port = rte_cpu_to_le_16(filter->dst_port);
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT_MASK)
		req.dst_port_mask = rte_cpu_to_le_16(filter->dst_port_mask);
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
		req.mirror_vnic_id = filter->mirror_vnic_id;

	req.enables = rte_cpu_to_le_32(enables);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

	filter->fw_ntuple_filter_id = rte_le_to_cpu_64(resp->ntuple_filter_id);
	filter->flow_id = rte_le_to_cpu_32(resp->flow_id);
	HWRM_UNLOCK();

	return rc;
}
4961
/*
 * bnxt_hwrm_clear_ntuple_filter - free an n-tuple flow in firmware.
 *
 * No-op when filter->fw_ntuple_filter_id is UINT64_MAX (nothing
 * programmed). On completion the cached handle is invalidated.
 * Returns 0 on the fall-through path; rc is consumed by
 * HWRM_CHECK_RESULT() on firmware failure.
 */
int bnxt_hwrm_clear_ntuple_filter(struct bnxt *bp,
				struct bnxt_filter_info *filter)
{
	int rc = 0;
	struct hwrm_cfa_ntuple_filter_free_input req = {.req_type = 0 };
	struct hwrm_cfa_ntuple_filter_free_output *resp =
						bp->hwrm_cmd_resp_addr;

	if (filter->fw_ntuple_filter_id == UINT64_MAX)
		return 0;

	HWRM_PREP(&req, HWRM_CFA_NTUPLE_FILTER_FREE, BNXT_USE_CHIMP_MB);

	req.ntuple_filter_id = rte_cpu_to_le_64(filter->fw_ntuple_filter_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	/* Mark the handle invalid so a later set re-programs from scratch. */
	filter->fw_ntuple_filter_id = UINT64_MAX;

	return 0;
}
4986
/*
 * bnxt_vnic_rss_configure_p5 - program the RSS redirection tables of a
 * VNIC on P5 chips.
 *
 * For each of the vnic->num_lb_ctxts contexts, builds a table of 64
 * (rx ring id, cp ring id) pairs from the currently started rx queues,
 * skipping stopped queues and wrapping the queue index @k around
 * @max_rings. If no queue is started at all, returns 0 without sending
 * the remaining requests.
 */
static int
bnxt_vnic_rss_configure_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	uint8_t *rx_queue_state = bp->eth_dev->data->rx_queue_state;
	struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
	struct bnxt_rx_queue **rxqs = bp->rx_queues;
	uint16_t *ring_tbl = vnic->rss_table;
	int nr_ctxs = vnic->num_lb_ctxts;
	int max_rings = bp->rx_nr_rings;
	int i, j, k, cnt;
	int rc = 0;

	/* k persists across contexts so queues are distributed round-robin. */
	for (i = 0, k = 0; i < nr_ctxs; i++) {
		struct bnxt_rx_ring_info *rxr;
		struct bnxt_cp_ring_info *cpr;

		HWRM_PREP(&req, HWRM_VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);

		req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
		req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
		req.hash_mode_flags = vnic->hash_mode;

		/* Each context owns a 64-pair slice of the DMA table. */
		req.ring_grp_tbl_addr =
		    rte_cpu_to_le_64(vnic->rss_table_dma_addr +
				     i * BNXT_RSS_ENTRIES_PER_CTX_P5 *
				     2 * sizeof(*ring_tbl));
		req.hash_key_tbl_addr =
		    rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);

		req.ring_table_pair_index = i;
		req.rss_ctx_idx = rte_cpu_to_le_16(vnic->fw_grp_ids[i]);

		for (j = 0; j < 64; j++) {
			uint16_t ring_id;

			/* Find next active ring. */
			for (cnt = 0; cnt < max_rings; cnt++) {
				if (rx_queue_state[k] !=
						RTE_ETH_QUEUE_STATE_STOPPED)
					break;
				if (++k == max_rings)
					k = 0;
			}

			/* Return if no rings are active. */
			if (cnt == max_rings) {
				HWRM_UNLOCK();
				return 0;
			}

			/* Add rx/cp ring pair to RSS table. */
			rxr = rxqs[k]->rx_ring;
			cpr = rxqs[k]->cp_ring;

			ring_id = rxr->rx_ring_struct->fw_ring_id;
			*ring_tbl++ = rte_cpu_to_le_16(ring_id);
			ring_id = cpr->cp_ring_struct->fw_ring_id;
			*ring_tbl++ = rte_cpu_to_le_16(ring_id);

			/* Advance past the queue just used, with wrap. */
			if (++k == max_rings)
				k = 0;
		}
		rc = bnxt_hwrm_send_message(bp, &req, sizeof(req),
					    BNXT_USE_CHIMP_MB);

		HWRM_CHECK_RESULT();
		HWRM_UNLOCK();
	}

	return rc;
}
5059
5060 int bnxt_vnic_rss_configure(struct bnxt *bp, struct bnxt_vnic_info *vnic)
5061 {
5062         unsigned int rss_idx, fw_idx, i;
5063
5064         if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
5065                 return 0;
5066
5067         if (!(vnic->rss_table && vnic->hash_type))
5068                 return 0;
5069
5070         if (BNXT_CHIP_P5(bp))
5071                 return bnxt_vnic_rss_configure_p5(bp, vnic);
5072
5073         /*
5074          * Fill the RSS hash & redirection table with
5075          * ring group ids for all VNICs
5076          */
5077         for (rss_idx = 0, fw_idx = 0; rss_idx < HW_HASH_INDEX_SIZE;
5078              rss_idx++, fw_idx++) {
5079                 for (i = 0; i < bp->rx_cp_nr_rings; i++) {
5080                         fw_idx %= bp->rx_cp_nr_rings;
5081                         if (vnic->fw_grp_ids[fw_idx] != INVALID_HW_RING_ID)
5082                                 break;
5083                         fw_idx++;
5084                 }
5085
5086                 if (i == bp->rx_cp_nr_rings)
5087                         return 0;
5088
5089                 vnic->rss_table[rss_idx] = vnic->fw_grp_ids[fw_idx];
5090         }
5091
5092         return bnxt_hwrm_vnic_rss_cfg(bp, vnic);
5093 }
5094
/*
 * bnxt_hwrm_set_coal_params - translate the driver's coalescing settings
 * (struct bnxt_coal) into a RING_CMPL_RING_CFG_AGGINT_PARAMS request.
 *
 * Only fills @req; the caller sends the command.
 */
static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal,
	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
{
	uint16_t flags;

	req->num_cmpl_aggr_int = rte_cpu_to_le_16(hw_coal->num_cmpl_aggr_int);

	/* This is a 6-bit value and must not be 0, or we'll get non stop IRQ */
	req->num_cmpl_dma_aggr = rte_cpu_to_le_16(hw_coal->num_cmpl_dma_aggr);

	/* This is a 6-bit value and must not be 0, or we'll get non stop IRQ */
	req->num_cmpl_dma_aggr_during_int =
		rte_cpu_to_le_16(hw_coal->num_cmpl_dma_aggr_during_int);

	req->int_lat_tmr_max = rte_cpu_to_le_16(hw_coal->int_lat_tmr_max);

	/* min timer set to 1/2 of interrupt timer */
	req->int_lat_tmr_min = rte_cpu_to_le_16(hw_coal->int_lat_tmr_min);

	/* buf timer set to 1/4 of interrupt timer */
	req->cmpl_aggr_dma_tmr = rte_cpu_to_le_16(hw_coal->cmpl_aggr_dma_tmr);

	req->cmpl_aggr_dma_tmr_during_int =
		rte_cpu_to_le_16(hw_coal->cmpl_aggr_dma_tmr_during_int);

	/* Request TIMER_RESET and RING_IDLE behavior from the firmware. */
	flags = HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET |
		HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_RING_IDLE;
	req->flags = rte_cpu_to_le_16(flags);
}
5124
/*
 * bnxt_hwrm_set_coal_params_p5 - derive coalescing parameters for P5
 * chips from firmware RING_AGGINT_QCAPS limits.
 *
 * Fills @agg_req with the queried maximum DMA aggregation count and
 * minimum aggregation timer, enabling only those two fields.
 * Returns 0 on success, negative errno on HWRM failure.
 */
static int bnxt_hwrm_set_coal_params_p5(struct bnxt *bp,
		struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *agg_req)
{
	struct hwrm_ring_aggint_qcaps_input req = {0};
	struct hwrm_ring_aggint_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t enables;
	uint16_t flags;
	int rc;

	HWRM_PREP(&req, HWRM_RING_AGGINT_QCAPS, BNXT_USE_CHIMP_MB);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
	HWRM_CHECK_RESULT();

	agg_req->num_cmpl_dma_aggr = resp->num_cmpl_dma_aggr_max;
	agg_req->cmpl_aggr_dma_tmr = resp->cmpl_aggr_dma_tmr_min;

	flags = HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET |
		HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_RING_IDLE;
	agg_req->flags = rte_cpu_to_le_16(flags);
	/* Only the two fields copied above are marked as valid. */
	enables =
	 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_CMPL_AGGR_DMA_TMR |
	 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_NUM_CMPL_DMA_AGGR;
	agg_req->enables = rte_cpu_to_le_32(enables);

	HWRM_UNLOCK();
	return rc;
}
5152
5153 int bnxt_hwrm_set_ring_coal(struct bnxt *bp,
5154                         struct bnxt_coal *coal, uint16_t ring_id)
5155 {
5156         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
5157         struct hwrm_ring_cmpl_ring_cfg_aggint_params_output *resp =
5158                                                 bp->hwrm_cmd_resp_addr;
5159         int rc;
5160
5161         /* Set ring coalesce parameters only for 100G NICs */
5162         if (BNXT_CHIP_P5(bp)) {
5163                 if (bnxt_hwrm_set_coal_params_p5(bp, &req))
5164                         return -1;
5165         } else if (bnxt_stratus_device(bp)) {
5166                 bnxt_hwrm_set_coal_params(coal, &req);
5167         } else {
5168                 return 0;
5169         }
5170
5171         HWRM_PREP(&req,
5172                   HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS,
5173                   BNXT_USE_CHIMP_MB);
5174         req.ring_id = rte_cpu_to_le_16(ring_id);
5175         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5176         HWRM_CHECK_RESULT();
5177         HWRM_UNLOCK();
5178         return 0;
5179 }
5180
5181 #define BNXT_RTE_MEMZONE_FLAG  (RTE_MEMZONE_1GB | RTE_MEMZONE_IOVA_CONTIG)
5182 int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
5183 {
5184         struct hwrm_func_backing_store_qcaps_input req = {0};
5185         struct hwrm_func_backing_store_qcaps_output *resp =
5186                 bp->hwrm_cmd_resp_addr;
5187         struct bnxt_ctx_pg_info *ctx_pg;
5188         struct bnxt_ctx_mem_info *ctx;
5189         int total_alloc_len;
5190         int rc, i, tqm_rings;
5191
5192         if (!BNXT_CHIP_P5(bp) ||
5193             bp->hwrm_spec_code < HWRM_VERSION_1_9_2 ||
5194             BNXT_VF(bp) ||
5195             bp->ctx)
5196                 return 0;
5197
5198         HWRM_PREP(&req, HWRM_FUNC_BACKING_STORE_QCAPS, BNXT_USE_CHIMP_MB);
5199         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5200         HWRM_CHECK_RESULT_SILENT();
5201
5202         total_alloc_len = sizeof(*ctx);
5203         ctx = rte_zmalloc("bnxt_ctx_mem", total_alloc_len,
5204                           RTE_CACHE_LINE_SIZE);
5205         if (!ctx) {
5206                 rc = -ENOMEM;
5207                 goto ctx_err;
5208         }
5209
5210         ctx->qp_max_entries = rte_le_to_cpu_32(resp->qp_max_entries);
5211         ctx->qp_min_qp1_entries =
5212                 rte_le_to_cpu_16(resp->qp_min_qp1_entries);
5213         ctx->qp_max_l2_entries =
5214                 rte_le_to_cpu_16(resp->qp_max_l2_entries);
5215         ctx->qp_entry_size = rte_le_to_cpu_16(resp->qp_entry_size);
5216         ctx->srq_max_l2_entries =
5217                 rte_le_to_cpu_16(resp->srq_max_l2_entries);
5218         ctx->srq_max_entries = rte_le_to_cpu_32(resp->srq_max_entries);
5219         ctx->srq_entry_size = rte_le_to_cpu_16(resp->srq_entry_size);
5220         ctx->cq_max_l2_entries =
5221                 rte_le_to_cpu_16(resp->cq_max_l2_entries);
5222         ctx->cq_max_entries = rte_le_to_cpu_32(resp->cq_max_entries);
5223         ctx->cq_entry_size = rte_le_to_cpu_16(resp->cq_entry_size);
5224         ctx->vnic_max_vnic_entries =
5225                 rte_le_to_cpu_16(resp->vnic_max_vnic_entries);
5226         ctx->vnic_max_ring_table_entries =
5227                 rte_le_to_cpu_16(resp->vnic_max_ring_table_entries);
5228         ctx->vnic_entry_size = rte_le_to_cpu_16(resp->vnic_entry_size);
5229         ctx->stat_max_entries =
5230                 rte_le_to_cpu_32(resp->stat_max_entries);
5231         ctx->stat_entry_size = rte_le_to_cpu_16(resp->stat_entry_size);
5232         ctx->tqm_entry_size = rte_le_to_cpu_16(resp->tqm_entry_size);
5233         ctx->tqm_min_entries_per_ring =
5234                 rte_le_to_cpu_32(resp->tqm_min_entries_per_ring);
5235         ctx->tqm_max_entries_per_ring =
5236                 rte_le_to_cpu_32(resp->tqm_max_entries_per_ring);
5237         ctx->tqm_entries_multiple = resp->tqm_entries_multiple;
5238         if (!ctx->tqm_entries_multiple)
5239                 ctx->tqm_entries_multiple = 1;
5240         ctx->mrav_max_entries =
5241                 rte_le_to_cpu_32(resp->mrav_max_entries);
5242         ctx->mrav_entry_size = rte_le_to_cpu_16(resp->mrav_entry_size);
5243         ctx->tim_entry_size = rte_le_to_cpu_16(resp->tim_entry_size);
5244         ctx->tim_max_entries = rte_le_to_cpu_32(resp->tim_max_entries);
5245         ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
5246
5247         ctx->tqm_fp_rings_count = ctx->tqm_fp_rings_count ?
5248                                   RTE_MIN(ctx->tqm_fp_rings_count,
5249                                           BNXT_MAX_TQM_FP_LEGACY_RINGS) :
5250                                   bp->max_q;
5251
5252         /* Check if the ext ring count needs to be counted.
5253          * Ext ring count is available only with new FW so we should not
5254          * look at the field on older FW.
5255          */
5256         if (ctx->tqm_fp_rings_count == BNXT_MAX_TQM_FP_LEGACY_RINGS &&
5257             bp->hwrm_max_ext_req_len >= BNXT_BACKING_STORE_CFG_LEN) {
5258                 ctx->tqm_fp_rings_count += resp->tqm_fp_rings_count_ext;
5259                 ctx->tqm_fp_rings_count = RTE_MIN(BNXT_MAX_TQM_FP_RINGS,
5260                                                   ctx->tqm_fp_rings_count);
5261         }
5262
5263         tqm_rings = ctx->tqm_fp_rings_count + 1;
5264
5265         ctx_pg = rte_malloc("bnxt_ctx_pg_mem",
5266                             sizeof(*ctx_pg) * tqm_rings,
5267                             RTE_CACHE_LINE_SIZE);
5268         if (!ctx_pg) {
5269                 rc = -ENOMEM;
5270                 goto ctx_err;
5271         }
5272         for (i = 0; i < tqm_rings; i++, ctx_pg++)
5273                 ctx->tqm_mem[i] = ctx_pg;
5274
5275         bp->ctx = ctx;
5276 ctx_err:
5277         HWRM_UNLOCK();
5278         return rc;
5279 }
5280
5281 int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, uint32_t enables)
5282 {
5283         struct hwrm_func_backing_store_cfg_input req = {0};
5284         struct hwrm_func_backing_store_cfg_output *resp =
5285                 bp->hwrm_cmd_resp_addr;
5286         struct bnxt_ctx_mem_info *ctx = bp->ctx;
5287         struct bnxt_ctx_pg_info *ctx_pg;
5288         uint32_t *num_entries;
5289         uint64_t *pg_dir;
5290         uint8_t *pg_attr;
5291         uint32_t ena;
5292         int i, rc;
5293
5294         if (!ctx)
5295                 return 0;
5296
5297         HWRM_PREP(&req, HWRM_FUNC_BACKING_STORE_CFG, BNXT_USE_CHIMP_MB);
5298         req.enables = rte_cpu_to_le_32(enables);
5299
5300         if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_QP) {
5301                 ctx_pg = &ctx->qp_mem;
5302                 req.qp_num_entries = rte_cpu_to_le_32(ctx_pg->entries);
5303                 req.qp_num_qp1_entries =
5304                         rte_cpu_to_le_16(ctx->qp_min_qp1_entries);
5305                 req.qp_num_l2_entries =
5306                         rte_cpu_to_le_16(ctx->qp_max_l2_entries);
5307                 req.qp_entry_size = rte_cpu_to_le_16(ctx->qp_entry_size);
5308                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
5309                                       &req.qpc_pg_size_qpc_lvl,
5310                                       &req.qpc_page_dir);
5311         }
5312
5313         if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_SRQ) {
5314                 ctx_pg = &ctx->srq_mem;
5315                 req.srq_num_entries = rte_cpu_to_le_32(ctx_pg->entries);
5316                 req.srq_num_l2_entries =
5317                                  rte_cpu_to_le_16(ctx->srq_max_l2_entries);
5318                 req.srq_entry_size = rte_cpu_to_le_16(ctx->srq_entry_size);
5319                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
5320                                       &req.srq_pg_size_srq_lvl,
5321                                       &req.srq_page_dir);
5322         }
5323
5324         if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_CQ) {
5325                 ctx_pg = &ctx->cq_mem;
5326                 req.cq_num_entries = rte_cpu_to_le_32(ctx_pg->entries);
5327                 req.cq_num_l2_entries =
5328                                 rte_cpu_to_le_16(ctx->cq_max_l2_entries);
5329                 req.cq_entry_size = rte_cpu_to_le_16(ctx->cq_entry_size);
5330                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
5331                                       &req.cq_pg_size_cq_lvl,
5332                                       &req.cq_page_dir);
5333         }
5334
5335         if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_VNIC) {
5336                 ctx_pg = &ctx->vnic_mem;
5337                 req.vnic_num_vnic_entries =
5338                         rte_cpu_to_le_16(ctx->vnic_max_vnic_entries);
5339                 req.vnic_num_ring_table_entries =
5340                         rte_cpu_to_le_16(ctx->vnic_max_ring_table_entries);
5341                 req.vnic_entry_size = rte_cpu_to_le_16(ctx->vnic_entry_size);
5342                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
5343                                       &req.vnic_pg_size_vnic_lvl,
5344                                       &req.vnic_page_dir);
5345         }
5346
5347         if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_STAT) {
5348                 ctx_pg = &ctx->stat_mem;
5349                 req.stat_num_entries = rte_cpu_to_le_16(ctx->stat_max_entries);
5350                 req.stat_entry_size = rte_cpu_to_le_16(ctx->stat_entry_size);
5351                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
5352                                       &req.stat_pg_size_stat_lvl,
5353                                       &req.stat_page_dir);
5354         }
5355
5356         req.tqm_entry_size = rte_cpu_to_le_16(ctx->tqm_entry_size);
5357         num_entries = &req.tqm_sp_num_entries;
5358         pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl;
5359         pg_dir = &req.tqm_sp_page_dir;
5360         ena = HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP;
5361         for (i = 0; i < 9; i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
5362                 if (!(enables & ena))
5363                         continue;
5364
5365                 req.tqm_entry_size = rte_cpu_to_le_16(ctx->tqm_entry_size);
5366
5367                 ctx_pg = ctx->tqm_mem[i];
5368                 *num_entries = rte_cpu_to_le_16(ctx_pg->entries);
5369                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
5370         }
5371
5372         if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_RING8) {
5373                 /* DPDK does not need to configure MRAV and TIM type.
5374                  * So we are skipping over MRAV and TIM. Skip to configure
5375                  * HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_RING8.
5376                  */
5377                 ctx_pg = ctx->tqm_mem[BNXT_MAX_TQM_LEGACY_RINGS];
5378                 req.tqm_ring8_num_entries = rte_cpu_to_le_16(ctx_pg->entries);
5379                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
5380                                       &req.tqm_ring8_pg_size_tqm_ring_lvl,
5381                                       &req.tqm_ring8_page_dir);
5382         }
5383
5384         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5385         HWRM_CHECK_RESULT();
5386         HWRM_UNLOCK();
5387
5388         return rc;
5389 }
5390
/* Query extended port statistics (HWRM_PORT_QSTATS_EXT) into the RX/TX
 * extended-stats DMA buffers previously mapped by the driver.
 * Returns 0 immediately if FW did not advertise extended port stats.
 */
int bnxt_hwrm_ext_port_qstats(struct bnxt *bp)
{
        struct hwrm_port_qstats_ext_input req = {0};
        struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
        struct bnxt_pf_info *pf = bp->pf;
        int rc;

        /* Nothing to do unless FW advertised extended RX or TX port stats. */
        if (!(bp->flags & BNXT_FLAG_EXT_RX_PORT_STATS ||
              bp->flags & BNXT_FLAG_EXT_TX_PORT_STATS))
                return 0;

        HWRM_PREP(&req, HWRM_PORT_QSTATS_EXT, BNXT_USE_CHIMP_MB);

        req.port_id = rte_cpu_to_le_16(pf->port_id);
        if (bp->flags & BNXT_FLAG_EXT_TX_PORT_STATS) {
                /* FW DMAs TX extended stats to this host buffer. */
                req.tx_stat_host_addr =
                        rte_cpu_to_le_64(bp->hw_tx_port_stats_ext_map);
                req.tx_stat_size =
                        rte_cpu_to_le_16(sizeof(struct tx_port_stats_ext));
        }
        if (bp->flags & BNXT_FLAG_EXT_RX_PORT_STATS) {
                /* FW DMAs RX extended stats to this host buffer. */
                req.rx_stat_host_addr =
                        rte_cpu_to_le_64(bp->hw_rx_port_stats_ext_map);
                req.rx_stat_size =
                        rte_cpu_to_le_16(sizeof(struct rx_port_stats_ext));
        }
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        /* Record how many stats bytes FW actually wrote, zeroing the sizes
         * on failure so stale values are never consumed.  This is done
         * before HWRM_CHECK_RESULT() on purpose: that macro returns early
         * on error and would skip the zeroing.
         */
        if (rc) {
                bp->fw_rx_port_stats_ext_size = 0;
                bp->fw_tx_port_stats_ext_size = 0;
        } else {
                bp->fw_rx_port_stats_ext_size =
                        rte_le_to_cpu_16(resp->rx_stat_size);
                bp->fw_tx_port_stats_ext_size =
                        rte_le_to_cpu_16(resp->tx_stat_size);
        }

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}
5434
/* Redirect all tunnel traffic of the given HWRM tunnel type to this
 * function (HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC).
 */
int
bnxt_hwrm_tunnel_redirect(struct bnxt *bp, uint8_t type)
{
        struct hwrm_cfa_redirect_tunnel_type_alloc_input req = {0};
        /* resp looks unused but is referenced by HWRM_CHECK_RESULT(). */
        struct hwrm_cfa_redirect_tunnel_type_alloc_output *resp =
                bp->hwrm_cmd_resp_addr;
        int rc = 0;

        HWRM_PREP(&req, HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC, BNXT_USE_CHIMP_MB);
        req.tunnel_type = type;
        req.dest_fid = bp->fw_fid; /* redirect to our own function id */
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
        HWRM_CHECK_RESULT();

        HWRM_UNLOCK();

        return rc;
}
5453
/* Remove a tunnel-type redirection previously installed by
 * bnxt_hwrm_tunnel_redirect() (HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE).
 */
int
bnxt_hwrm_tunnel_redirect_free(struct bnxt *bp, uint8_t type)
{
        struct hwrm_cfa_redirect_tunnel_type_free_input req = {0};
        /* resp looks unused but is referenced by HWRM_CHECK_RESULT(). */
        struct hwrm_cfa_redirect_tunnel_type_free_output *resp =
                bp->hwrm_cmd_resp_addr;
        int rc = 0;

        HWRM_PREP(&req, HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE, BNXT_USE_CHIMP_MB);
        req.tunnel_type = type;
        req.dest_fid = bp->fw_fid; /* must match the FID used at alloc */
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
        HWRM_CHECK_RESULT();

        HWRM_UNLOCK();

        return rc;
}
5472
/* Query which tunnel types are currently redirected for this function
 * (HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE).
 *
 * @type: optional output; receives the bitmask of redirected tunnel types.
 */
int bnxt_hwrm_tunnel_redirect_query(struct bnxt *bp, uint32_t *type)
{
        struct hwrm_cfa_redirect_query_tunnel_type_input req = {0};
        struct hwrm_cfa_redirect_query_tunnel_type_output *resp =
                bp->hwrm_cmd_resp_addr;
        int rc = 0;

        HWRM_PREP(&req, HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE, BNXT_USE_CHIMP_MB);
        req.src_fid = bp->fw_fid;
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
        HWRM_CHECK_RESULT();

        if (type)
                *type = rte_le_to_cpu_32(resp->tunnel_mask);

        HWRM_UNLOCK();

        return rc;
}
5492
/* Look up the destination FID a tunnel type is redirected to
 * (HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO).
 *
 * @tun_type: HWRM tunnel type to look up.
 * @dst_fid: optional output; receives the destination function id.
 */
int bnxt_hwrm_tunnel_redirect_info(struct bnxt *bp, uint8_t tun_type,
                                   uint16_t *dst_fid)
{
        struct hwrm_cfa_redirect_tunnel_type_info_input req = {0};
        struct hwrm_cfa_redirect_tunnel_type_info_output *resp =
                bp->hwrm_cmd_resp_addr;
        int rc = 0;

        HWRM_PREP(&req, HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO, BNXT_USE_CHIMP_MB);
        req.src_fid = bp->fw_fid;
        req.tunnel_type = tun_type;
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
        HWRM_CHECK_RESULT();

        if (dst_fid)
                *dst_fid = rte_le_to_cpu_16(resp->dest_fid);

        /* NOTE(review): logs the raw little-endian field, not the
         * byte-swapped value — debug-only, but confirm on BE targets.
         */
        PMD_DRV_LOG(DEBUG, "dst_fid: %x\n", resp->dest_fid);

        HWRM_UNLOCK();

        return rc;
}
5516
/* Program the VF's default MAC address into firmware via
 * HWRM_FUNC_VF_CFG.  No-op (returns 0) when running on a PF.
 */
int bnxt_hwrm_set_mac(struct bnxt *bp)
{
        /* resp looks unused but is referenced by HWRM_CHECK_RESULT(). */
        struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        struct hwrm_func_vf_cfg_input req = {0};
        int rc = 0;

        if (!BNXT_VF(bp))
                return 0;

        HWRM_PREP(&req, HWRM_FUNC_VF_CFG, BNXT_USE_CHIMP_MB);

        /* Only the default-MAC field of the request is valid. */
        req.enables =
                rte_cpu_to_le_32(HWRM_FUNC_VF_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
        memcpy(req.dflt_mac_addr, bp->mac_addr, RTE_ETHER_ADDR_LEN);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

        HWRM_UNLOCK();

        return rc;
}
5540
/* Notify firmware of an interface up/down transition
 * (HWRM_FUNC_DRV_IF_CHANGE).  On the "up" transition the response tells
 * us whether a hot FW reset occurred while the port was down, which the
 * caller uses to trigger re-initialization.
 */
int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
{
        struct hwrm_func_drv_if_change_output *resp = bp->hwrm_cmd_resp_addr;
        struct hwrm_func_drv_if_change_input req = {0};
        uint32_t flags;
        int rc;

        /* Older FW without IF_CHANGE support: nothing to notify. */
        if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
                return 0;

        /* Do not issue FUNC_DRV_IF_CHANGE during reset recovery.
         * If we issue FUNC_DRV_IF_CHANGE with flags down before
         * FUNC_DRV_UNRGTR, FW resets before FUNC_DRV_UNRGTR
         */
        if (!up && (bp->flags & BNXT_FLAG_FW_RESET))
                return 0;

        HWRM_PREP(&req, HWRM_FUNC_DRV_IF_CHANGE, BNXT_USE_CHIMP_MB);

        if (up)
                req.flags =
                rte_cpu_to_le_32(HWRM_FUNC_DRV_IF_CHANGE_INPUT_FLAGS_UP);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        /* Copy out the response flags before releasing the HWRM lock. */
        flags = rte_le_to_cpu_32(resp->flags);
        HWRM_UNLOCK();

        /* Down transition: nothing further to evaluate. */
        if (!up)
                return 0;

        if (flags & HWRM_FUNC_DRV_IF_CHANGE_OUTPUT_FLAGS_HOT_FW_RESET_DONE) {
                PMD_DRV_LOG(INFO, "FW reset happened while port was down\n");
                bp->flags |= BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE;
        }

        return 0;
}
5580
/* Query the firmware error-recovery configuration
 * (HWRM_ERROR_RECOVERY_QCFG) and populate bp->recovery_info with the
 * polling periods, health/heartbeat register offsets and the reset
 * register sequence.  On any failure the recovery_info structure is
 * freed and error recovery is effectively disabled.
 *
 * NOTE(review): assumes bp->recovery_info was allocated whenever
 * BNXT_FW_CAP_ERROR_RECOVERY is set — confirm against the caller.
 */
int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
{
        struct hwrm_error_recovery_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
        struct bnxt_error_recovery_info *info = bp->recovery_info;
        struct hwrm_error_recovery_qcfg_input req = {0};
        uint32_t flags = 0;
        unsigned int i;
        int rc;

        /* Older FW does not have error recovery support */
        if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
                return 0;

        HWRM_PREP(&req, HWRM_ERROR_RECOVERY_QCFG, BNXT_USE_CHIMP_MB);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

        /* Which agent drives recovery: the host driver or the CO CPU. */
        flags = rte_le_to_cpu_32(resp->flags);
        if (flags & HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FLAGS_HOST)
                info->flags |= BNXT_FLAG_ERROR_RECOVERY_HOST;
        else if (flags & HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FLAGS_CO_CPU)
                info->flags |= BNXT_FLAG_ERROR_RECOVERY_CO_CPU;

        /* CO-CPU-driven recovery requires the Kong mailbox channel. */
        if ((info->flags & BNXT_FLAG_ERROR_RECOVERY_CO_CPU) &&
            !(bp->flags & BNXT_FLAG_KONG_MB_EN)) {
                rc = -EINVAL;
                goto err;
        }

        /* FW returned values are in units of 100msec */
        info->driver_polling_freq =
                rte_le_to_cpu_32(resp->driver_polling_freq) * 100;
        info->master_func_wait_period =
                rte_le_to_cpu_32(resp->master_func_wait_period) * 100;
        info->normal_func_wait_period =
                rte_le_to_cpu_32(resp->normal_func_wait_period) * 100;
        info->master_func_wait_period_after_reset =
                rte_le_to_cpu_32(resp->master_func_wait_period_after_reset) * 100;
        info->max_bailout_time_after_reset =
                rte_le_to_cpu_32(resp->max_bailout_time_after_reset) * 100;
        /* Register offsets the driver polls to monitor FW health. */
        info->status_regs[BNXT_FW_STATUS_REG] =
                rte_le_to_cpu_32(resp->fw_health_status_reg);
        info->status_regs[BNXT_FW_HEARTBEAT_CNT_REG] =
                rte_le_to_cpu_32(resp->fw_heartbeat_reg);
        info->status_regs[BNXT_FW_RECOVERY_CNT_REG] =
                rte_le_to_cpu_32(resp->fw_reset_cnt_reg);
        info->status_regs[BNXT_FW_RESET_INPROG_REG] =
                rte_le_to_cpu_32(resp->reset_inprogress_reg);
        info->reg_array_cnt =
                rte_le_to_cpu_32(resp->reg_array_cnt);

        /* Reject counts that would overflow the fixed-size reset_reg
         * arrays below.
         */
        if (info->reg_array_cnt >= BNXT_NUM_RESET_REG) {
                rc = -EINVAL;
                goto err;
        }

        /* Register/value/delay triples to apply when performing a reset. */
        for (i = 0; i < info->reg_array_cnt; i++) {
                info->reset_reg[i] =
                        rte_le_to_cpu_32(resp->reset_reg[i]);
                info->reset_reg_val[i] =
                        rte_le_to_cpu_32(resp->reset_reg_val[i]);
                info->delay_after_reset[i] =
                        resp->delay_after_reset[i];
        }
err:
        HWRM_UNLOCK();

        /* Map the FW status registers */
        if (!rc)
                rc = bnxt_map_fw_health_status_regs(bp);

        /* Any failure disables error recovery entirely. */
        if (rc) {
                rte_free(bp->recovery_info);
                bp->recovery_info = NULL;
        }
        return rc;
}
5660
/* Request a graceful chip-level firmware reset (HWRM_FW_RESET) with
 * self-reset ASAP.  PF only; VFs get -EOPNOTSUPP.
 */
int bnxt_hwrm_fw_reset(struct bnxt *bp)
{
        /* resp looks unused but is referenced by HWRM_CHECK_RESULT(). */
        struct hwrm_fw_reset_output *resp = bp->hwrm_cmd_resp_addr;
        struct hwrm_fw_reset_input req = {0};
        int rc;

        if (!BNXT_PF(bp))
                return -EOPNOTSUPP;

        HWRM_PREP(&req, HWRM_FW_RESET, BNXT_USE_KONG(bp));

        /* Reset the whole chip, letting FW self-reset as soon as possible,
         * and ask for a graceful (quiesced) reset.
         */
        req.embedded_proc_type =
                HWRM_FW_RESET_INPUT_EMBEDDED_PROC_TYPE_CHIP;
        req.selfrst_status =
                HWRM_FW_RESET_INPUT_SELFRST_STATUS_SELFRSTASAP;
        req.flags = HWRM_FW_RESET_INPUT_FLAGS_RESET_GRACEFUL;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req),
                                    BNXT_USE_KONG(bp));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}
5686
5687 int bnxt_hwrm_port_ts_query(struct bnxt *bp, uint8_t path, uint64_t *timestamp)
5688 {
5689         struct hwrm_port_ts_query_output *resp = bp->hwrm_cmd_resp_addr;
5690         struct hwrm_port_ts_query_input req = {0};
5691         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
5692         uint32_t flags = 0;
5693         int rc;
5694
5695         if (!ptp)
5696                 return 0;
5697
5698         HWRM_PREP(&req, HWRM_PORT_TS_QUERY, BNXT_USE_CHIMP_MB);
5699
5700         switch (path) {
5701         case BNXT_PTP_FLAGS_PATH_TX:
5702                 flags |= HWRM_PORT_TS_QUERY_INPUT_FLAGS_PATH_TX;
5703                 break;
5704         case BNXT_PTP_FLAGS_PATH_RX:
5705                 flags |= HWRM_PORT_TS_QUERY_INPUT_FLAGS_PATH_RX;
5706                 break;
5707         case BNXT_PTP_FLAGS_CURRENT_TIME:
5708                 flags |= HWRM_PORT_TS_QUERY_INPUT_FLAGS_CURRENT_TIME;
5709                 break;
5710         }
5711
5712         req.flags = rte_cpu_to_le_32(flags);
5713         req.port_id = rte_cpu_to_le_16(bp->pf->port_id);
5714
5715         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5716
5717         HWRM_CHECK_RESULT();
5718
5719         if (timestamp) {
5720                 *timestamp = rte_le_to_cpu_32(resp->ptp_msg_ts[0]);
5721                 *timestamp |=
5722                         (uint64_t)(rte_le_to_cpu_32(resp->ptp_msg_ts[1])) << 32;
5723         }
5724         HWRM_UNLOCK();
5725
5726         return rc;
5727 }
5728
5729 int bnxt_hwrm_cfa_counter_qcaps(struct bnxt *bp, uint16_t *max_fc)
5730 {
5731         int rc = 0;
5732
5733         struct hwrm_cfa_counter_qcaps_input req = {0};
5734         struct hwrm_cfa_counter_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
5735
5736         if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
5737                 PMD_DRV_LOG(DEBUG,
5738                             "Not a PF or trusted VF. Command not supported\n");
5739                 return 0;
5740         }
5741
5742         HWRM_PREP(&req, HWRM_CFA_COUNTER_QCAPS, BNXT_USE_KONG(bp));
5743         req.target_id = rte_cpu_to_le_16(bp->fw_fid);
5744         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
5745
5746         HWRM_CHECK_RESULT();
5747         if (max_fc)
5748                 *max_fc = rte_le_to_cpu_16(resp->max_rx_fc);
5749         HWRM_UNLOCK();
5750
5751         return 0;
5752 }
5753
5754 int bnxt_hwrm_ctx_rgtr(struct bnxt *bp, rte_iova_t dma_addr, uint16_t *ctx_id)
5755 {
5756         int rc = 0;
5757         struct hwrm_cfa_ctx_mem_rgtr_input req = {.req_type = 0 };
5758         struct hwrm_cfa_ctx_mem_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
5759
5760         if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
5761                 PMD_DRV_LOG(DEBUG,
5762                             "Not a PF or trusted VF. Command not supported\n");
5763                 return 0;
5764         }
5765
5766         HWRM_PREP(&req, HWRM_CFA_CTX_MEM_RGTR, BNXT_USE_KONG(bp));
5767
5768         req.page_level = HWRM_CFA_CTX_MEM_RGTR_INPUT_PAGE_LEVEL_LVL_0;
5769         req.page_size = HWRM_CFA_CTX_MEM_RGTR_INPUT_PAGE_SIZE_2M;
5770         req.page_dir = rte_cpu_to_le_64(dma_addr);
5771
5772         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
5773
5774         HWRM_CHECK_RESULT();
5775         if (ctx_id) {
5776                 *ctx_id  = rte_le_to_cpu_16(resp->ctx_id);
5777                 PMD_DRV_LOG(DEBUG, "ctx_id = %d\n", *ctx_id);
5778         }
5779         HWRM_UNLOCK();
5780
5781         return 0;
5782 }
5783
/* Unregister a CFA counter DMA context previously registered with
 * bnxt_hwrm_ctx_rgtr() (HWRM_CFA_CTX_MEM_UNRGTR).
 *
 * Only a PF or a trusted VF may issue this command; otherwise the
 * function is a silent no-op returning 0.
 */
int bnxt_hwrm_ctx_unrgtr(struct bnxt *bp, uint16_t ctx_id)
{
        int rc = 0;
        struct hwrm_cfa_ctx_mem_unrgtr_input req = {.req_type = 0 };
        /* resp looks unused but is referenced by HWRM_CHECK_RESULT(). */
        struct hwrm_cfa_ctx_mem_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;

        if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
                PMD_DRV_LOG(DEBUG,
                            "Not a PF or trusted VF. Command not supported\n");
                return 0;
        }

        HWRM_PREP(&req, HWRM_CFA_CTX_MEM_UNRGTR, BNXT_USE_KONG(bp));

        req.ctx_id = rte_cpu_to_le_16(ctx_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}
5807
5808 int bnxt_hwrm_cfa_counter_cfg(struct bnxt *bp, enum bnxt_flow_dir dir,
5809                               uint16_t cntr, uint16_t ctx_id,
5810                               uint32_t num_entries, bool enable)
5811 {
5812         struct hwrm_cfa_counter_cfg_input req = {0};
5813         struct hwrm_cfa_counter_cfg_output *resp = bp->hwrm_cmd_resp_addr;
5814         uint16_t flags = 0;
5815         int rc;
5816
5817         if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
5818                 PMD_DRV_LOG(DEBUG,
5819                             "Not a PF or trusted VF. Command not supported\n");
5820                 return 0;
5821         }
5822
5823         HWRM_PREP(&req, HWRM_CFA_COUNTER_CFG, BNXT_USE_KONG(bp));
5824
5825         req.target_id = rte_cpu_to_le_16(bp->fw_fid);
5826         req.counter_type = rte_cpu_to_le_16(cntr);
5827         flags = enable ? HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_CFG_MODE_ENABLE :
5828                 HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_CFG_MODE_DISABLE;
5829         flags |= HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_DATA_TRANSFER_MODE_PULL;
5830         if (dir == BNXT_DIR_RX)
5831                 flags |=  HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_PATH_RX;
5832         else if (dir == BNXT_DIR_TX)
5833                 flags |=  HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_PATH_TX;
5834         req.flags = rte_cpu_to_le_16(flags);
5835         req.ctx_id =  rte_cpu_to_le_16(ctx_id);
5836         req.num_entries = rte_cpu_to_le_32(num_entries);
5837
5838         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
5839         HWRM_CHECK_RESULT();
5840         HWRM_UNLOCK();
5841
5842         return 0;
5843 }
5844
5845 int bnxt_hwrm_cfa_counter_qstats(struct bnxt *bp,
5846                                  enum bnxt_flow_dir dir,
5847                                  uint16_t cntr,
5848                                  uint16_t num_entries)
5849 {
5850         struct hwrm_cfa_counter_qstats_output *resp = bp->hwrm_cmd_resp_addr;
5851         struct hwrm_cfa_counter_qstats_input req = {0};
5852         uint16_t flow_ctx_id = 0;
5853         uint16_t flags = 0;
5854         int rc = 0;
5855
5856         if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
5857                 PMD_DRV_LOG(DEBUG,
5858                             "Not a PF or trusted VF. Command not supported\n");
5859                 return 0;
5860         }
5861
5862         if (dir == BNXT_DIR_RX) {
5863                 flow_ctx_id = bp->flow_stat->rx_fc_in_tbl.ctx_id;
5864                 flags = HWRM_CFA_COUNTER_QSTATS_INPUT_FLAGS_PATH_RX;
5865         } else if (dir == BNXT_DIR_TX) {
5866                 flow_ctx_id = bp->flow_stat->tx_fc_in_tbl.ctx_id;
5867                 flags = HWRM_CFA_COUNTER_QSTATS_INPUT_FLAGS_PATH_TX;
5868         }
5869
5870         HWRM_PREP(&req, HWRM_CFA_COUNTER_QSTATS, BNXT_USE_KONG(bp));
5871         req.target_id = rte_cpu_to_le_16(bp->fw_fid);
5872         req.counter_type = rte_cpu_to_le_16(cntr);
5873         req.input_flow_ctx_id = rte_cpu_to_le_16(flow_ctx_id);
5874         req.num_entries = rte_cpu_to_le_16(num_entries);
5875         req.flags = rte_cpu_to_le_16(flags);
5876         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
5877
5878         HWRM_CHECK_RESULT();
5879         HWRM_UNLOCK();
5880
5881         return 0;
5882 }
5883
/* Query function capabilities for @fid to learn the first VF id
 * assigned to it (HWRM_FUNC_QCAPS).
 *
 * @first_vf_id: optional output; receives resp->first_vf_id.
 */
int bnxt_hwrm_first_vf_id_query(struct bnxt *bp, uint16_t fid,
                                uint16_t *first_vf_id)
{
        int rc = 0;
        struct hwrm_func_qcaps_input req = {.req_type = 0 };
        struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(&req, HWRM_FUNC_QCAPS, BNXT_USE_CHIMP_MB);

        req.fid = rte_cpu_to_le_16(fid);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

        if (first_vf_id)
                *first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);

        HWRM_UNLOCK();

        return rc;
}
5906
5907 int bnxt_hwrm_cfa_pair_alloc(struct bnxt *bp, struct bnxt_representor *rep_bp)
5908 {
5909         struct hwrm_cfa_pair_alloc_output *resp = bp->hwrm_cmd_resp_addr;
5910         struct hwrm_cfa_pair_alloc_input req = {0};
5911         int rc;
5912
5913         if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
5914                 PMD_DRV_LOG(DEBUG,
5915                             "Not a PF or trusted VF. Command not supported\n");
5916                 return 0;
5917         }
5918
5919         HWRM_PREP(&req, HWRM_CFA_PAIR_ALLOC, BNXT_USE_CHIMP_MB);
5920         req.pair_mode = HWRM_CFA_PAIR_FREE_INPUT_PAIR_MODE_REP2FN_TRUFLOW;
5921         snprintf(req.pair_name, sizeof(req.pair_name), "%svfr%d",
5922                  bp->eth_dev->data->name, rep_bp->vf_id);
5923
5924         req.pf_b_id = rep_bp->parent_pf_idx;
5925         req.vf_b_id = BNXT_REP_PF(rep_bp) ? rte_cpu_to_le_16(((uint16_t)-1)) :
5926                                                 rte_cpu_to_le_16(rep_bp->vf_id);
5927         req.vf_a_id = rte_cpu_to_le_16(bp->fw_fid);
5928         req.host_b_id = 1; /* TBD - Confirm if this is OK */
5929
5930         req.enables |= rep_bp->flags & BNXT_REP_Q_R2F_VALID ?
5931                         HWRM_CFA_PAIR_ALLOC_INPUT_ENABLES_Q_AB_VALID : 0;
5932         req.enables |= rep_bp->flags & BNXT_REP_Q_F2R_VALID ?
5933                         HWRM_CFA_PAIR_ALLOC_INPUT_ENABLES_Q_BA_VALID : 0;
5934         req.enables |= rep_bp->flags & BNXT_REP_FC_R2F_VALID ?
5935                         HWRM_CFA_PAIR_ALLOC_INPUT_ENABLES_FC_AB_VALID : 0;
5936         req.enables |= rep_bp->flags & BNXT_REP_FC_F2R_VALID ?
5937                         HWRM_CFA_PAIR_ALLOC_INPUT_ENABLES_FC_BA_VALID : 0;
5938
5939         req.q_ab = rep_bp->rep_q_r2f;
5940         req.q_ba = rep_bp->rep_q_f2r;
5941         req.fc_ab = rep_bp->rep_fc_r2f;
5942         req.fc_ba = rep_bp->rep_fc_f2r;
5943
5944         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5945         HWRM_CHECK_RESULT();
5946
5947         HWRM_UNLOCK();
5948         PMD_DRV_LOG(DEBUG, "%s %d allocated\n",
5949                     BNXT_REP_PF(rep_bp) ? "PFR" : "VFR", rep_bp->vf_id);
5950         return rc;
5951 }
5952
/* Tear down a representor/function CFA pairing previously created by
 * bnxt_hwrm_cfa_pair_alloc() (HWRM_CFA_PAIR_FREE).  The pair_name must
 * be rebuilt exactly as at alloc time for FW to find the pairing.
 *
 * Only a PF or a trusted VF may issue this command; otherwise the
 * function is a silent no-op returning 0.
 */
int bnxt_hwrm_cfa_pair_free(struct bnxt *bp, struct bnxt_representor *rep_bp)
{
        /* resp looks unused but is referenced by HWRM_CHECK_RESULT(). */
        struct hwrm_cfa_pair_free_output *resp = bp->hwrm_cmd_resp_addr;
        struct hwrm_cfa_pair_free_input req = {0};
        int rc;

        if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
                PMD_DRV_LOG(DEBUG,
                            "Not a PF or trusted VF. Command not supported\n");
                return 0;
        }

        HWRM_PREP(&req, HWRM_CFA_PAIR_FREE, BNXT_USE_CHIMP_MB);
        snprintf(req.pair_name, sizeof(req.pair_name), "%svfr%d",
                 bp->eth_dev->data->name, rep_bp->vf_id);
        req.pf_b_id = rep_bp->parent_pf_idx;
        req.pair_mode = HWRM_CFA_PAIR_FREE_INPUT_PAIR_MODE_REP2FN_TRUFLOW;
        /* A PF representor has no VF id; 0xffff signals "none" to FW. */
        req.vf_id = BNXT_REP_PF(rep_bp) ? rte_cpu_to_le_16(((uint16_t)-1)) :
                                                rte_cpu_to_le_16(rep_bp->vf_id);
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();
        PMD_DRV_LOG(DEBUG, "%s %d freed\n", BNXT_REP_PF(rep_bp) ? "PFR" : "VFR",
                    rep_bp->vf_id);
        return rc;
}
5979
/* Query advanced flow-management capabilities
 * (HWRM_CFA_ADV_FLOW_MGNT_QCAPS) and record whether ntuple/RFS flows
 * can target an RX ring table index directly (V2) or require a
 * dedicated VNIC per flow.
 */
int bnxt_hwrm_cfa_adv_flow_mgmt_qcaps(struct bnxt *bp)
{
        struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp =
                                        bp->hwrm_cmd_resp_addr;
        struct hwrm_cfa_adv_flow_mgnt_qcaps_input req = {0};
        uint32_t flags = 0;
        int rc = 0;

        /* FW without advanced flow management: nothing to query. */
        if (!(bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_MGMT))
                return 0;

        if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
                PMD_DRV_LOG(DEBUG,
                            "Not a PF or trusted VF. Command not supported\n");
                return 0;
        }

        HWRM_PREP(&req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS, BNXT_USE_CHIMP_MB);
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        /* Copy out the capability flags before releasing the HWRM lock. */
        flags = rte_le_to_cpu_32(resp->flags);
        HWRM_UNLOCK();

        if (flags & HWRM_CFA_ADV_FLOW_MGNT_QCAPS_RFS_RING_TBL_IDX_V2_SUPPORTED)
                bp->flags |= BNXT_FLAG_FLOW_CFA_RFS_RING_TBL_IDX_V2;
        else
                bp->flags |= BNXT_FLAG_RFS_NEEDS_VNIC;

        return rc;
}
6011
/* Reply to a firmware echo-request async event by sending the two
 * event data words back to FW (HWRM_FUNC_ECHO_RESPONSE).
 */
int bnxt_hwrm_fw_echo_reply(struct bnxt *bp, uint32_t echo_req_data1,
                            uint32_t echo_req_data2)
{
        struct hwrm_func_echo_response_input req = {0};
        /* resp looks unused but is referenced by HWRM_CHECK_RESULT(). */
        struct hwrm_func_echo_response_output *resp = bp->hwrm_cmd_resp_addr;
        int rc;

        HWRM_PREP(&req, HWRM_FUNC_ECHO_RESPONSE, BNXT_USE_CHIMP_MB);
        req.event_data1 = rte_cpu_to_le_32(echo_req_data1);
        req.event_data2 = rte_cpu_to_le_32(echo_req_data2);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}
6030
/* Lightweight HWRM_VER_GET used to poll firmware liveness (e.g. during
 * reset recovery).  Uses the short command timeout and a silent result
 * check since failures are expected while FW is coming back up.
 */
int bnxt_hwrm_poll_ver_get(struct bnxt *bp)
{
        struct hwrm_ver_get_input req = {.req_type = 0 };
        /* resp looks unused but is referenced by HWRM_CHECK_RESULT_SILENT(). */
        struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
        int rc = 0;

        /* Use conservative defaults while FW state is unknown. */
        bp->max_req_len = HWRM_MAX_REQ_LEN;
        bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT;

        HWRM_PREP(&req, HWRM_VER_GET, BNXT_USE_CHIMP_MB);
        req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
        req.hwrm_intf_min = HWRM_VERSION_MINOR;
        req.hwrm_intf_upd = HWRM_VERSION_UPDATE;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        /* Silent variant: no error log spam while polling. */
        HWRM_CHECK_RESULT_SILENT();
        HWRM_UNLOCK();

        return rc;
}