net/bnxt: support timesync
drivers/net/bnxt/bnxt_hwrm.c
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) Broadcom Limited.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Broadcom Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <unistd.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_version.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_ring.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"

#include <rte_io.h>

#define HWRM_CMD_TIMEOUT		10000

struct bnxt_plcmodes_cfg {
	uint32_t	flags;
	uint16_t	jumbo_thresh;
	uint16_t	hds_offset;
	uint16_t	hds_threshold;
};

static int page_getenum(size_t size)
{
	if (size <= 1 << 4)
		return 4;
	if (size <= 1 << 12)
		return 12;
	if (size <= 1 << 13)
		return 13;
	if (size <= 1 << 16)
		return 16;
	if (size <= 1 << 21)
		return 21;
	if (size <= 1 << 22)
		return 22;
	if (size <= 1 << 30)
		return 30;
	RTE_LOG(ERR, PMD, "Page size %zu out of range\n", size);
	return sizeof(void *) * 8 - 1;
}

static int page_roundup(size_t size)
{
	return 1 << page_getenum(size);
}
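/*
 * Worked example (illustrative only): page_getenum(3000) returns 12,
 * so page_roundup(3000) returns 4096. Sizes are rounded up to the next
 * HWRM-supported page-size exponent, up to 1GB.
 */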

/*
 * HWRM Functions (sent to HWRM)
 * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
 * fails (i.e. on a timeout), and a positive non-zero HWRM error code if
 * the command is rejected by the ChiMP firmware.
 */
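/*
 * Illustrative sketch of this convention (bnxt_hwrm_func_reset() is
 * defined later in this file):
 *
 *	rc = bnxt_hwrm_func_reset(bp);
 *	if (rc < 0)
 *		... the channel timed out; no response from firmware ...
 *	else if (rc > 0)
 *		... firmware rejected the command; rc is the HWRM error code ...
 */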

static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
					uint32_t msg_len)
{
	unsigned int i;
	struct input *req = msg;
	struct output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t *data = msg;
	uint8_t *bar;
	uint8_t *valid;
	uint16_t max_req_len = bp->max_req_len;
	struct hwrm_short_input short_input = { 0 };

	if (bp->flags & BNXT_FLAG_SHORT_CMD) {
		void *short_cmd_req = bp->hwrm_short_cmd_req_addr;

		memset(short_cmd_req, 0, bp->max_req_len);
		memcpy(short_cmd_req, req, msg_len);

		short_input.req_type = rte_cpu_to_le_16(req->req_type);
		short_input.signature = rte_cpu_to_le_16(
					HWRM_SHORT_REQ_SIGNATURE_SHORT_CMD);
		short_input.size = rte_cpu_to_le_16(msg_len);
		short_input.req_addr =
			rte_cpu_to_le_64(bp->hwrm_short_cmd_req_dma_addr);

		data = (uint32_t *)&short_input;
		msg_len = sizeof(short_input);

		/* Sync memory write before updating doorbell */
		rte_wmb();

		max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
	}

	/* Write request msg to hwrm channel */
	for (i = 0; i < msg_len; i += 4) {
		bar = (uint8_t *)bp->bar0 + i;
		rte_write32(*data, bar);
		data++;
	}

	/* Zero the rest of the request space */
	for (; i < max_req_len; i += 4) {
		bar = (uint8_t *)bp->bar0 + i;
		rte_write32(0, bar);
	}

	/* Ring channel doorbell */
	bar = (uint8_t *)bp->bar0 + 0x100;
	rte_write32(1, bar);

	/* Poll for the valid bit */
	for (i = 0; i < HWRM_CMD_TIMEOUT; i++) {
		/* Sanity check on the resp->resp_len */
		rte_rmb();
		if (resp->resp_len && resp->resp_len <=
				bp->max_resp_len) {
			/* Last byte of resp contains the valid key */
			valid = (uint8_t *)resp + resp->resp_len - 1;
			if (*valid == HWRM_RESP_VALID_KEY)
				break;
		}
		rte_delay_us(600);
	}

	if (i >= HWRM_CMD_TIMEOUT) {
		RTE_LOG(ERR, PMD, "Error sending msg 0x%04x\n",
			req->req_type);
		goto err_ret;
	}
	return 0;

err_ret:
	return -1;
}

/*
 * HWRM_PREP() should be used to prepare *ALL* HWRM commands. It grabs the
 * spinlock and does the initial processing.
 *
 * HWRM_CHECK_RESULT() checks for errors; on failure it releases the
 * spinlock and returns from the enclosing function, so it may not return
 * to its caller. If the function does not use the regular int return
 * codes, HWRM_CHECK_RESULT() should not be used directly; rather it
 * should be copied and modified to suit the function.
 *
 * HWRM_UNLOCK() must be called after all response processing is completed.
 */
#define HWRM_PREP(req, type) do { \
	rte_spinlock_lock(&bp->hwrm_lock); \
	memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
	req.req_type = rte_cpu_to_le_16(HWRM_##type); \
	req.cmpl_ring = rte_cpu_to_le_16(-1); \
	req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
	req.target_id = rte_cpu_to_le_16(0xffff); \
	req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr); \
} while (0)

#define HWRM_CHECK_RESULT() do {\
	if (rc) { \
		RTE_LOG(ERR, PMD, "%s failed rc:%d\n", \
			__func__, rc); \
		rte_spinlock_unlock(&bp->hwrm_lock); \
		return rc; \
	} \
	if (resp->error_code) { \
		rc = rte_le_to_cpu_16(resp->error_code); \
		if (resp->resp_len >= 16) { \
			struct hwrm_err_output *tmp_hwrm_err_op = \
						(void *)resp; \
			RTE_LOG(ERR, PMD, \
				"%s error %d:%d:%08x:%04x\n", \
				__func__, \
				rc, tmp_hwrm_err_op->cmd_err, \
				rte_le_to_cpu_32(\
					tmp_hwrm_err_op->opaque_0), \
				rte_le_to_cpu_16(\
					tmp_hwrm_err_op->opaque_1)); \
		} else { \
			RTE_LOG(ERR, PMD, \
				"%s error %d\n", __func__, rc); \
		} \
		rte_spinlock_unlock(&bp->hwrm_lock); \
		return rc; \
	} \
} while (0)

#define HWRM_UNLOCK()		rte_spinlock_unlock(&bp->hwrm_lock)
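/*
 * Usage sketch (illustrative only; "xxx"/XXX are placeholders, not real
 * HWRM names). Every wrapper below follows this pattern:
 *
 *	struct hwrm_xxx_input req = {.req_type = 0 };
 *	struct hwrm_xxx_output *resp = bp->hwrm_cmd_resp_addr;
 *	int rc;
 *
 *	HWRM_PREP(req, XXX);			takes bp->hwrm_lock
 *	... fill request fields ...
 *	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 *	HWRM_CHECK_RESULT();			on error: unlocks, returns rc
 *	... read *resp while the lock is still held ...
 *	HWRM_UNLOCK();				releases bp->hwrm_lock
 *	return rc;
 */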

int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, CFA_L2_SET_RX_MASK);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
	req.mask = 0;

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
				 struct bnxt_vnic_info *vnic,
				 uint16_t vlan_count,
				 struct bnxt_vlan_table_entry *vlan_table)
{
	int rc = 0;
	struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t mask = 0;

	HWRM_PREP(req, CFA_L2_SET_RX_MASK);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

	/* FIXME add multicast flag, when multicast adding options is supported
	 * by ethtool.
	 */
	if (vnic->flags & BNXT_VNIC_INFO_BCAST)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST;
	if (vnic->flags & BNXT_VNIC_INFO_UNTAGGED)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN;
	if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
	if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
	if (vnic->flags & BNXT_VNIC_INFO_MCAST)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
	if (vnic->mc_addr_cnt) {
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
		req.num_mc_entries = rte_cpu_to_le_32(vnic->mc_addr_cnt);
		req.mc_tbl_addr = rte_cpu_to_le_64(vnic->mc_list_dma_addr);
	}
	if (vlan_table) {
		if (!(mask & HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN))
			mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
		req.vlan_tag_tbl_addr = rte_cpu_to_le_64(
			 rte_mem_virt2iova(vlan_table));
		req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count);
	}
	req.mask = rte_cpu_to_le_32(mask);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
			uint16_t vlan_count,
			struct bnxt_vlan_antispoof_table_entry *vlan_table)
{
	int rc = 0;
	struct hwrm_cfa_vlan_antispoof_cfg_input req = {.req_type = 0 };
	struct hwrm_cfa_vlan_antispoof_cfg_output *resp =
						bp->hwrm_cmd_resp_addr;

	/*
	 * Older HWRM versions did not support this command, and the set_rx_mask
	 * list was used for anti-spoof. In 1.8.0, the TX path configuration was
	 * removed from set_rx_mask call, and this command was added.
	 *
	 * This command is also present from 1.7.8.11 and higher,
	 * as well as 1.7.8.0
	 */
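	/*
	 * For reference: bnxt_hwrm_ver_get() packs bp->fw_ver as
	 * (major << 24) | (minor << 16) | (build << 8) | reserved,
	 * so, for example, 1.7.8.11 encodes as 0x0107080b and the
	 * 1.8.0 threshold below is (1 << 24) | (8 << 16).
	 */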
	if (bp->fw_ver < ((1 << 24) | (8 << 16))) {
		if (bp->fw_ver != ((1 << 24) | (7 << 16) | (8 << 8))) {
			if (bp->fw_ver < ((1 << 24) | (7 << 16) | (8 << 8) |
					(11)))
				return 0;
		}
	}
	HWRM_PREP(req, CFA_VLAN_ANTISPOOF_CFG);
	req.fid = rte_cpu_to_le_16(fid);

	req.vlan_tag_mask_tbl_addr =
		rte_cpu_to_le_64(rte_mem_virt2iova(vlan_table));
	req.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_clear_l2_filter(struct bnxt *bp,
			   struct bnxt_filter_info *filter)
{
	int rc = 0;
	struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;

	if (filter->fw_l2_filter_id == UINT64_MAX)
		return 0;

	HWRM_PREP(req, CFA_L2_FILTER_FREE);

	req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	filter->fw_l2_filter_id = -1;

	return 0;
}

int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
			 uint16_t dst_id,
			 struct bnxt_filter_info *filter)
{
	int rc = 0;
	struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	const struct rte_eth_vmdq_rx_conf *conf =
		    &dev_conf->rx_adv_conf.vmdq_rx_conf;
	uint32_t enables = 0;
	uint16_t j = dst_id - 1;

	//TODO: Is there a better way to add VLANs to each VNIC in case of VMDQ
	if ((dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) &&
	    conf->pool_map[j].pools & (1UL << j)) {
		RTE_LOG(DEBUG, PMD,
			"Add vlan %u to vmdq pool %u\n",
			conf->pool_map[j].vlan_id, j);

		filter->l2_ivlan = conf->pool_map[j].vlan_id;
		filter->enables |=
			HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN |
			HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK;
	}

	if (filter->fw_l2_filter_id != UINT64_MAX)
		bnxt_hwrm_clear_l2_filter(bp, filter);

	HWRM_PREP(req, CFA_L2_FILTER_ALLOC);

	req.flags = rte_cpu_to_le_32(filter->flags);

	enables = filter->enables |
	      HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
	req.dst_id = rte_cpu_to_le_16(dst_id);

	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
		memcpy(req.l2_addr, filter->l2_addr,
		       ETHER_ADDR_LEN);
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
		memcpy(req.l2_addr_mask, filter->l2_addr_mask,
		       ETHER_ADDR_LEN);
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
		req.l2_ovlan = filter->l2_ovlan;
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN)
		req.l2_ivlan = filter->l2_ivlan;
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
		req.l2_ovlan_mask = filter->l2_ovlan_mask;
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK)
		req.l2_ivlan_mask = filter->l2_ivlan_mask;
	if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID)
		req.src_id = rte_cpu_to_le_32(filter->src_id);
	if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE)
		req.src_type = filter->src_type;

	req.enables = rte_cpu_to_le_32(enables);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
{
	struct hwrm_port_mac_cfg_input req = {.req_type = 0};
	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
	uint32_t flags = 0;
	int rc;

	if (!ptp)
		return 0;

	HWRM_PREP(req, PORT_MAC_CFG);

	if (ptp->rx_filter)
		flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE;
	else
		flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE;
	if (ptp->tx_tstamp_en)
		flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_ENABLE;
	else
		flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE;
	req.flags = rte_cpu_to_le_32(flags);
	req.enables =
	rte_cpu_to_le_32(PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE);
	req.rx_ts_capture_ptp_msg_type = rte_cpu_to_le_16(ptp->rxctl);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
	HWRM_UNLOCK();

	return rc;
}

static int bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_port_mac_ptp_qcfg_input req = {.req_type = 0};
	struct hwrm_port_mac_ptp_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;

/*	if (bp->hwrm_spec_code < 0x10801 || ptp)  TBD  */
	if (ptp)
		return 0;

	HWRM_PREP(req, PORT_MAC_PTP_QCFG);

	req.port_id = rte_cpu_to_le_16(bp->pf.port_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	if (!(resp->flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_DIRECT_ACCESS)) {
		HWRM_UNLOCK();
		return 0;
	}

	ptp = rte_zmalloc("ptp_cfg", sizeof(*ptp), 0);
	if (!ptp) {
		HWRM_UNLOCK();
		return -ENOMEM;
	}

	ptp->rx_regs[BNXT_PTP_RX_TS_L] =
		rte_le_to_cpu_32(resp->rx_ts_reg_off_lower);
	ptp->rx_regs[BNXT_PTP_RX_TS_H] =
		rte_le_to_cpu_32(resp->rx_ts_reg_off_upper);
	ptp->rx_regs[BNXT_PTP_RX_SEQ] =
		rte_le_to_cpu_32(resp->rx_ts_reg_off_seq_id);
	ptp->rx_regs[BNXT_PTP_RX_FIFO] =
		rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo);
	ptp->rx_regs[BNXT_PTP_RX_FIFO_ADV] =
		rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo_adv);
	ptp->tx_regs[BNXT_PTP_TX_TS_L] =
		rte_le_to_cpu_32(resp->tx_ts_reg_off_lower);
	ptp->tx_regs[BNXT_PTP_TX_TS_H] =
		rte_le_to_cpu_32(resp->tx_ts_reg_off_upper);
	ptp->tx_regs[BNXT_PTP_TX_SEQ] =
		rte_le_to_cpu_32(resp->tx_ts_reg_off_seq_id);
	ptp->tx_regs[BNXT_PTP_TX_FIFO] =
		rte_le_to_cpu_32(resp->tx_ts_reg_off_fifo);

	HWRM_UNLOCK();

	ptp->bp = bp;
	bp->ptp_cfg = ptp;

	return 0;
}

int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_func_qcaps_input req = {.req_type = 0 };
	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
	uint16_t new_max_vfs;
	uint32_t flags;
	int i;

	HWRM_PREP(req, FUNC_QCAPS);

	req.fid = rte_cpu_to_le_16(0xffff);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
	flags = rte_le_to_cpu_32(resp->flags);
	if (BNXT_PF(bp)) {
		bp->pf.port_id = resp->port_id;
		bp->pf.first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
		new_max_vfs = bp->pdev->max_vfs;
		if (new_max_vfs != bp->pf.max_vfs) {
			if (bp->pf.vf_info)
				rte_free(bp->pf.vf_info);
			bp->pf.vf_info = rte_malloc("bnxt_vf_info",
			    sizeof(bp->pf.vf_info[0]) * new_max_vfs, 0);
			bp->pf.max_vfs = new_max_vfs;
			for (i = 0; i < new_max_vfs; i++) {
				bp->pf.vf_info[i].fid = bp->pf.first_vf_id + i;
				bp->pf.vf_info[i].vlan_table =
					rte_zmalloc("VF VLAN table",
						    getpagesize(),
						    getpagesize());
				if (bp->pf.vf_info[i].vlan_table == NULL)
					RTE_LOG(ERR, PMD,
					"Fail to alloc VLAN table for VF %d\n",
					i);
				else
					rte_mem_lock_page(
						bp->pf.vf_info[i].vlan_table);
				bp->pf.vf_info[i].vlan_as_table =
					rte_zmalloc("VF VLAN AS table",
						    getpagesize(),
						    getpagesize());
				if (bp->pf.vf_info[i].vlan_as_table == NULL)
					RTE_LOG(ERR, PMD,
					"Alloc VLAN AS table for VF %d fail\n",
					i);
				else
					rte_mem_lock_page(
					       bp->pf.vf_info[i].vlan_as_table);
				STAILQ_INIT(&bp->pf.vf_info[i].filter);
			}
		}
	}

	bp->fw_fid = rte_le_to_cpu_32(resp->fid);
	memcpy(bp->dflt_mac_addr, &resp->mac_address, ETHER_ADDR_LEN);
	bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
	bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
	bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
	bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
	bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
	/* TODO: For now, do not support VMDq/RFS on VFs. */
	if (BNXT_PF(bp)) {
		if (bp->pf.max_vfs)
			bp->max_vnics = 1;
		else
			bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
	} else {
		bp->max_vnics = 1;
	}
	bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
	if (BNXT_PF(bp)) {
		bp->pf.total_vnics = rte_le_to_cpu_16(resp->max_vnics);
		if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED) {
			bp->flags |= BNXT_FLAG_PTP_SUPPORTED;
			RTE_LOG(INFO, PMD, "PTP SUPPORTED\n");
			/* The lock is already released; do not unlock twice. */
			HWRM_UNLOCK();
			bnxt_hwrm_ptp_qcfg(bp);
			return rc;
		}
	}

	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_func_reset(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_func_reset_input req = {.req_type = 0 };
	struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_RESET);

	req.enables = rte_cpu_to_le_32(0);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_func_driver_register(struct bnxt *bp)
{
	int rc;
	struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
	struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;

	if (bp->flags & BNXT_FLAG_REGISTERED)
		return 0;

	HWRM_PREP(req, FUNC_DRV_RGTR);
	req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
			HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
	req.ver_maj = RTE_VER_YEAR;
	req.ver_min = RTE_VER_MONTH;
	req.ver_upd = RTE_VER_MINOR;

	if (BNXT_PF(bp)) {
		req.enables |= rte_cpu_to_le_32(
			HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_INPUT_FWD);
		memcpy(req.vf_req_fwd, bp->pf.vf_req_fwd,
		       RTE_MIN(sizeof(req.vf_req_fwd),
			       sizeof(bp->pf.vf_req_fwd)));
	}

	req.async_event_fwd[0] |= rte_cpu_to_le_32(0x1);   /* TODO: Use MACRO */
	//memset(req.async_event_fwd, 0xff, sizeof(req.async_event_fwd));

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	bp->flags |= BNXT_FLAG_REGISTERED;

	return rc;
}

int bnxt_hwrm_ver_get(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_ver_get_input req = {.req_type = 0 };
	struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t my_version;
	uint32_t fw_version;
	uint16_t max_resp_len;
	char type[RTE_MEMZONE_NAMESIZE];
	uint32_t dev_caps_cfg;

	bp->max_req_len = HWRM_MAX_REQ_LEN;
	HWRM_PREP(req, VER_GET);

	req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
	req.hwrm_intf_min = HWRM_VERSION_MINOR;
	req.hwrm_intf_upd = HWRM_VERSION_UPDATE;

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	RTE_LOG(INFO, PMD, "%d.%d.%d:%d.%d.%d\n",
		resp->hwrm_intf_maj, resp->hwrm_intf_min,
		resp->hwrm_intf_upd,
		resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld);
	bp->fw_ver = (resp->hwrm_fw_maj << 24) | (resp->hwrm_fw_min << 16) |
			(resp->hwrm_fw_bld << 8) | resp->hwrm_fw_rsvd;
	RTE_LOG(INFO, PMD, "Driver HWRM version: %d.%d.%d\n",
		HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);

	my_version = HWRM_VERSION_MAJOR << 16;
	my_version |= HWRM_VERSION_MINOR << 8;
	my_version |= HWRM_VERSION_UPDATE;

	fw_version = resp->hwrm_intf_maj << 16;
	fw_version |= resp->hwrm_intf_min << 8;
	fw_version |= resp->hwrm_intf_upd;

	if (resp->hwrm_intf_maj != HWRM_VERSION_MAJOR) {
		RTE_LOG(ERR, PMD, "Unsupported firmware API version\n");
		rc = -EINVAL;
		goto error;
	}

	if (my_version != fw_version) {
		RTE_LOG(INFO, PMD, "BNXT Driver/HWRM API mismatch.\n");
		if (my_version < fw_version) {
			RTE_LOG(INFO, PMD,
				"Firmware API version is newer than driver.\n");
			RTE_LOG(INFO, PMD,
				"The driver may be missing features.\n");
		} else {
			RTE_LOG(INFO, PMD,
				"Firmware API version is older than driver.\n");
			RTE_LOG(INFO, PMD,
				"Not all driver features may be functional.\n");
		}
	}

	if (bp->max_req_len > resp->max_req_win_len) {
		RTE_LOG(ERR, PMD, "Unsupported request length\n");
		rc = -EINVAL;
	}
	bp->max_req_len = rte_le_to_cpu_16(resp->max_req_win_len);
	max_resp_len = resp->max_resp_len;
	dev_caps_cfg = rte_le_to_cpu_32(resp->dev_caps_cfg);

	/* Build the name up front so it is valid for both buffers below. */
	sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x",
		bp->pdev->addr.domain, bp->pdev->addr.bus,
		bp->pdev->addr.devid, bp->pdev->addr.function);

	if (bp->max_resp_len != max_resp_len) {
		rte_free(bp->hwrm_cmd_resp_addr);

		bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
		if (bp->hwrm_cmd_resp_addr == NULL) {
			rc = -ENOMEM;
			goto error;
		}
		rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
		bp->hwrm_cmd_resp_dma_addr =
			rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
		if (bp->hwrm_cmd_resp_dma_addr == 0) {
			RTE_LOG(ERR, PMD,
			"Unable to map response buffer to physical memory.\n");
			rc = -ENOMEM;
			goto error;
		}
		bp->max_resp_len = max_resp_len;
	}

	if ((dev_caps_cfg &
		HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
	    (dev_caps_cfg &
	     HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_INPUTUIRED)) {
		RTE_LOG(DEBUG, PMD, "Short command supported\n");

		rte_free(bp->hwrm_short_cmd_req_addr);

		bp->hwrm_short_cmd_req_addr = rte_malloc(type,
							bp->max_req_len, 0);
		if (bp->hwrm_short_cmd_req_addr == NULL) {
			rc = -ENOMEM;
			goto error;
		}
		rte_mem_lock_page(bp->hwrm_short_cmd_req_addr);
		bp->hwrm_short_cmd_req_dma_addr =
			rte_mem_virt2iova(bp->hwrm_short_cmd_req_addr);
		if (bp->hwrm_short_cmd_req_dma_addr == 0) {
			rte_free(bp->hwrm_short_cmd_req_addr);
			RTE_LOG(ERR, PMD,
				"Unable to map buffer to physical memory.\n");
			rc = -ENOMEM;
			goto error;
		}

		bp->flags |= BNXT_FLAG_SHORT_CMD;
	}

error:
	HWRM_UNLOCK();
	return rc;
}

int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
{
	int rc;
	struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
	struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;

	if (!(bp->flags & BNXT_FLAG_REGISTERED))
		return 0;

	HWRM_PREP(req, FUNC_DRV_UNRGTR);
	req.flags = flags;

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	bp->flags &= ~BNXT_FLAG_REGISTERED;

	return rc;
}

static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
{
	int rc = 0;
	struct hwrm_port_phy_cfg_input req = {0};
	struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t enables = 0;

	HWRM_PREP(req, PORT_PHY_CFG);

	if (conf->link_up) {
		/* Setting a fixed speed while autoneg is on, so disable it */
		if (bp->link_info.auto_mode && conf->link_speed) {
			req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE;
			RTE_LOG(DEBUG, PMD, "Disabling AutoNeg\n");
		}

		req.flags = rte_cpu_to_le_32(conf->phy_flags);
		req.force_link_speed = rte_cpu_to_le_16(conf->link_speed);
		enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
		/*
		 * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
		 * any auto mode, even "none".
		 */
		if (!conf->link_speed) {
			/* No speeds specified. Enable AutoNeg - all speeds */
			req.auto_mode =
				HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS;
		}
		/* AutoNeg - Advertise speeds specified. */
		if (conf->auto_link_speed_mask) {
			req.auto_mode =
				HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
			req.auto_link_speed_mask =
				conf->auto_link_speed_mask;
			enables |=
			HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
		}

		req.auto_duplex = conf->duplex;
		enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
		req.auto_pause = conf->auto_pause;
		req.force_pause = conf->force_pause;
		/* Set force_pause if there is no auto or if there is a force */
		if (req.auto_pause && !req.force_pause)
			enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
		else
			enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;

		req.enables = rte_cpu_to_le_32(enables);
	} else {
		req.flags =
		rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN);
		RTE_LOG(INFO, PMD, "Force Link Down\n");
	}

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
				   struct bnxt_link_info *link_info)
{
	int rc = 0;
	struct hwrm_port_phy_qcfg_input req = {0};
	struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, PORT_PHY_QCFG);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	link_info->phy_link_status = resp->link;
	link_info->link_up =
		(link_info->phy_link_status ==
		 HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) ? 1 : 0;
	link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
	link_info->duplex = resp->duplex_cfg;
	link_info->pause = resp->pause;
	link_info->auto_pause = resp->auto_pause;
	link_info->force_pause = resp->force_pause;
	link_info->auto_mode = resp->auto_mode;
	link_info->phy_type = resp->phy_type;
	link_info->media_type = resp->media_type;

	link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
	link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
	link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
	link_info->phy_ver[0] = resp->phy_maj;
	link_info->phy_ver[1] = resp->phy_min;
	link_info->phy_ver[2] = resp->phy_bld;

	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
	struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, QUEUE_QPORTCFG);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

#define GET_QUEUE_INFO(x) \
	bp->cos_queue[x].id = resp->queue_id##x; \
	bp->cos_queue[x].profile = resp->queue_id##x##_service_profile
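	/*
	 * Token pasting means, for example, GET_QUEUE_INFO(1) expands to:
	 *	bp->cos_queue[1].id = resp->queue_id1;
	 *	bp->cos_queue[1].profile = resp->queue_id1_service_profile;
	 */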

	GET_QUEUE_INFO(0);
	GET_QUEUE_INFO(1);
	GET_QUEUE_INFO(2);
	GET_QUEUE_INFO(3);
	GET_QUEUE_INFO(4);
	GET_QUEUE_INFO(5);
	GET_QUEUE_INFO(6);
	GET_QUEUE_INFO(7);

	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_ring_alloc(struct bnxt *bp,
			 struct bnxt_ring *ring,
			 uint32_t ring_type, uint32_t map_index,
			 uint32_t stats_ctx_id, uint32_t cmpl_ring_id)
{
	int rc = 0;
	uint32_t enables = 0;
	struct hwrm_ring_alloc_input req = {.req_type = 0 };
	struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_ALLOC);

	req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
	req.fbo = rte_cpu_to_le_32(0);
	/* Association of ring index with doorbell index */
	req.logical_id = rte_cpu_to_le_16(map_index);
	req.length = rte_cpu_to_le_32(ring->ring_size);

	switch (ring_type) {
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
		req.queue_id = bp->cos_queue[0].id;
		/* FALLTHROUGH */
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
		req.ring_type = ring_type;
		req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
		req.stat_ctx_id = rte_cpu_to_le_16(stats_ctx_id);
		if (stats_ctx_id != INVALID_STATS_CTX_ID)
			enables |=
			HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
		break;
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
		req.ring_type = ring_type;
		/*
		 * TODO: Some HWRM versions crash with
		 * HWRM_RING_ALLOC_INPUT_INT_MODE_POLL
		 */
		req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
		break;
	default:
		RTE_LOG(ERR, PMD, "hwrm alloc invalid ring type %d\n",
			ring_type);
		HWRM_UNLOCK();
		return -1;
	}
	req.enables = rte_cpu_to_le_32(enables);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	if (rc || resp->error_code) {
		if (rc == 0 && resp->error_code)
			rc = rte_le_to_cpu_16(resp->error_code);
		switch (ring_type) {
		case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
			RTE_LOG(ERR, PMD,
				"hwrm_ring_alloc cp failed. rc:%d\n", rc);
			HWRM_UNLOCK();
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
			RTE_LOG(ERR, PMD,
				"hwrm_ring_alloc rx failed. rc:%d\n", rc);
			HWRM_UNLOCK();
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
			RTE_LOG(ERR, PMD,
				"hwrm_ring_alloc tx failed. rc:%d\n", rc);
			HWRM_UNLOCK();
			return rc;
		default:
			RTE_LOG(ERR, PMD, "Invalid ring. rc:%d\n", rc);
			HWRM_UNLOCK();
			return rc;
		}
	}

	ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
	HWRM_UNLOCK();
	return rc;
}

int bnxt_hwrm_ring_free(struct bnxt *bp,
			struct bnxt_ring *ring, uint32_t ring_type)
{
	int rc;
	struct hwrm_ring_free_input req = {.req_type = 0 };
	struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_FREE);

	req.ring_type = ring_type;
	req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	if (rc || resp->error_code) {
		if (rc == 0 && resp->error_code)
			rc = rte_le_to_cpu_16(resp->error_code);
		HWRM_UNLOCK();

		switch (ring_type) {
		case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
			RTE_LOG(ERR, PMD, "hwrm_ring_free cp failed. rc:%d\n",
				rc);
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
			RTE_LOG(ERR, PMD, "hwrm_ring_free rx failed. rc:%d\n",
				rc);
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
			RTE_LOG(ERR, PMD, "hwrm_ring_free tx failed. rc:%d\n",
				rc);
			return rc;
		default:
			RTE_LOG(ERR, PMD, "Invalid ring, rc:%d\n", rc);
			return rc;
		}
	}
	HWRM_UNLOCK();
	return 0;
}
1054
1055 int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
1056 {
1057         int rc = 0;
1058         struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
1059         struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1060
1061         HWRM_PREP(req, RING_GRP_ALLOC);
1062
1063         req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
1064         req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
1065         req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
1066         req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);
1067
1068         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1069
1070         HWRM_CHECK_RESULT();
1071
1072         bp->grp_info[idx].fw_grp_id =
1073             rte_le_to_cpu_16(resp->ring_group_id);
1074
1075         HWRM_UNLOCK();
1076
1077         return rc;
1078 }
1079
1080 int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
1081 {
1082         int rc;
1083         struct hwrm_ring_grp_free_input req = {.req_type = 0 };
1084         struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;
1085
1086         HWRM_PREP(req, RING_GRP_FREE);
1087
1088         req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);
1089
1090         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1091
1092         HWRM_CHECK_RESULT();
1093         HWRM_UNLOCK();
1094
1095         bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
1096         return rc;
1097 }
1098
1099 int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
1100 {
1101         int rc = 0;
1102         struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
1103         struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
1104
1105         if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
1106                 return rc;
1107
1108         HWRM_PREP(req, STAT_CTX_CLR_STATS);
1109
1110         req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
1111
1112         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1113
1114         HWRM_CHECK_RESULT();
1115         HWRM_UNLOCK();
1116
1117         return rc;
1118 }
1119
1120 int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1121                                 unsigned int idx __rte_unused)
1122 {
1123         int rc;
1124         struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
1125         struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1126
1127         HWRM_PREP(req, STAT_CTX_ALLOC);
1128
1129         req.update_period_ms = rte_cpu_to_le_32(0);
1130
1131         req.stats_dma_addr =
1132             rte_cpu_to_le_64(cpr->hw_stats_map);
1133
1134         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1135
1136         HWRM_CHECK_RESULT();
1137
1138         cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);
1139
1140         HWRM_UNLOCK();
1141         bp->grp_info[idx].fw_stats_ctx = cpr->hw_stats_ctx_id;
1142
1143         return rc;
1144 }
1145
1146 int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1147                                 unsigned int idx __rte_unused)
1148 {
1149         int rc;
1150         struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
1151         struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;
1152
1153         HWRM_PREP(req, STAT_CTX_FREE);
1154
1155         req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
1156
1157         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1158
1159         HWRM_CHECK_RESULT();
1160         HWRM_UNLOCK();
1161
1162         return rc;
1163 }
1164
1165 int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1166 {
1167         int rc = 0, i, j;
1168         struct hwrm_vnic_alloc_input req = { 0 };
1169         struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1170
1171         /* map ring groups to this vnic */
1172         RTE_LOG(DEBUG, PMD, "Alloc VNIC. Start %x, End %x\n",
1173                 vnic->start_grp_id, vnic->end_grp_id);
1174         for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++)
1175                 vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
1176         vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
1177         vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
1178         vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
1179         vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
1180         vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +
1181                                 ETHER_CRC_LEN + VLAN_TAG_SIZE;
1182         HWRM_PREP(req, VNIC_ALLOC);
1183
1184         if (vnic->func_default)
1185                 req.flags = HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT;
1186         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1187
1188         HWRM_CHECK_RESULT();
1189
1190         vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
1191         HWRM_UNLOCK();
1192         RTE_LOG(DEBUG, PMD, "VNIC ID %x\n", vnic->fw_vnic_id);
1193         return rc;
1194 }
1195
1196 static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
1197                                         struct bnxt_vnic_info *vnic,
1198                                         struct bnxt_plcmodes_cfg *pmode)
1199 {
1200         int rc = 0;
1201         struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 };
1202         struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1203
1204         HWRM_PREP(req, VNIC_PLCMODES_QCFG);
1205
1206         req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
1207
1208         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1209
1210         HWRM_CHECK_RESULT();
1211
1212         pmode->flags = rte_le_to_cpu_32(resp->flags);
1213         /* dflt_vnic bit doesn't exist in the _cfg command */
1214         pmode->flags &= ~(HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC);
1215         pmode->jumbo_thresh = rte_le_to_cpu_16(resp->jumbo_thresh);
1216         pmode->hds_offset = rte_le_to_cpu_16(resp->hds_offset);
1217         pmode->hds_threshold = rte_le_to_cpu_16(resp->hds_threshold);
1218
1219         HWRM_UNLOCK();
1220
1221         return rc;
1222 }
1223
1224 static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
1225                                        struct bnxt_vnic_info *vnic,
1226                                        struct bnxt_plcmodes_cfg *pmode)
1227 {
1228         int rc = 0;
1229         struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
1230         struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1231
1232         HWRM_PREP(req, VNIC_PLCMODES_CFG);
1233
1234         req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
1235         req.flags = rte_cpu_to_le_32(pmode->flags);
1236         req.jumbo_thresh = rte_cpu_to_le_16(pmode->jumbo_thresh);
1237         req.hds_offset = rte_cpu_to_le_16(pmode->hds_offset);
1238         req.hds_threshold = rte_cpu_to_le_16(pmode->hds_threshold);
1239         req.enables = rte_cpu_to_le_32(
1240             HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID |
1241             HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID |
1242             HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID
1243         );
1244
1245         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1246
1247         HWRM_CHECK_RESULT();
1248         HWRM_UNLOCK();
1249
1250         return rc;
1251 }
1252
1253 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1254 {
1255         int rc = 0;
1256         struct hwrm_vnic_cfg_input req = {.req_type = 0 };
1257         struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1258         uint32_t ctx_enable_flag = 0;
1259         struct bnxt_plcmodes_cfg pmodes;
1260
1261         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1262                 RTE_LOG(DEBUG, PMD, "VNIC ID %x\n", vnic->fw_vnic_id);
1263                 return rc;
1264         }
1265
1266         rc = bnxt_hwrm_vnic_plcmodes_qcfg(bp, vnic, &pmodes);
1267         if (rc)
1268                 return rc;
1269
1270         HWRM_PREP(req, VNIC_CFG);
1271
1272         /* Only RSS support for now TBD: COS & LB */
1273         req.enables =
1274             rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP);
1275         if (vnic->lb_rule != 0xffff)
1276                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE;
1277         if (vnic->cos_rule != 0xffff)
1278                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE;
1279         if (vnic->rss_rule != 0xffff) {
1280                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_MRU;
1281                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
1282         }
1283         req.enables |= rte_cpu_to_le_32(ctx_enable_flag);
1284         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1285         req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);
1286         req.rss_rule = rte_cpu_to_le_16(vnic->rss_rule);
1287         req.cos_rule = rte_cpu_to_le_16(vnic->cos_rule);
1288         req.lb_rule = rte_cpu_to_le_16(vnic->lb_rule);
1289         req.mru = rte_cpu_to_le_16(vnic->mru);
1290         if (vnic->func_default)
1291                 req.flags |=
1292                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
1293         if (vnic->vlan_strip)
1294                 req.flags |=
1295                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
1296         if (vnic->bd_stall)
1297                 req.flags |=
1298                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
1299         if (vnic->roce_dual)
1300                 req.flags |= rte_cpu_to_le_32(
1301                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE);
1302         if (vnic->roce_only)
1303                 req.flags |= rte_cpu_to_le_32(
1304                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE);
1305         if (vnic->rss_dflt_cr)
1306                 req.flags |= rte_cpu_to_le_32(
1307                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE);
1308
1309         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1310
1311         HWRM_CHECK_RESULT();
1312         HWRM_UNLOCK();
1313
1314         rc = bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes);
1315
1316         return rc;
1317 }
1318
1319 int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
1320                 int16_t fw_vf_id)
1321 {
1322         int rc = 0;
1323         struct hwrm_vnic_qcfg_input req = {.req_type = 0 };
1324         struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1325
1326         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1327                 RTE_LOG(DEBUG, PMD, "VNIC QCFG ID %d\n", vnic->fw_vnic_id);
1328                 return rc;
1329         }
1330         HWRM_PREP(req, VNIC_QCFG);
1331
1332         req.enables =
1333                 rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID);
1334         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1335         req.vf_id = rte_cpu_to_le_16(fw_vf_id);
1336
1337         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1338
1339         HWRM_CHECK_RESULT();
1340
1341         vnic->dflt_ring_grp = rte_le_to_cpu_16(resp->dflt_ring_grp);
1342         vnic->rss_rule = rte_le_to_cpu_16(resp->rss_rule);
1343         vnic->cos_rule = rte_le_to_cpu_16(resp->cos_rule);
1344         vnic->lb_rule = rte_le_to_cpu_16(resp->lb_rule);
1345         vnic->mru = rte_le_to_cpu_16(resp->mru);
1346         vnic->func_default = rte_le_to_cpu_32(
1347                         resp->flags) & HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT;
1348         vnic->vlan_strip = rte_le_to_cpu_32(resp->flags) &
1349                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE;
1350         vnic->bd_stall = rte_le_to_cpu_32(resp->flags) &
1351                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE;
1352         vnic->roce_dual = rte_le_to_cpu_32(resp->flags) &
1353                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE;
1354         vnic->roce_only = rte_le_to_cpu_32(resp->flags) &
1355                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE;
1356         vnic->rss_dflt_cr = rte_le_to_cpu_32(resp->flags) &
1357                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE;
1358
1359         HWRM_UNLOCK();
1360
1361         return rc;
1362 }
1363
1364 int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1365 {
1366         int rc = 0;
1367         struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
1368         struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
1369                                                 bp->hwrm_cmd_resp_addr;
1370
1371         HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC);
1372
1373         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1374
1375         HWRM_CHECK_RESULT();
1376
1377         vnic->rss_rule = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
1378         HWRM_UNLOCK();
1379         RTE_LOG(DEBUG, PMD, "VNIC RSS Rule %x\n", vnic->rss_rule);
1380
1381         return rc;
1382 }
1383
1384 int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1385 {
1386         int rc = 0;
1387         struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
1388         struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
1389                                                 bp->hwrm_cmd_resp_addr;
1390
1391         if (vnic->rss_rule == 0xffff) {
1392                 RTE_LOG(DEBUG, PMD, "VNIC RSS Rule %x\n", vnic->rss_rule);
1393                 return rc;
1394         }
1395         HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE);
1396
1397         req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->rss_rule);
1398
1399         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1400
1401         HWRM_CHECK_RESULT();
1402         HWRM_UNLOCK();
1403
1404         vnic->rss_rule = INVALID_HW_RING_ID;
1405
1406         return rc;
1407 }
1408
1409 int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1410 {
1411         int rc = 0;
1412         struct hwrm_vnic_free_input req = {.req_type = 0 };
1413         struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;
1414
1415         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1416                 RTE_LOG(DEBUG, PMD, "VNIC FREE ID %x\n", vnic->fw_vnic_id);
1417                 return rc;
1418         }
1419
1420         HWRM_PREP(req, VNIC_FREE);
1421
1422         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1423
1424         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1425
1426         HWRM_CHECK_RESULT();
1427         HWRM_UNLOCK();
1428
1429         vnic->fw_vnic_id = INVALID_HW_RING_ID;
1430         return rc;
1431 }
1432
1433 int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
1434                            struct bnxt_vnic_info *vnic)
1435 {
1436         int rc = 0;
1437         struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
1438         struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1439
1440         HWRM_PREP(req, VNIC_RSS_CFG);
1441
1442         req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
1443
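             /*
              * The RSS indirection table and hash key live in DMA-able
              * memory owned by the VNIC; only their bus addresses are
              * handed to the firmware here.
              */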
1444         req.ring_grp_tbl_addr =
1445             rte_cpu_to_le_64(vnic->rss_table_dma_addr);
1446         req.hash_key_tbl_addr =
1447             rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
1448         req.rss_ctx_idx = rte_cpu_to_le_16(vnic->rss_rule);
1449
1450         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1451
1452         HWRM_CHECK_RESULT();
1453         HWRM_UNLOCK();
1454
1455         return rc;
1456 }
1457
1458 int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
1459                         struct bnxt_vnic_info *vnic)
1460 {
1461         int rc = 0;
1462         struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
1463         struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1464         uint16_t size;
1465
1466         HWRM_PREP(req, VNIC_PLCMODES_CFG);
1467
1468         req.flags = rte_cpu_to_le_32(
1469                         HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);
1470
1471         req.enables = rte_cpu_to_le_32(
1472                 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID);
1473
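             /*
              * Use the usable mbuf space (data room minus headroom) as the
              * jumbo threshold: packets larger than this are placed using
              * the jumbo (aggregation buffer) algorithm.
              */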
1474         size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
1475         size -= RTE_PKTMBUF_HEADROOM;
1476
1477         req.jumbo_thresh = rte_cpu_to_le_16(size);
1478         req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
1479
1480         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1481
1482         HWRM_CHECK_RESULT();
1483         HWRM_UNLOCK();
1484
1485         return rc;
1486 }
1487
1488 int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
1489                         struct bnxt_vnic_info *vnic, bool enable)
1490 {
1491         int rc = 0;
1492         struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
1493         struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1494
1495         HWRM_PREP(req, VNIC_TPA_CFG);
1496
1497         if (enable) {
1498                 req.enables = rte_cpu_to_le_32(
1499                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
1500                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS |
1501                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN);
1502                 req.flags = rte_cpu_to_le_32(
1503                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
1504                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
1505                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE |
1506                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO |
1507                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
1508                         HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ);
1509                 req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
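                     /*
                      * Driver-chosen TPA defaults: at most 5 segments per
                      * aggregation, the firmware maximum number of
                      * concurrent aggregations, and a 512 byte minimum
                      * aggregation length.
                      */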
1510                 req.max_agg_segs = rte_cpu_to_le_16(5);
1511                 req.max_aggs =
1512                         rte_cpu_to_le_16(HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX);
1513                 req.min_agg_len = rte_cpu_to_le_32(512);
1514         }
1515
1516         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1517
1518         HWRM_CHECK_RESULT();
1519         HWRM_UNLOCK();
1520
1521         return rc;
1522 }
1523
1524 int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
1525 {
1526         struct hwrm_func_cfg_input req = {0};
1527         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1528         int rc;
1529
1530         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
1531         req.enables = rte_cpu_to_le_32(
1532                         HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
1533         memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
1534         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
1535
1536         HWRM_PREP(req, FUNC_CFG);
1537
1538         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1539         HWRM_CHECK_RESULT();
1540         HWRM_UNLOCK();
1541
1542         bp->pf.vf_info[vf].random_mac = false;
1543
1544         return rc;
1545 }
1546
1547 int bnxt_hwrm_func_qstats_tx_drop(struct bnxt *bp, uint16_t fid,
1548                                   uint64_t *dropped)
1549 {
1550         int rc = 0;
1551         struct hwrm_func_qstats_input req = {.req_type = 0};
1552         struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
1553
1554         HWRM_PREP(req, FUNC_QSTATS);
1555
1556         req.fid = rte_cpu_to_le_16(fid);
1557
1558         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1559
1560         HWRM_CHECK_RESULT();
1561
1562         if (dropped)
1563                 *dropped = rte_le_to_cpu_64(resp->tx_drop_pkts);
1564
1565         HWRM_UNLOCK();
1566
1567         return rc;
1568 }
1569
1570 int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
1571                           struct rte_eth_stats *stats)
1572 {
1573         int rc = 0;
1574         struct hwrm_func_qstats_input req = {.req_type = 0};
1575         struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
1576
1577         HWRM_PREP(req, FUNC_QSTATS);
1578
1579         req.fid = rte_cpu_to_le_16(fid);
1580
1581         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1582
1583         HWRM_CHECK_RESULT();
1584
1585         stats->ipackets = rte_le_to_cpu_64(resp->rx_ucast_pkts);
1586         stats->ipackets += rte_le_to_cpu_64(resp->rx_mcast_pkts);
1587         stats->ipackets += rte_le_to_cpu_64(resp->rx_bcast_pkts);
1588         stats->ibytes = rte_le_to_cpu_64(resp->rx_ucast_bytes);
1589         stats->ibytes += rte_le_to_cpu_64(resp->rx_mcast_bytes);
1590         stats->ibytes += rte_le_to_cpu_64(resp->rx_bcast_bytes);
1591
1592         stats->opackets = rte_le_to_cpu_64(resp->tx_ucast_pkts);
1593         stats->opackets += rte_le_to_cpu_64(resp->tx_mcast_pkts);
1594         stats->opackets += rte_le_to_cpu_64(resp->tx_bcast_pkts);
1595         stats->obytes = rte_le_to_cpu_64(resp->tx_ucast_bytes);
1596         stats->obytes += rte_le_to_cpu_64(resp->tx_mcast_bytes);
1597         stats->obytes += rte_le_to_cpu_64(resp->tx_bcast_bytes);
1598
1599         stats->ierrors = rte_le_to_cpu_64(resp->rx_err_pkts);
1600         stats->oerrors = rte_le_to_cpu_64(resp->tx_err_pkts);
1601
1602         stats->imissed = rte_le_to_cpu_64(resp->rx_drop_pkts);
1603
1604         HWRM_UNLOCK();
1605
1606         return rc;
1607 }
1608
1609 int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid)
1610 {
1611         int rc = 0;
1612         struct hwrm_func_clr_stats_input req = {.req_type = 0};
1613         struct hwrm_func_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
1614
1615         HWRM_PREP(req, FUNC_CLR_STATS);
1616
1617         req.fid = rte_cpu_to_le_16(fid);
1618
1619         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1620
1621         HWRM_CHECK_RESULT();
1622         HWRM_UNLOCK();
1623
1624         return rc;
1625 }
1626
1627 /*
1628  * HWRM utility functions
1629  */
1630
1631 int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
1632 {
1633         unsigned int i;
1634         int rc = 0;
1635
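             /*
              * Completion rings are walked with Rx rings occupying the
              * first rx_cp_nr_rings slots and Tx rings following,
              * mirroring the allocation order.
              */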
1636         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1637                 struct bnxt_tx_queue *txq;
1638                 struct bnxt_rx_queue *rxq;
1639                 struct bnxt_cp_ring_info *cpr;
1640
1641                 if (i >= bp->rx_cp_nr_rings) {
1642                         txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
1643                         cpr = txq->cp_ring;
1644                 } else {
1645                         rxq = bp->rx_queues[i];
1646                         cpr = rxq->cp_ring;
1647                 }
1648
1649                 rc = bnxt_hwrm_stat_clear(bp, cpr);
1650                 if (rc)
1651                         return rc;
1652         }
1653         return 0;
1654 }
1655
1656 int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
1657 {
1658         int rc;
1659         unsigned int i;
1660         struct bnxt_cp_ring_info *cpr;
1661
1662         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1663
1664                 if (i >= bp->rx_cp_nr_rings)
1665                         cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
1666                 else
1667                         cpr = bp->rx_queues[i]->cp_ring;
1668                 if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
1669                         rc = bnxt_hwrm_stat_ctx_free(bp, cpr, i);
1670                         cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
1671                         /*
1672                          * TODO. Need a better way to reset grp_info.stats_ctx
1673                          * for Rx rings only. stats_ctx is not saved for Tx
1674                          * in grp_info.
1675                          */
1676                         bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
1677                         if (rc)
1678                                 return rc;
1679                 }
1680         }
1681         return 0;
1682 }
1683
1684 int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
1685 {
1686         unsigned int i;
1687         int rc = 0;
1688
1689         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1690                 struct bnxt_tx_queue *txq;
1691                 struct bnxt_rx_queue *rxq;
1692                 struct bnxt_cp_ring_info *cpr;
1693
1694                 if (i >= bp->rx_cp_nr_rings) {
1695                         txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
1696                         cpr = txq->cp_ring;
1697                 } else {
1698                         rxq = bp->rx_queues[i];
1699                         cpr = rxq->cp_ring;
1700                 }
1701
1702                 rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, i);
1703
1704                 if (rc)
1705                         return rc;
1706         }
1707         return rc;
1708 }
1709
1710 int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
1711 {
1712         uint16_t idx;
1713         int rc = 0;
1714
1715         for (idx = 0; idx < bp->rx_cp_nr_rings; idx++) {
1716
1717                 if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID)
1718                         continue;
1719
1720                 rc = bnxt_hwrm_ring_grp_free(bp, idx);
1721
1722                 if (rc)
1723                         return rc;
1724         }
1725         return rc;
1726 }
1727
1728 static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1729                                 unsigned int idx __rte_unused)
1730 {
1731         struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
1732
1733         bnxt_hwrm_ring_free(bp, cp_ring,
1734                         HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL);
1735         cp_ring->fw_ring_id = INVALID_HW_RING_ID;
1736         bp->grp_info[idx].cp_fw_ring_id = INVALID_HW_RING_ID;
1737         memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
1738                         sizeof(*cpr->cp_desc_ring));
1739         cpr->cp_raw_cons = 0;
1740 }
1741
1742 int bnxt_free_all_hwrm_rings(struct bnxt *bp)
1743 {
1744         unsigned int i;
1745         int rc = 0;
1746
1747         for (i = 0; i < bp->tx_cp_nr_rings; i++) {
1748                 struct bnxt_tx_queue *txq = bp->tx_queues[i];
1749                 struct bnxt_tx_ring_info *txr = txq->tx_ring;
1750                 struct bnxt_ring *ring = txr->tx_ring_struct;
1751                 struct bnxt_cp_ring_info *cpr = txq->cp_ring;
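                     /*
                      * Ring group index 0 is the default completion ring
                      * (freed last, below); data rings start at 1 with Rx
                      * first, then Tx.
                      */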
1752                 unsigned int idx = bp->rx_cp_nr_rings + i + 1;
1753
1754                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1755                         bnxt_hwrm_ring_free(bp, ring,
1756                                         HWRM_RING_FREE_INPUT_RING_TYPE_TX);
1757                         ring->fw_ring_id = INVALID_HW_RING_ID;
1758                         memset(txr->tx_desc_ring, 0,
1759                                         txr->tx_ring_struct->ring_size *
1760                                         sizeof(*txr->tx_desc_ring));
1761                         memset(txr->tx_buf_ring, 0,
1762                                         txr->tx_ring_struct->ring_size *
1763                                         sizeof(*txr->tx_buf_ring));
1764                         txr->tx_prod = 0;
1765                         txr->tx_cons = 0;
1766                 }
1767                 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1768                         bnxt_free_cp_ring(bp, cpr, idx);
1769                         cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
1770                 }
1771         }
1772
1773         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
1774                 struct bnxt_rx_queue *rxq = bp->rx_queues[i];
1775                 struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
1776                 struct bnxt_ring *ring = rxr->rx_ring_struct;
1777                 struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
1778                 unsigned int idx = i + 1;
1779
1780                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1781                         bnxt_hwrm_ring_free(bp, ring,
1782                                         HWRM_RING_FREE_INPUT_RING_TYPE_RX);
1783                         ring->fw_ring_id = INVALID_HW_RING_ID;
1784                         bp->grp_info[idx].rx_fw_ring_id = INVALID_HW_RING_ID;
1785                         memset(rxr->rx_desc_ring, 0,
1786                                         rxr->rx_ring_struct->ring_size *
1787                                         sizeof(*rxr->rx_desc_ring));
1788                         memset(rxr->rx_buf_ring, 0,
1789                                         rxr->rx_ring_struct->ring_size *
1790                                         sizeof(*rxr->rx_buf_ring));
1791                         rxr->rx_prod = 0;
1792                         memset(rxr->ag_buf_ring, 0,
1793                                         rxr->ag_ring_struct->ring_size *
1794                                         sizeof(*rxr->ag_buf_ring));
1795                         rxr->ag_prod = 0;
1796                 }
1797                 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1798                         bnxt_free_cp_ring(bp, cpr, idx);
1799                         bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
1800                         cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
1801                 }
1802         }
1803
1804         /* Default completion ring */
1805         {
1806                 struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
1807
1808                 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1809                         bnxt_free_cp_ring(bp, cpr, 0);
1810                         cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
1811                 }
1812         }
1813
1814         return rc;
1815 }
1816
1817 int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
1818 {
1819         uint16_t i;
1820         int rc = 0;
1821
1822         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
1823                 rc = bnxt_hwrm_ring_grp_alloc(bp, i);
1824                 if (rc)
1825                         return rc;
1826         }
1827         return rc;
1828 }
1829
1830 void bnxt_free_hwrm_resources(struct bnxt *bp)
1831 {
1832         /* Free the HWRM response and short command buffers */
1833         rte_free(bp->hwrm_cmd_resp_addr);
1834         rte_free(bp->hwrm_short_cmd_req_addr);
1835         bp->hwrm_cmd_resp_addr = NULL;
1836         bp->hwrm_short_cmd_req_addr = NULL;
1837         bp->hwrm_cmd_resp_dma_addr = 0;
1838         bp->hwrm_short_cmd_req_dma_addr = 0;
1839 }
1840
1841 int bnxt_alloc_hwrm_resources(struct bnxt *bp)
1842 {
1843         struct rte_pci_device *pdev = bp->pdev;
1844         char type[RTE_MEMZONE_NAMESIZE];
1845
1846         sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x", pdev->addr.domain,
1847                 pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
1848         bp->max_resp_len = HWRM_MAX_RESP_LEN;
1849         bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
1850         if (bp->hwrm_cmd_resp_addr == NULL)
1851                 return -ENOMEM;
1852         rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
1853         bp->hwrm_cmd_resp_dma_addr =
1854                 rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
1855         if (bp->hwrm_cmd_resp_dma_addr == 0) {
1856                 RTE_LOG(ERR, PMD,
1857                         "unable to map response address to physical memory\n");
1858                 return -ENOMEM;
1859         }
1860         rte_spinlock_init(&bp->hwrm_lock);
1861
1862         return 0;
1863 }
1864
1865 int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1866 {
1867         struct bnxt_filter_info *filter;
1868         int rc = 0;
1869
1870         STAILQ_FOREACH(filter, &vnic->filter, next) {
1871                 if (filter->filter_type == HWRM_CFA_EM_FILTER)
1872                         rc = bnxt_hwrm_clear_em_filter(bp, filter);
1873                 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
1874                         rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
1875                 else
1876                         rc = bnxt_hwrm_clear_l2_filter(bp, filter);
1877                 /* Keep clearing the remaining filters even if one
1878                  * fails; the last error, if any, is returned. */
1879         }
1880         return rc;
1881 }
1882
1883 static int
1884 bnxt_clear_hwrm_vnic_flows(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1885 {
1886         struct bnxt_filter_info *filter;
1887         struct rte_flow *flow;
1888         int rc = 0;
1889
1890         STAILQ_FOREACH(flow, &vnic->flow_list, next) {
1891                 filter = flow->filter;
1892                 RTE_LOG(DEBUG, PMD, "filter type %d\n", filter->filter_type);
1893                 if (filter->filter_type == HWRM_CFA_EM_FILTER)
1894                         rc = bnxt_hwrm_clear_em_filter(bp, filter);
1895                 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
1896                         rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
1897                 else
1898                         rc = bnxt_hwrm_clear_l2_filter(bp, filter);
1899
1900                 STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
1901                 rte_free(flow);
1902                 /* Keep clearing the remaining flows even if one
1903                  * fails; the last error, if any, is returned. */
1904         }
1905         return rc;
1906 }
1907
1908 int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1909 {
1910         struct bnxt_filter_info *filter;
1911         int rc = 0;
1912
1913         STAILQ_FOREACH(filter, &vnic->filter, next) {
1914                 if (filter->filter_type == HWRM_CFA_EM_FILTER)
1915                         rc = bnxt_hwrm_set_em_filter(bp, filter->dst_id,
1916                                                      filter);
1917                 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
1918                         rc = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id,
1919                                                          filter);
1920                 else
1921                         rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
1922                                                      filter);
1923                 if (rc)
1924                         break;
1925         }
1926         return rc;
1927 }
1928
1929 void bnxt_free_tunnel_ports(struct bnxt *bp)
1930 {
1931         if (bp->vxlan_port_cnt)
1932                 bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id,
1933                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN);
1934         bp->vxlan_port = 0;
1935         if (bp->geneve_port_cnt)
1936                 bnxt_hwrm_tunnel_dst_port_free(bp, bp->geneve_fw_dst_port_id,
1937                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE);
1938         bp->geneve_port = 0;
1939 }
1940
1941 void bnxt_free_all_hwrm_resources(struct bnxt *bp)
1942 {
1943         int i;
1944
1945         if (bp->vnic_info == NULL)
1946                 return;
1947
1948         /*
1949          * Clean up VNICs in reverse order so that the L2 filter
1950          * from vnic0 is last to be cleaned up.
1951          */
1952         for (i = bp->nr_vnics - 1; i >= 0; i--) {
1953                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
1954
1955                 bnxt_clear_hwrm_vnic_flows(bp, vnic);
1956
1957                 bnxt_clear_hwrm_vnic_filters(bp, vnic);
1958
1959                 bnxt_hwrm_vnic_ctx_free(bp, vnic);
1960
1961                 bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);
1962
1963                 bnxt_hwrm_vnic_free(bp, vnic);
1964         }
1965         /* Ring resources */
1966         bnxt_free_all_hwrm_rings(bp);
1967         bnxt_free_all_hwrm_ring_grps(bp);
1968         bnxt_free_all_hwrm_stat_ctxs(bp);
1969         bnxt_free_tunnel_ports(bp);
1970 }
1971
1972 static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
1973 {
1974         uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
1975
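             /* ETH_LINK_SPEED_AUTONEG is 0; this checks the FIXED bit is clear. */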
1976         if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
1977                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
1978
1979         switch (conf_link_speed) {
1980         case ETH_LINK_SPEED_10M_HD:
1981         case ETH_LINK_SPEED_100M_HD:
1982                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
1983         }
1984         return hw_link_duplex;
1985 }
1986
1987 static uint16_t bnxt_check_eth_link_autoneg(uint32_t conf_link)
1988 {
1989         return (conf_link & ETH_LINK_SPEED_FIXED) ? 0 : 1;
1990 }
1991
1992 static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
1993 {
1994         uint16_t eth_link_speed = 0;
1995
1996         if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
1997                 return ETH_LINK_SPEED_AUTONEG;
1998
1999         switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
2000         case ETH_LINK_SPEED_100M:
2001         case ETH_LINK_SPEED_100M_HD:
2002                 eth_link_speed =
2003                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
2004                 break;
2005         case ETH_LINK_SPEED_1G:
2006                 eth_link_speed =
2007                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
2008                 break;
2009         case ETH_LINK_SPEED_2_5G:
2010                 eth_link_speed =
2011                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
2012                 break;
2013         case ETH_LINK_SPEED_10G:
2014                 eth_link_speed =
2015                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
2016                 break;
2017         case ETH_LINK_SPEED_20G:
2018                 eth_link_speed =
2019                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
2020                 break;
2021         case ETH_LINK_SPEED_25G:
2022                 eth_link_speed =
2023                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
2024                 break;
2025         case ETH_LINK_SPEED_40G:
2026                 eth_link_speed =
2027                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
2028                 break;
2029         case ETH_LINK_SPEED_50G:
2030                 eth_link_speed =
2031                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
2032                 break;
2033         default:
2034                 RTE_LOG(ERR, PMD,
2035                         "Unsupported link speed %u; default to AUTO\n",
2036                         conf_link_speed);
2037                 break;
2038         }
2039         return eth_link_speed;
2040 }
2041
2042 #define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
2043                 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
2044                 ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
2045                 ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G)
2046
2047 static int bnxt_valid_link_speed(uint32_t link_speed, uint16_t port_id)
2048 {
2049         uint32_t one_speed;
2050
2051         if (link_speed == ETH_LINK_SPEED_AUTONEG)
2052                 return 0;
2053
2054         if (link_speed & ETH_LINK_SPEED_FIXED) {
2055                 one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
2056
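                     /*
                      * A fixed speed must have exactly one speed bit set;
                      * x & (x - 1) is non-zero when more than one bit is
                      * set.
                      */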
2057                 if (one_speed & (one_speed - 1)) {
2058                         RTE_LOG(ERR, PMD,
2059                                 "Invalid advertised speeds (%u) for port %u\n",
2060                                 link_speed, port_id);
2061                         return -EINVAL;
2062                 }
2063                 if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
2064                         RTE_LOG(ERR, PMD,
2065                                 "Unsupported advertised speed (%u) for port %u\n",
2066                                 link_speed, port_id);
2067                         return -EINVAL;
2068                 }
2069         } else {
2070                 if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
2071                         RTE_LOG(ERR, PMD,
2072                                 "Unsupported advertised speeds (%u) for port %u\n",
2073                                 link_speed, port_id);
2074                         return -EINVAL;
2075                 }
2076         }
2077         return 0;
2078 }
2079
2080 static uint16_t
2081 bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed)
2082 {
2083         uint16_t ret = 0;
2084
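             /*
              * For autoneg, advertise the speeds the PHY reports as
              * supported when available, otherwise everything the driver
              * can express.
              */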
2085         if (link_speed == ETH_LINK_SPEED_AUTONEG) {
2086                 if (bp->link_info.support_speeds)
2087                         return bp->link_info.support_speeds;
2088                 link_speed = BNXT_SUPPORTED_SPEEDS;
2089         }
2090
2091         if (link_speed & ETH_LINK_SPEED_100M)
2092                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
2093         if (link_speed & ETH_LINK_SPEED_100M_HD)
2094                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
2095         if (link_speed & ETH_LINK_SPEED_1G)
2096                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
2097         if (link_speed & ETH_LINK_SPEED_2_5G)
2098                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
2099         if (link_speed & ETH_LINK_SPEED_10G)
2100                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
2101         if (link_speed & ETH_LINK_SPEED_20G)
2102                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
2103         if (link_speed & ETH_LINK_SPEED_25G)
2104                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
2105         if (link_speed & ETH_LINK_SPEED_40G)
2106                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
2107         if (link_speed & ETH_LINK_SPEED_50G)
2108                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
2109         return ret;
2110 }
2111
2112 static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
2113 {
2114         uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;
2115
2116         switch (hw_link_speed) {
2117         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
2118                 eth_link_speed = ETH_SPEED_NUM_100M;
2119                 break;
2120         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
2121                 eth_link_speed = ETH_SPEED_NUM_1G;
2122                 break;
2123         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
2124                 eth_link_speed = ETH_SPEED_NUM_2_5G;
2125                 break;
2126         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
2127                 eth_link_speed = ETH_SPEED_NUM_10G;
2128                 break;
2129         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
2130                 eth_link_speed = ETH_SPEED_NUM_20G;
2131                 break;
2132         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
2133                 eth_link_speed = ETH_SPEED_NUM_25G;
2134                 break;
2135         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
2136                 eth_link_speed = ETH_SPEED_NUM_40G;
2137                 break;
2138         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
2139                 eth_link_speed = ETH_SPEED_NUM_50G;
2140                 break;
2141         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
2142         default:
2143                 RTE_LOG(ERR, PMD, "HWRM link speed %d not defined\n",
2144                         hw_link_speed);
2145                 break;
2146         }
2147         return eth_link_speed;
2148 }
2149
2150 static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
2151 {
2152         uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;
2153
2154         switch (hw_link_duplex) {
2155         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
2156         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
2157                 eth_link_duplex = ETH_LINK_FULL_DUPLEX;
2158                 break;
2159         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
2160                 eth_link_duplex = ETH_LINK_HALF_DUPLEX;
2161                 break;
2162         default:
2163                 RTE_LOG(ERR, PMD, "HWRM link duplex %d not defined\n",
2164                         hw_link_duplex);
2165                 break;
2166         }
2167         return eth_link_duplex;
2168 }
2169
2170 int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
2171 {
2172         int rc = 0;
2173         struct bnxt_link_info *link_info = &bp->link_info;
2174
2175         rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
2176         if (rc) {
2177                 RTE_LOG(ERR, PMD,
2178                         "Get link config failed with rc %d\n", rc);
2179                 goto exit;
2180         }
2181         if (link_info->link_speed)
2182                 link->link_speed =
2183                         bnxt_parse_hw_link_speed(link_info->link_speed);
2184         else
2185                 link->link_speed = ETH_SPEED_NUM_NONE;
2186         link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
2187         link->link_status = link_info->link_up;
2188         link->link_autoneg = link_info->auto_mode ==
2189                 HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
2190                 ETH_LINK_FIXED : ETH_LINK_AUTONEG;
2191 exit:
2192         return rc;
2193 }
2194
2195 int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
2196 {
2197         int rc = 0;
2198         struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
2199         struct bnxt_link_info link_req;
2200         uint16_t speed, autoneg;
2201
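             /* VFs and NPAR functions do not own the PHY; nothing to do. */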
2202         if (BNXT_NPAR_PF(bp) || BNXT_VF(bp))
2203                 return 0;
2204
2205         rc = bnxt_valid_link_speed(dev_conf->link_speeds,
2206                         bp->eth_dev->data->port_id);
2207         if (rc)
2208                 goto error;
2209
2210         memset(&link_req, 0, sizeof(link_req));
2211         link_req.link_up = link_up;
2212         if (!link_up)
2213                 goto port_phy_cfg;
2214
2215         autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
2216         speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
2217         link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
2218         if (autoneg == 1) {
2219                 link_req.phy_flags |=
2220                                 HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
2221                 link_req.auto_link_speed_mask =
2222                         bnxt_parse_eth_link_speed_mask(bp,
2223                                                        dev_conf->link_speeds);
2224         } else {
2225                 if (bp->link_info.phy_type ==
2226                     HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET ||
2227                     bp->link_info.phy_type ==
2228                     HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE ||
2229                     bp->link_info.media_type ==
2230                     HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP) {
2231                         RTE_LOG(ERR, PMD, "10GBase-T devices must autoneg\n");
2232                         return -EINVAL;
2233                 }
2234
2235                 link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
2236                 link_req.link_speed = speed;
2237         }
2238         link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
2239         link_req.auto_pause = bp->link_info.auto_pause;
2240         link_req.force_pause = bp->link_info.force_pause;
2241
2242 port_phy_cfg:
2243         rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
2244         if (rc) {
2245                 RTE_LOG(ERR, PMD,
2246                         "Set link config failed with rc %d\n", rc);
2247         }
2248
2249 error:
2250         return rc;
2251 }
2252
2253 /* JIRA 22088 */
2254 int bnxt_hwrm_func_qcfg(struct bnxt *bp)
2255 {
2256         struct hwrm_func_qcfg_input req = {0};
2257         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2258         int rc = 0;
2259
2260         HWRM_PREP(req, FUNC_QCFG);
2261         req.fid = rte_cpu_to_le_16(0xffff);
2262
2263         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2264
2265         HWRM_CHECK_RESULT();
2266
2267         /* Keep only the 12-bit VLAN ID; mask off the PCP/DEI bits */
2268         bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
2269
2270         switch (resp->port_partition_type) {
2271         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
2272         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
2273         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
2274                 bp->port_partition_type = resp->port_partition_type;
2275                 break;
2276         default:
2277                 bp->port_partition_type = 0;
2278                 break;
2279         }
2280
2281         HWRM_UNLOCK();
2282
2283         return rc;
2284 }
2285
2286 static void copy_func_cfg_to_qcaps(struct hwrm_func_cfg_input *fcfg,
2287                                    struct hwrm_func_qcaps_output *qcaps)
2288 {
2289         qcaps->max_rsscos_ctx = fcfg->num_rsscos_ctxs;
2290         memcpy(qcaps->mac_address, fcfg->dflt_mac_addr,
2291                sizeof(qcaps->mac_address));
2292         qcaps->max_l2_ctxs = fcfg->num_l2_ctxs;
2293         qcaps->max_rx_rings = fcfg->num_rx_rings;
2294         qcaps->max_tx_rings = fcfg->num_tx_rings;
2295         qcaps->max_cmpl_rings = fcfg->num_cmpl_rings;
2296         qcaps->max_stat_ctx = fcfg->num_stat_ctxs;
2297         qcaps->max_vfs = 0;
2298         qcaps->first_vf_id = 0;
2299         qcaps->max_vnics = fcfg->num_vnics;
2300         qcaps->max_decap_records = 0;
2301         qcaps->max_encap_records = 0;
2302         qcaps->max_tx_wm_flows = 0;
2303         qcaps->max_tx_em_flows = 0;
2304         qcaps->max_rx_wm_flows = 0;
2305         qcaps->max_rx_em_flows = 0;
2306         qcaps->max_flow_id = 0;
2307         qcaps->max_mcast_filters = fcfg->num_mcast_filters;
2308         qcaps->max_sp_tx_rings = 0;
2309         qcaps->max_hw_ring_grps = fcfg->num_hw_ring_grps;
2310 }
2311
2312 static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
2313 {
2314         struct hwrm_func_cfg_input req = {0};
2315         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2316         int rc;
2317
2318         req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
2319                         HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
2320                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
2321                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
2322                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
2323                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
2324                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
2325                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
2326                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
2327                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
2328         req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
2329         req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);
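             /* MRU covers the whole frame: MTU + L2 header + CRC + one VLAN tag */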
2330         req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2331                                    ETHER_CRC_LEN + VLAN_TAG_SIZE);
2332         req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
2333         req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx);
2334         req.num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings);
2335         req.num_tx_rings = rte_cpu_to_le_16(tx_rings);
2336         req.num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings);
2337         req.num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx);
2338         req.num_vnics = rte_cpu_to_le_16(bp->max_vnics);
2339         req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps);
2340         req.fid = rte_cpu_to_le_16(0xffff);
2341
2342         HWRM_PREP(req, FUNC_CFG);
2343
2344         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2345
2346         HWRM_CHECK_RESULT();
2347         HWRM_UNLOCK();
2348
2349         return rc;
2350 }
2351
2352 static void populate_vf_func_cfg_req(struct bnxt *bp,
2353                                      struct hwrm_func_cfg_input *req,
2354                                      int num_vfs)
2355 {
2356         req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
2357                         HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
2358                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
2359                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
2360                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
2361                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
2362                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
2363                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
2364                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
2365                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
2366
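             /*
              * Resources are split evenly between the PF and all VFs,
              * hence the (num_vfs + 1) divisor below.
              */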
2367         req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2368                                     ETHER_CRC_LEN + VLAN_TAG_SIZE);
2369         req->mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2370                                     ETHER_CRC_LEN + VLAN_TAG_SIZE);
2371         req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx /
2372                                                 (num_vfs + 1));
2373         req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
2374         req->num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings /
2375                                                (num_vfs + 1));
2376         req->num_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
2377         req->num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
2378         req->num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
2379         /* TODO: For now, do not support VMDq/RFS on VFs. */
2380         req->num_vnics = rte_cpu_to_le_16(1);
2381         req->num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
2382                                                  (num_vfs + 1));
2383 }
2384
2385 static void add_random_mac_if_needed(struct bnxt *bp,
2386                                      struct hwrm_func_cfg_input *cfg_req,
2387                                      int vf)
2388 {
2389         struct ether_addr mac;
2390
2391         if (bnxt_hwrm_func_qcfg_vf_default_mac(bp, vf, &mac))
2392                 return;
2393
2394         if (memcmp(mac.addr_bytes, "\x00\x00\x00\x00\x00\x00", 6) == 0) {
2395                 cfg_req->enables |=
2396                 rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2397                 eth_random_addr(cfg_req->dflt_mac_addr);
2398                 bp->pf.vf_info[vf].random_mac = true;
2399         } else {
2400                 memcpy(cfg_req->dflt_mac_addr, mac.addr_bytes, ETHER_ADDR_LEN);
2401         }
2402 }
2403
2404 static void reserve_resources_from_vf(struct bnxt *bp,
2405                                       struct hwrm_func_cfg_input *cfg_req,
2406                                       int vf)
2407 {
2408         struct hwrm_func_qcaps_input req = {0};
2409         struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
2410         int rc;
2411
2412         /* Get the actual allocated values now */
2413         HWRM_PREP(req, FUNC_QCAPS);
2414         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2415         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2416
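             /*
              * If the query fails, fall back to assuming the VF was
              * granted exactly what was requested in cfg_req.
              */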
2417         if (rc) {
2418                 RTE_LOG(ERR, PMD, "hwrm_func_qcaps failed rc:%d\n", rc);
2419                 copy_func_cfg_to_qcaps(cfg_req, resp);
2420         } else if (resp->error_code) {
2421                 rc = rte_le_to_cpu_16(resp->error_code);
2422                 RTE_LOG(ERR, PMD, "hwrm_func_qcaps error %d\n", rc);
2423                 copy_func_cfg_to_qcaps(cfg_req, resp);
2424         }
2425
2426         bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->max_rsscos_ctx);
2427         bp->max_stat_ctx -= rte_le_to_cpu_16(resp->max_stat_ctx);
2428         bp->max_cp_rings -= rte_le_to_cpu_16(resp->max_cmpl_rings);
2429         bp->max_tx_rings -= rte_le_to_cpu_16(resp->max_tx_rings);
2430         bp->max_rx_rings -= rte_le_to_cpu_16(resp->max_rx_rings);
2431         bp->max_l2_ctx -= rte_le_to_cpu_16(resp->max_l2_ctxs);
2432         /*
2433          * TODO: While not supporting VMDq with VFs, max_vnics is always
2434          * forced to 1 in this case
2435          */
2436         /* bp->max_vnics -= rte_le_to_cpu_16(resp->max_vnics); */
2437         bp->max_ring_grps -= rte_le_to_cpu_16(resp->max_hw_ring_grps);
2438
2439         HWRM_UNLOCK();
2440 }
2441
2442 int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
2443 {
2444         struct hwrm_func_qcfg_input req = {0};
2445         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2446         int rc;
2447
2448         /* Query the default VLAN currently configured for this VF */
2449         HWRM_PREP(req, FUNC_QCFG);
2450         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2451         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2452         if (rc) {
2453                 RTE_LOG(ERR, PMD, "hwrm_func_qcfg failed rc:%d\n", rc);
2454                 return -1;
2455         } else if (resp->error_code) {
2456                 rc = rte_le_to_cpu_16(resp->error_code);
2457                 RTE_LOG(ERR, PMD, "hwrm_func_qcfg error %d\n", rc);
2458                 return -1;
2459         }
2460         rc = rte_le_to_cpu_16(resp->vlan);
2461
2462         HWRM_UNLOCK();
2463
2464         return rc;
2465 }
2466
2467 static int update_pf_resource_max(struct bnxt *bp)
2468 {
2469         struct hwrm_func_qcfg_input req = {0};
2470         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2471         int rc;
2472
2473         /* And copy the allocated numbers into the pf struct */
2474         HWRM_PREP(req, FUNC_QCFG);
2475         req.fid = rte_cpu_to_le_16(0xffff);
2476         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2477         HWRM_CHECK_RESULT();
2478
2479         /* Only TX ring value reflects actual allocation? TODO */
2480         bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
2481         bp->pf.evb_mode = resp->evb_mode;
2482
2483         HWRM_UNLOCK();
2484
2485         return rc;
2486 }
2487
2488 int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
2489 {
2490         int rc;
2491
2492         if (!BNXT_PF(bp)) {
2493                 RTE_LOG(ERR, PMD, "Attempt to allocate VFs on a VF!\n");
2494                 return -1;
2495         }
2496
2497         rc = bnxt_hwrm_func_qcaps(bp);
2498         if (rc)
2499                 return rc;
2500
2501         bp->pf.func_cfg_flags &=
2502                 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
2503                   HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
2504         bp->pf.func_cfg_flags |=
2505                 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
2506         rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
2507         return rc;
2508 }
2509
2510 int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
2511 {
2512         struct hwrm_func_cfg_input req = {0};
2513         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2514         int i;
2515         size_t sz;
2516         int rc = 0;
2517         size_t req_buf_sz;
2518
2519         if (!BNXT_PF(bp)) {
2520                 RTE_LOG(ERR, PMD, "Attempt to allocate VFs on a VF!\n");
2521                 return -1;
2522         }
2523
2524         rc = bnxt_hwrm_func_qcaps(bp);
2525
2526         if (rc)
2527                 return rc;
2528
2529         bp->pf.active_vfs = num_vfs;
2530
2531         /*
2532          * First, configure the PF to only use one TX ring.  This ensures that
2533          * there are enough rings for all VFs.
2534          *
2535          * If we don't do this, when we call func_alloc() later, we will lock
2536          * extra rings to the PF that won't be available during func_cfg() of
2537          * the VFs.
2538          *
2539          * This has been fixed with firmware versions above 20.6.54
2540          */
2541         bp->pf.func_cfg_flags &=
2542                 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
2543                   HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
2544         bp->pf.func_cfg_flags |=
2545                 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
2546         rc = bnxt_hwrm_pf_func_cfg(bp, 1);
2547         if (rc)
2548                 return rc;
2549
2550         /*
2551          * Now, create and register a buffer to hold forwarded VF requests
2552          */
2553         req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;
2554         bp->pf.vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
2555                 page_roundup(req_buf_sz));
2556         if (bp->pf.vf_req_buf == NULL) {
2557                 rc = -ENOMEM;
2558                 goto error_free;
2559         }
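             /* Fault in and lock each page so the buffer stays resident for DMA. */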
2560         for (sz = 0; sz < req_buf_sz; sz += getpagesize())
2561                 rte_mem_lock_page(((char *)bp->pf.vf_req_buf) + sz);
2562         for (i = 0; i < num_vfs; i++)
2563                 bp->pf.vf_info[i].req_buf = ((char *)bp->pf.vf_req_buf) +
2564                                         (i * HWRM_MAX_REQ_LEN);
2565
2566         rc = bnxt_hwrm_func_buf_rgtr(bp);
2567         if (rc)
2568                 goto error_free;
2569
2570         populate_vf_func_cfg_req(bp, &req, num_vfs);
2571
2572         bp->pf.active_vfs = 0;
2573         for (i = 0; i < num_vfs; i++) {
2574                 add_random_mac_if_needed(bp, &req, i);
2575
2576                 HWRM_PREP(req, FUNC_CFG);
2577                 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[i].func_cfg_flags);
2578                 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[i].fid);
2579                 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2580
2581                 /* Clear enable flag for next pass */
2582                 req.enables &= ~rte_cpu_to_le_32(
2583                                 HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2584
2585                 if (rc || resp->error_code) {
2586                         RTE_LOG(ERR, PMD,
2587                                 "Failed to initialize VF %d\n", i);
2588                         RTE_LOG(ERR, PMD,
2589                                 "Not all VFs available. (%d, %d)\n",
2590                                 rc, resp->error_code);
2591                         HWRM_UNLOCK();
2592                         break;
2593                 }
2594
2595                 HWRM_UNLOCK();
2596
2597                 reserve_resources_from_vf(bp, &req, i);
2598                 bp->pf.active_vfs++;
2599                 bnxt_hwrm_func_clr_stats(bp, bp->pf.vf_info[i].fid);
2600         }
2601
2602         /*
2603          * Now configure the PF to use "the rest" of the resources.
2604          * STD_TX_RING_MODE stays enabled here, which limits the number of
2605          * TX rings but allows QoS to function properly; leaving it unset
2606          * would break bandwidth settings on the PF rings.
2607          */
2608         rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
2609         if (rc)
2610                 goto error_free;
2611
2612         rc = update_pf_resource_max(bp);
2613         if (rc)
2614                 goto error_free;
2615
2616         return rc;
2617
2618 error_free:
2619         bnxt_hwrm_func_buf_unrgtr(bp);
2620         return rc;
2621 }
2622
2623 int bnxt_hwrm_pf_evb_mode(struct bnxt *bp)
2624 {
2625         struct hwrm_func_cfg_input req = {0};
2626         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2627         int rc;
2628
2629         HWRM_PREP(req, FUNC_CFG);
2630
2631         req.fid = rte_cpu_to_le_16(0xffff);
2632         req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE);
2633         req.evb_mode = bp->pf.evb_mode;
2634
2635         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2636         HWRM_CHECK_RESULT();
2637         HWRM_UNLOCK();
2638
2639         return rc;
2640 }
2641
2642 int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
2643                                 uint8_t tunnel_type)
2644 {
2645         struct hwrm_tunnel_dst_port_alloc_input req = {0};
2646         struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
2647         int rc = 0;
2648
2649         HWRM_PREP(req, TUNNEL_DST_PORT_ALLOC);
2650         req.tunnel_type = tunnel_type;
2651         req.tunnel_dst_port_val = port;
2652         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2653         HWRM_CHECK_RESULT();
2654
2655         switch (tunnel_type) {
2656         case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN:
2657                 bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
2658                 bp->vxlan_port = port;
2659                 break;
2660         case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE:
2661                 bp->geneve_fw_dst_port_id = resp->tunnel_dst_port_id;
2662                 bp->geneve_port = port;
2663                 break;
2664         default:
2665                 break;
2666         }
2667
2668         HWRM_UNLOCK();
2669
2670         return rc;
2671 }
2672
2673 int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
2674                                 uint8_t tunnel_type)
2675 {
2676         struct hwrm_tunnel_dst_port_free_input req = {0};
2677         struct hwrm_tunnel_dst_port_free_output *resp = bp->hwrm_cmd_resp_addr;
2678         int rc = 0;
2679
2680         HWRM_PREP(req, TUNNEL_DST_PORT_FREE);
2681
2682         req.tunnel_type = tunnel_type;
2683         req.tunnel_dst_port_id = rte_cpu_to_be_16(port);
2684         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2685
2686         HWRM_CHECK_RESULT();
2687         HWRM_UNLOCK();
2688
2689         return rc;
2690 }
2691
2692 int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf,
2693                                         uint32_t flags)
2694 {
2695         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2696         struct hwrm_func_cfg_input req = {0};
2697         int rc;
2698
2699         HWRM_PREP(req, FUNC_CFG);
2700
2701         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2702         req.flags = rte_cpu_to_le_32(flags);
2703         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2704
2705         HWRM_CHECK_RESULT();
2706         HWRM_UNLOCK();
2707
2708         return rc;
2709 }
2710
2711 void vf_vnic_set_rxmask_cb(struct bnxt_vnic_info *vnic, void *flagp)
2712 {
2713         uint32_t *flag = flagp;
2714
2715         vnic->flags = *flag;
2716 }
2717
2718 int bnxt_set_rx_mask_no_vlan(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2719 {
2720         return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
2721 }
2722
2723 int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
2724 {
2725         int rc = 0;
2726         struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
2727         struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
2728
2729         HWRM_PREP(req, FUNC_BUF_RGTR);
2730
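             /*
              * req_buf_page_size is a log2 value; the whole VF request
              * area is registered as one contiguous "page".
              */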
2731         req.req_buf_num_pages = rte_cpu_to_le_16(1);
2732         req.req_buf_page_size = rte_cpu_to_le_16(
2733                          page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));
2734         req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
2735         req.req_buf_page_addr[0] =
2736                 rte_cpu_to_le_64(rte_mem_virt2iova(bp->pf.vf_req_buf));
2737         if (req.req_buf_page_addr[0] == 0) {
2738                 RTE_LOG(ERR, PMD,
2739                         "unable to map buffer address to physical memory\n");
2740                 return -ENOMEM;
2741         }
2742
2743         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2744
2745         HWRM_CHECK_RESULT();
2746         HWRM_UNLOCK();
2747
2748         return rc;
2749 }
2750
2751 int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp)
2752 {
2753         int rc = 0;
2754         struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 };
2755         struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
2756
2757         HWRM_PREP(req, FUNC_BUF_UNRGTR);
2758
2759         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2760
2761         HWRM_CHECK_RESULT();
2762         HWRM_UNLOCK();
2763
2764         return rc;
2765 }
2766
2767 int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
2768 {
2769         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2770         struct hwrm_func_cfg_input req = {0};
2771         int rc;
2772
2773         HWRM_PREP(req, FUNC_CFG);
2774
2775         req.fid = rte_cpu_to_le_16(0xffff);
2776         req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
2777         req.enables = rte_cpu_to_le_32(
2778                         HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
2779         req.async_event_cr = rte_cpu_to_le_16(
2780                         bp->def_cp_ring->cp_ring_struct->fw_ring_id);
2781         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2782
2783         HWRM_CHECK_RESULT();
2784         HWRM_UNLOCK();
2785
2786         return rc;
2787 }
2788
2789 int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
2790 {
2791         struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2792         struct hwrm_func_vf_cfg_input req = {0};
2793         int rc;
2794
2795         HWRM_PREP(req, FUNC_VF_CFG);
2796
2797         req.enables = rte_cpu_to_le_32(
2798                         HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
2799         req.async_event_cr = rte_cpu_to_le_16(
2800                         bp->def_cp_ring->cp_ring_struct->fw_ring_id);
2801         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2802
2803         HWRM_CHECK_RESULT();
2804         HWRM_UNLOCK();
2805
2806         return rc;
2807 }
2808
2809 int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf)
2810 {
2811         struct hwrm_func_cfg_input req = {0};
2812         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2813         uint16_t dflt_vlan, fid;
2814         uint32_t func_cfg_flags;
2815         int rc = 0;
2816
2817         HWRM_PREP(req, FUNC_CFG);
2818
2819         if (is_vf) {
2820                 dflt_vlan = bp->pf.vf_info[vf].dflt_vlan;
2821                 fid = bp->pf.vf_info[vf].fid;
2822                 func_cfg_flags = bp->pf.vf_info[vf].func_cfg_flags;
2823         } else {
2824                 fid = 0xffff;
2825                 func_cfg_flags = bp->pf.func_cfg_flags;
2826                 dflt_vlan = bp->vlan;
2827         }
2828
2829         req.flags = rte_cpu_to_le_32(func_cfg_flags);
2830         req.fid = rte_cpu_to_le_16(fid);
2831         req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
2832         req.dflt_vlan = rte_cpu_to_le_16(dflt_vlan);
2833
2834         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2835
2836         HWRM_CHECK_RESULT();
2837         HWRM_UNLOCK();
2838
2839         return rc;
2840 }
2841
2842 int bnxt_hwrm_func_bw_cfg(struct bnxt *bp, uint16_t vf,
2843                         uint16_t max_bw, uint16_t enables)
2844 {
2845         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2846         struct hwrm_func_cfg_input req = {0};
2847         int rc;
2848
2849         HWRM_PREP(req, FUNC_CFG);
2850
2851         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2852         req.enables |= rte_cpu_to_le_32(enables);
2853         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
2854         req.max_bw = rte_cpu_to_le_32(max_bw);
2855         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2856
2857         HWRM_CHECK_RESULT();
2858         HWRM_UNLOCK();
2859
2860         return rc;
2861 }
2862
2863 int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf)
2864 {
2865         struct hwrm_func_cfg_input req = {0};
2866         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2867         int rc = 0;
2868
2869         HWRM_PREP(req, FUNC_CFG);
2870
2871         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
2872         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2873         req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
2874         req.dflt_vlan = rte_cpu_to_le_16(bp->pf.vf_info[vf].dflt_vlan);
2875
2876         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2877
2878         HWRM_CHECK_RESULT();
2879         HWRM_UNLOCK();
2880
2881         return rc;
2882 }
2883
2884 int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
2885                               void *encaped, size_t ec_size)
2886 {
2887         int rc = 0;
2888         struct hwrm_reject_fwd_resp_input req = {.req_type = 0};
2889         struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
2890
2891         if (ec_size > sizeof(req.encap_request))
2892                 return -1;
2893
2894         HWRM_PREP(req, REJECT_FWD_RESP);
2895
2896         req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
2897         memcpy(req.encap_request, encaped, ec_size);
2898
2899         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2900
2901         HWRM_CHECK_RESULT();
2902         HWRM_UNLOCK();
2903
2904         return rc;
2905 }
2906
2907 int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
2908                                        struct ether_addr *mac)
2909 {
2910         struct hwrm_func_qcfg_input req = {0};
2911         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2912         int rc;
2913
2914         HWRM_PREP(req, FUNC_QCFG);
2915
2916         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2917         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2918
2919         HWRM_CHECK_RESULT();
2920
2921         memcpy(mac->addr_bytes, resp->mac_address, ETHER_ADDR_LEN);
2922
2923         HWRM_UNLOCK();
2924
2925         return rc;
2926 }
2927
2928 int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
2929                             void *encaped, size_t ec_size)
2930 {
2931         int rc = 0;
2932         struct hwrm_exec_fwd_resp_input req = {.req_type = 0};
2933         struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
2934
2935         if (ec_size > sizeof(req.encap_request))
2936                 return -1;
2937
2938         HWRM_PREP(req, EXEC_FWD_RESP);
2939
2940         req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
2941         memcpy(req.encap_request, encaped, ec_size);
2942
2943         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2944
2945         HWRM_CHECK_RESULT();
2946         HWRM_UNLOCK();
2947
2948         return rc;
2949 }
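
/*
 * Note: bnxt_hwrm_exec_fwd_resp() asks the firmware to execute a VF
 * request that was captured in the PF request buffer registered via
 * bnxt_hwrm_func_buf_rgtr(), while bnxt_hwrm_reject_fwd_resp() above
 * refuses it; both forward the encapsulated request verbatim.
 */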
2950
2951 int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
2952                          struct rte_eth_stats *stats, uint8_t rx)
2953 {
2954         int rc = 0;
2955         struct hwrm_stat_ctx_query_input req = {.req_type = 0};
2956         struct hwrm_stat_ctx_query_output *resp = bp->hwrm_cmd_resp_addr;
2957
2958         HWRM_PREP(req, STAT_CTX_QUERY);
2959
2960         req.stat_ctx_id = rte_cpu_to_le_32(cid);
2961
2962         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2963
2964         HWRM_CHECK_RESULT();
2965
2966         if (rx) {
2967                 stats->q_ipackets[idx] = rte_le_to_cpu_64(resp->rx_ucast_pkts);
2968                 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_mcast_pkts);
2969                 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_bcast_pkts);
2970                 stats->q_ibytes[idx] = rte_le_to_cpu_64(resp->rx_ucast_bytes);
2971                 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_mcast_bytes);
2972                 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_bcast_bytes);
2973                 stats->q_errors[idx] = rte_le_to_cpu_64(resp->rx_err_pkts);
2974                 stats->q_errors[idx] += rte_le_to_cpu_64(resp->rx_drop_pkts);
2975         } else {
2976                 stats->q_opackets[idx] = rte_le_to_cpu_64(resp->tx_ucast_pkts);
2977                 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_mcast_pkts);
2978                 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_bcast_pkts);
2979                 stats->q_obytes[idx] = rte_le_to_cpu_64(resp->tx_ucast_bytes);
2980                 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_mcast_bytes);
2981                 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_bcast_bytes);
2982                 stats->q_errors[idx] += rte_le_to_cpu_64(resp->tx_err_pkts);
2983         }
2984
2986         HWRM_UNLOCK();
2987
2988         return rc;
2989 }
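
/*
 * Illustrative use of bnxt_hwrm_ctx_qstats() (a sketch; the ring/queue
 * field names are assumptions, not taken from this file): a stats handler
 * would walk the rx rings and aggregate per-queue counters, e.g.
 *
 *     for (i = 0; i < bp->rx_cp_nr_rings; i++) {
 *             struct bnxt_rx_queue *rxq = bp->rx_queues[i];
 *             struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
 *
 *             rc = bnxt_hwrm_ctx_qstats(bp, cpr->hw_stats_ctx_id, i,
 *                                       stats, 1);
 *     }
 */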
2990
2991 int bnxt_hwrm_port_qstats(struct bnxt *bp)
2992 {
2993         struct hwrm_port_qstats_input req = {0};
2994         struct hwrm_port_qstats_output *resp = bp->hwrm_cmd_resp_addr;
2995         struct bnxt_pf_info *pf = &bp->pf;
2996         int rc;
2997
2998         if (!(bp->flags & BNXT_FLAG_PORT_STATS))
2999                 return 0;
3000
3001         HWRM_PREP(req, PORT_QSTATS);
3002
3003         req.port_id = rte_cpu_to_le_16(pf->port_id);
3004         req.tx_stat_host_addr = rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
3005         req.rx_stat_host_addr = rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
3006         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3007
3008         HWRM_CHECK_RESULT();
3009         HWRM_UNLOCK();
3010
3011         return rc;
3012 }
3013
3014 int bnxt_hwrm_port_clr_stats(struct bnxt *bp)
3015 {
3016         struct hwrm_port_clr_stats_input req = {0};
3017         struct hwrm_port_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
3018         struct bnxt_pf_info *pf = &bp->pf;
3019         int rc;
3020
3021         if (!(bp->flags & BNXT_FLAG_PORT_STATS))
3022                 return 0;
3023
3024         HWRM_PREP(req, PORT_CLR_STATS);
3025
3026         req.port_id = rte_cpu_to_le_16(pf->port_id);
3027         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3028
3029         HWRM_CHECK_RESULT();
3030         HWRM_UNLOCK();
3031
3032         return rc;
3033 }
3034
3035 int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
3036 {
3037         struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
3038         struct hwrm_port_led_qcaps_input req = {0};
3039         int rc;
3040
3041         if (BNXT_VF(bp))
3042                 return 0;
3043
3044         HWRM_PREP(req, PORT_LED_QCAPS);
3045         req.port_id = bp->pf.port_id;
3046         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3047
3048         HWRM_CHECK_RESULT();
3049
3050         if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
3051                 unsigned int i;
3052
3053                 bp->num_leds = resp->num_leds;
3054                 memcpy(bp->leds, &resp->led0_id,
3055                         sizeof(bp->leds[0]) * bp->num_leds);
3056                 for (i = 0; i < bp->num_leds; i++) {
3057                         struct bnxt_led_info *led = &bp->leds[i];
3058
3059                         uint16_t caps = led->led_state_caps;
3060
3061                         if (!led->led_group_id ||
3062                                 !BNXT_LED_ALT_BLINK_CAP(caps)) {
3063                                 bp->num_leds = 0;
3064                                 break;
3065                         }
3066                 }
3067         }
3068
3069         HWRM_UNLOCK();
3070
3071         return rc;
3072 }
3073
3074 int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on)
3075 {
3076         struct hwrm_port_led_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3077         struct hwrm_port_led_cfg_input req = {0};
3078         struct bnxt_led_cfg *led_cfg;
3079         uint8_t led_state = HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_DEFAULT;
3080         uint16_t duration = 0;
3081         int rc, i;
3082
3083         if (!bp->num_leds || BNXT_VF(bp))
3084                 return -EOPNOTSUPP;
3085
3086         HWRM_PREP(req, PORT_LED_CFG);
3087
3088         if (led_on) {
3089                 led_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT;
3090                 duration = rte_cpu_to_le_16(500);
3091         }
3092         req.port_id = bp->pf.port_id;
3093         req.num_leds = bp->num_leds;
3094         led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
3095         for (i = 0; i < bp->num_leds; i++, led_cfg++) {
3096                 req.enables |= BNXT_LED_DFLT_ENABLES(i);
3097                 led_cfg->led_id = bp->leds[i].led_id;
3098                 led_cfg->led_state = led_state;
3099                 led_cfg->led_blink_on = duration;
3100                 led_cfg->led_blink_off = duration;
3101                 led_cfg->led_group_id = bp->leds[i].led_group_id;
3102         }
3103
3104         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3105
3106         HWRM_CHECK_RESULT();
3107         HWRM_UNLOCK();
3108
3109         return rc;
3110 }
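
/*
 * Note: with led_on set, every LED reported by bnxt_hwrm_port_led_qcaps()
 * is placed in BLINKALT state with a 500 ms on/off period, which is
 * typically used to visually identify the port.
 */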
3111
3112 int bnxt_hwrm_nvm_get_dir_info(struct bnxt *bp, uint32_t *entries,
3113                                uint32_t *length)
3114 {
3115         int rc;
3116         struct hwrm_nvm_get_dir_info_input req = {0};
3117         struct hwrm_nvm_get_dir_info_output *resp = bp->hwrm_cmd_resp_addr;
3118
3119         HWRM_PREP(req, NVM_GET_DIR_INFO);
3120
3121         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3122
3123         HWRM_CHECK_RESULT();
3124         HWRM_UNLOCK();
3125
3126         if (!rc) {
3127                 *entries = rte_le_to_cpu_32(resp->entries);
3128                 *length = rte_le_to_cpu_32(resp->entry_length);
3129         }
3130         return rc;
3131 }
3132
3133 int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data)
3134 {
3135         int rc;
3136         uint32_t dir_entries;
3137         uint32_t entry_length;
3138         uint8_t *buf;
3139         size_t buflen;
3140         rte_iova_t dma_handle;
3141         struct hwrm_nvm_get_dir_entries_input req = {0};
3142         struct hwrm_nvm_get_dir_entries_output *resp = bp->hwrm_cmd_resp_addr;
3143
3144         rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length);
3145         if (rc != 0)
3146                 return rc;
3147
3148         *data++ = dir_entries;
3149         *data++ = entry_length;
3150         len -= 2;
3151         memset(data, 0xff, len);
3152
3153         buflen = dir_entries * entry_length;
3154         buf = rte_malloc("nvm_dir", buflen, 0);
3155         if (buf == NULL)
3156                 return -ENOMEM;
3157         rte_mem_lock_page(buf);
3158         dma_handle = rte_mem_virt2iova(buf);
3159         if (dma_handle == 0) {
3160                 RTE_LOG(ERR, PMD, "unable to map buffer to physical memory\n");
3161                 rte_free(buf);
3162                 return -ENOMEM;
3163         }
3164         HWRM_PREP(req, NVM_GET_DIR_ENTRIES);
3165         req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
3166         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3167
3168         HWRM_CHECK_RESULT();
3169         HWRM_UNLOCK();
3170
3171         if (rc == 0)
3172                 memcpy(data, buf, len > buflen ? buflen : len);
3173
3174         rte_free(buf);
3175
3176         return rc;
3177 }
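
/*
 * Layout note for the buffer filled above: data[0] holds the directory
 * entry count and data[1] the per-entry length (each truncated to one
 * byte), followed by the raw directory table, so callers should pass
 * len >= 2 + dir_entries * entry_length.
 */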
3178
3179 int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index,
3180                              uint32_t offset, uint32_t length,
3181                              uint8_t *data)
3182 {
3183         int rc;
3184         uint8_t *buf;
3185         rte_iova_t dma_handle;
3186         struct hwrm_nvm_read_input req = {0};
3187         struct hwrm_nvm_read_output *resp = bp->hwrm_cmd_resp_addr;
3188
3189         buf = rte_malloc("nvm_item", length, 0);
3190         if (!buf)
3191                 return -ENOMEM;
3192         rte_mem_lock_page(buf);
3193
3194         dma_handle = rte_mem_virt2iova(buf);
3195         if (dma_handle == 0) {
3196                 RTE_LOG(ERR, PMD, "unable to map buffer to physical memory\n");
3197                 rte_free(buf);
3198                 return -ENOMEM;
3199         }
3200         HWRM_PREP(req, NVM_READ);
3201         req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
3202         req.dir_idx = rte_cpu_to_le_16(index);
3203         req.offset = rte_cpu_to_le_32(offset);
3204         req.len = rte_cpu_to_le_32(length);
3205         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3206         HWRM_CHECK_RESULT();
3207         HWRM_UNLOCK();
3208         if (rc == 0)
3209                 memcpy(data, buf, length);
3210
3211         rte_free(buf);
3212         return rc;
3213 }
3214
3215 int bnxt_hwrm_erase_nvram_directory(struct bnxt *bp, uint8_t index)
3216 {
3217         int rc;
3218         struct hwrm_nvm_erase_dir_entry_input req = {0};
3219         struct hwrm_nvm_erase_dir_entry_output *resp = bp->hwrm_cmd_resp_addr;
3220
3221         HWRM_PREP(req, NVM_ERASE_DIR_ENTRY);
3222         req.dir_idx = rte_cpu_to_le_16(index);
3223         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3224         HWRM_CHECK_RESULT();
3225         HWRM_UNLOCK();
3226
3227         return rc;
3228 }
3229
3231 int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
3232                           uint16_t dir_ordinal, uint16_t dir_ext,
3233                           uint16_t dir_attr, const uint8_t *data,
3234                           size_t data_len)
3235 {
3236         int rc;
3237         struct hwrm_nvm_write_input req = {0};
3238         struct hwrm_nvm_write_output *resp = bp->hwrm_cmd_resp_addr;
3239         rte_iova_t dma_handle;
3240         uint8_t *buf;
3241
3242         buf = rte_malloc("nvm_write", data_len, 0);
3243         if (!buf)
3244                 return -ENOMEM;
3245         rte_mem_lock_page(buf);
3246         dma_handle = rte_mem_virt2iova(buf);
3247         if (dma_handle == 0) {
3248                 RTE_LOG(ERR, PMD,
3249                         "unable to map write buffer to physical memory\n");
3250                 rte_free(buf);
3251                 return -ENOMEM;
3252         }
3253         memcpy(buf, data, data_len);
3254
3255         HWRM_PREP(req, NVM_WRITE);
3256
3257         req.dir_type = rte_cpu_to_le_16(dir_type);
3258         req.dir_ordinal = rte_cpu_to_le_16(dir_ordinal);
3259         req.dir_ext = rte_cpu_to_le_16(dir_ext);
3260         req.dir_attr = rte_cpu_to_le_16(dir_attr);
3261         req.dir_data_length = rte_cpu_to_le_32(data_len);
3262         req.host_src_addr = rte_cpu_to_le_64(dma_handle);
3263
3264         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3265
3266         HWRM_CHECK_RESULT();
3267         HWRM_UNLOCK();
3268
3269         rte_free(buf);
3270         return rc;
3271 }
3272
3273 static void
3274 bnxt_vnic_count(struct bnxt_vnic_info *vnic __rte_unused, void *cbdata)
3275 {
3276         uint32_t *count = cbdata;
3277
3278         *count = *count + 1;
3279 }
3280
3281 static int bnxt_vnic_count_hwrm_stub(struct bnxt *bp __rte_unused,
3282                                      struct bnxt_vnic_info *vnic __rte_unused)
3283 {
3284         return 0;
3285 }
3286
3287 int bnxt_vf_vnic_count(struct bnxt *bp, uint16_t vf)
3288 {
3289         uint32_t count = 0;
3290
3291         bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf, bnxt_vnic_count,
3292             &count, bnxt_vnic_count_hwrm_stub);
3293
3294         return count;
3295 }
3296
3297 static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
3298                                         uint16_t *vnic_ids)
3299 {
3300         struct hwrm_func_vf_vnic_ids_query_input req = {0};
3301         struct hwrm_func_vf_vnic_ids_query_output *resp =
3302                                                 bp->hwrm_cmd_resp_addr;
3303         int rc;
3304
3305         /* First query all VNIC ids */
3306         HWRM_PREP(req, FUNC_VF_VNIC_IDS_QUERY);
3307
3308         req.vf_id = rte_cpu_to_le_16(bp->pf.first_vf_id + vf);
3309         req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics);
3310         req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2iova(vnic_ids));
3311
3312         if (req.vnic_id_tbl_addr == 0) {
3313                 HWRM_UNLOCK();
3314                 RTE_LOG(ERR, PMD,
3315                 "unable to map VNIC ID table address to physical memory\n");
3316                 return -ENOMEM;
3317         }
3318         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3319         if (rc) {
3320                 HWRM_UNLOCK();
3321                 RTE_LOG(ERR, PMD, "hwrm_func_vf_vnic_query failed rc:%d\n", rc);
3322                 return -1;
3323         } else if (resp->error_code) {
3324                 rc = rte_le_to_cpu_16(resp->error_code);
3325                 HWRM_UNLOCK();
3326                 RTE_LOG(ERR, PMD, "hwrm_func_vf_vnic_query error %d\n", rc);
3327                 return -1;
3328         }
3329         rc = rte_le_to_cpu_32(resp->vnic_id_cnt);
3330
3331         HWRM_UNLOCK();
3332
3333         return rc;
3334 }
3335
3336 /*
3337  * This function queries the VNIC IDs for a specified VF. For each VNIC it
3338  * invokes vnic_cb to update the relevant vnic_info field using cbdata, and
3339  * then calls hwrm_cb to program the new VNIC configuration in the firmware.
3340  */
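/*
 * For example, bnxt_vf_vnic_count() above counts a VF's active VNICs by
 * pairing a counting vnic_cb with a no-op hwrm_cb:
 *
 *     bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf, bnxt_vnic_count,
 *         &count, bnxt_vnic_count_hwrm_stub);
 */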
3341 int bnxt_hwrm_func_vf_vnic_query_and_config(struct bnxt *bp, uint16_t vf,
3342         void (*vnic_cb)(struct bnxt_vnic_info *, void *), void *cbdata,
3343         int (*hwrm_cb)(struct bnxt *bp, struct bnxt_vnic_info *vnic))
3344 {
3345         struct bnxt_vnic_info vnic;
3346         int rc = 0;
3347         int i, num_vnic_ids;
3348         uint16_t *vnic_ids;
3349         size_t vnic_id_sz;
3350         size_t sz;
3351
3352         /* First query all VNIC ids */
3353         vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
3354         vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
3355                         RTE_CACHE_LINE_SIZE);
3356         if (vnic_ids == NULL) {
3357                 rc = -ENOMEM;
3358                 return rc;
3359         }
3360         for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
3361                 rte_mem_lock_page(((char *)vnic_ids) + sz);
3362
3363         num_vnic_ids = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
3364         if (num_vnic_ids < 0) {
3365                 rte_free(vnic_ids);
3366                 return num_vnic_ids;
3367         }
3368         /* Retrieve each VNIC, let vnic_cb update it, then apply hwrm_cb */
3369
3370         for (i = 0; i < num_vnic_ids; i++) {
3371                 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
3372                 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
3373                 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, bp->pf.first_vf_id + vf);
3374                 if (rc)
3375                         break;
3376                 if (vnic.mru <= 4)      /* Indicates unallocated */
3377                         continue;
3378
3379                 vnic_cb(&vnic, cbdata);
3380
3381                 rc = hwrm_cb(bp, &vnic);
3382                 if (rc)
3383                         break;
3384         }
3385
3386         rte_free(vnic_ids);
3387
3388         return rc;
3389 }
3390
3391 int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf,
3392                                               bool on)
3393 {
3394         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3395         struct hwrm_func_cfg_input req = {0};
3396         int rc;
3397
3398         HWRM_PREP(req, FUNC_CFG);
3399
3400         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3401         req.enables |= rte_cpu_to_le_32(
3402                         HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE);
3403         req.vlan_antispoof_mode = on ?
3404                 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN :
3405                 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK;
3406         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3407
3408         HWRM_CHECK_RESULT();
3409         HWRM_UNLOCK();
3410
3411         return rc;
3412 }
3413
3414 int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf)
3415 {
3416         struct bnxt_vnic_info vnic;
3417         uint16_t *vnic_ids;
3418         size_t vnic_id_sz;
3419         int num_vnic_ids, i;
3420         size_t sz;
3421         int rc;
3422
3423         vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
3424         vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
3425                         RTE_CACHE_LINE_SIZE);
3426         if (vnic_ids == NULL) {
3427                 rc = -ENOMEM;
3428                 return rc;
3429         }
3430
3431         for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
3432                 rte_mem_lock_page(((char *)vnic_ids) + sz);
3433
3434         rc = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
3435         if (rc <= 0)
3436                 goto exit;
3437         num_vnic_ids = rc;
3438
3439         /*
3440          * Loop through to find the default VNIC ID.
3441          * TODO: The easier way would be to obtain the resp->dflt_vnic_id
3442          * by sending the hwrm_func_qcfg command to the firmware.
3443          */
3444         for (i = 0; i < num_vnic_ids; i++) {
3445                 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
3446                 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
3447                 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic,
3448                                         bp->pf.first_vf_id + vf);
3449                 if (rc)
3450                         goto exit;
3451                 if (vnic.func_default) {
3452                         rte_free(vnic_ids);
3453                         return vnic.fw_vnic_id;
3454                 }
3455         }
3456         /* Could not find a default VNIC. */
3457         RTE_LOG(ERR, PMD, "No default VNIC\n");
3458 exit:
3459         rte_free(vnic_ids);
3460         return -1;
3461 }
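
/*
 * A sketch of the simpler approach suggested by the TODO above (untested;
 * assumes the firmware reports dflt_vnic_id in the HWRM_FUNC_QCFG response
 * for a VF fid):
 *
 *     struct hwrm_func_qcfg_input req = {0};
 *     struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
 *
 *     HWRM_PREP(req, FUNC_QCFG);
 *     req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
 *     rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 *     HWRM_CHECK_RESULT();
 *     dflt_vnic_id = rte_le_to_cpu_16(resp->dflt_vnic_id);
 *     HWRM_UNLOCK();
 */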
3462
3463 int bnxt_hwrm_set_em_filter(struct bnxt *bp,
3464                          uint16_t dst_id,
3465                          struct bnxt_filter_info *filter)
3466 {
3467         int rc = 0;
3468         struct hwrm_cfa_em_flow_alloc_input req = {.req_type = 0 };
3469         struct hwrm_cfa_em_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3470         uint32_t enables = 0;
3471
3472         if (filter->fw_em_filter_id != UINT64_MAX)
3473                 bnxt_hwrm_clear_em_filter(bp, filter);
3474
3475         HWRM_PREP(req, CFA_EM_FLOW_ALLOC);
3476
3477         req.flags = rte_cpu_to_le_32(filter->flags);
3478
3479         enables = filter->enables |
3480               HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_ID;
3481         req.dst_id = rte_cpu_to_le_16(dst_id);
3482
3483         if (filter->ip_addr_type) {
3484                 req.ip_addr_type = filter->ip_addr_type;
3485                 enables |= HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
3486         }
3487         if (enables &
3488             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
3489                 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
3490         if (enables &
3491             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_MACADDR)
3492                 memcpy(req.src_macaddr, filter->src_macaddr,
3493                        ETHER_ADDR_LEN);
3494         if (enables &
3495             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_MACADDR)
3496                 memcpy(req.dst_macaddr, filter->dst_macaddr,
3497                        ETHER_ADDR_LEN);
3498         if (enables &
3499             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_OVLAN_VID)
3500                 req.ovlan_vid = filter->l2_ovlan;
3501         if (enables &
3502             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IVLAN_VID)
3503                 req.ivlan_vid = filter->l2_ivlan;
3504         if (enables &
3505             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ETHERTYPE)
3506                 req.ethertype = rte_cpu_to_be_16(filter->ethertype);
3507         if (enables &
3508             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
3509                 req.ip_protocol = filter->ip_protocol;
3510         if (enables &
3511             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_IPADDR)
3512                 req.src_ipaddr[0] = rte_cpu_to_be_32(filter->src_ipaddr[0]);
3513         if (enables &
3514             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_IPADDR)
3515                 req.dst_ipaddr[0] = rte_cpu_to_be_32(filter->dst_ipaddr[0]);
3516         if (enables &
3517             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_PORT)
3518                 req.src_port = rte_cpu_to_be_16(filter->src_port);
3519         if (enables &
3520             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_PORT)
3521                 req.dst_port = rte_cpu_to_be_16(filter->dst_port);
3522         if (enables &
3523             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
3524                 req.mirror_vnic_id = filter->mirror_vnic_id;
3525
3526         req.enables = rte_cpu_to_le_32(enables);
3527
3528         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3529
3530         HWRM_CHECK_RESULT();
3531
3532         filter->fw_em_filter_id = rte_le_to_cpu_64(resp->em_filter_id);
3533         HWRM_UNLOCK();
3534
3535         return rc;
3536 }
3537
3538 int bnxt_hwrm_clear_em_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
3539 {
3540         int rc = 0;
3541         struct hwrm_cfa_em_flow_free_input req = {.req_type = 0 };
3542         struct hwrm_cfa_em_flow_free_output *resp = bp->hwrm_cmd_resp_addr;
3543
3544         if (filter->fw_em_filter_id == UINT64_MAX)
3545                 return 0;
3546
3547         RTE_LOG(DEBUG, PMD, "Clear EM filter\n");
3548         HWRM_PREP(req, CFA_EM_FLOW_FREE);
3549
3550         req.em_filter_id = rte_cpu_to_le_64(filter->fw_em_filter_id);
3551
3552         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3553
3554         HWRM_CHECK_RESULT();
3555         HWRM_UNLOCK();
3556
3557         filter->fw_em_filter_id = -1;
3558         filter->fw_l2_filter_id = -1;
3559
3560         return 0;
3561 }
3562
3563 int bnxt_hwrm_set_ntuple_filter(struct bnxt *bp,
3564                          uint16_t dst_id,
3565                          struct bnxt_filter_info *filter)
3566 {
3567         int rc = 0;
3568         struct hwrm_cfa_ntuple_filter_alloc_input req = {.req_type = 0 };
3569         struct hwrm_cfa_ntuple_filter_alloc_output *resp =
3570                                                 bp->hwrm_cmd_resp_addr;
3571         uint32_t enables = 0;
3572
3573         if (filter->fw_ntuple_filter_id != UINT64_MAX)
3574                 bnxt_hwrm_clear_ntuple_filter(bp, filter);
3575
3576         HWRM_PREP(req, CFA_NTUPLE_FILTER_ALLOC);
3577
3578         req.flags = rte_cpu_to_le_32(filter->flags);
3579
3580         enables = filter->enables |
3581               HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
3582         req.dst_id = rte_cpu_to_le_16(dst_id);
3583
3585         if (filter->ip_addr_type) {
3586                 req.ip_addr_type = filter->ip_addr_type;
3587                 enables |=
3588                         HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
3589         }
3590         if (enables &
3591             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
3592                 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
3593         if (enables &
3594             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR)
3595                 memcpy(req.src_macaddr, filter->src_macaddr,
3596                        ETHER_ADDR_LEN);
3597         /*
3598          * HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_MACADDR is
3599          * intentionally not programmed for ntuple filters here.
3600          */
3601         if (enables &
3602             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE)
3603                 req.ethertype = rte_cpu_to_be_16(filter->ethertype);
3604         if (enables &
3605             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
3606                 req.ip_protocol = filter->ip_protocol;
3607         if (enables &
3608             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR)
3609                 req.src_ipaddr[0] = rte_cpu_to_le_32(filter->src_ipaddr[0]);
3610         if (enables &
3611             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR_MASK)
3612                 req.src_ipaddr_mask[0] =
3613                         rte_cpu_to_le_32(filter->src_ipaddr_mask[0]);
3614         if (enables &
3615             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR)
3616                 req.dst_ipaddr[0] = rte_cpu_to_le_32(filter->dst_ipaddr[0]);
3617         if (enables &
3618             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR_MASK)
3619                 req.dst_ipaddr_mask[0] =
3620                         rte_cpu_to_le_32(filter->dst_ipaddr_mask[0]);
3621         if (enables &
3622             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT)
3623                 req.src_port = rte_cpu_to_le_16(filter->src_port);
3624         if (enables &
3625             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT_MASK)
3626                 req.src_port_mask = rte_cpu_to_le_16(filter->src_port_mask);
3627         if (enables &
3628             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT)
3629                 req.dst_port = rte_cpu_to_le_16(filter->dst_port);
3630         if (enables &
3631             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT_MASK)
3632                 req.dst_port_mask = rte_cpu_to_le_16(filter->dst_port_mask);
3633         if (enables &
3634             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
3635                 req.mirror_vnic_id = filter->mirror_vnic_id;
3636
3637         req.enables = rte_cpu_to_le_32(enables);
3638
3639         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3640
3641         HWRM_CHECK_RESULT();
3642
3643         filter->fw_ntuple_filter_id = rte_le_to_cpu_64(resp->ntuple_filter_id);
3644         HWRM_UNLOCK();
3645
3646         return rc;
3647 }
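
/*
 * Illustrative caller setup (hypothetical values; IPv4() is the rte_ip.h
 * address macro): to match TCP traffic to 10.0.0.1:80, a caller would
 * fill the filter before invoking bnxt_hwrm_set_ntuple_filter():
 *
 *     filter->enables =
 *         HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE |
 *         HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL |
 *         HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR |
 *         HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT;
 *     filter->ethertype = 0x0800;
 *     filter->ip_protocol = 6;   (IPPROTO_TCP)
 *     filter->dst_ipaddr[0] = IPv4(10, 0, 0, 1);
 *     filter->dst_port = 80;
 */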
3648
3649 int bnxt_hwrm_clear_ntuple_filter(struct bnxt *bp,
3650                                 struct bnxt_filter_info *filter)
3651 {
3652         int rc = 0;
3653         struct hwrm_cfa_ntuple_filter_free_input req = {.req_type = 0 };
3654         struct hwrm_cfa_ntuple_filter_free_output *resp =
3655                                                 bp->hwrm_cmd_resp_addr;
3656
3657         if (filter->fw_ntuple_filter_id == UINT64_MAX)
3658                 return 0;
3659
3660         HWRM_PREP(req, CFA_NTUPLE_FILTER_FREE);
3661
3662         req.ntuple_filter_id = rte_cpu_to_le_64(filter->fw_ntuple_filter_id);
3663
3664         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3665
3666         HWRM_CHECK_RESULT();
3667         HWRM_UNLOCK();
3668
3669         filter->fw_ntuple_filter_id = -1;
3670         filter->fw_l2_filter_id = -1;
3671
3672         return 0;
3673 }