net/bnxt: revert reset of L2 filter id
drivers/net/bnxt/bnxt_hwrm.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Broadcom
 * All rights reserved.
 */

#include <unistd.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_version.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_ring.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"

#include <rte_io.h>

#define HWRM_CMD_TIMEOUT                10000
#define HWRM_SPEC_CODE_1_8_3            0x10803
#define HWRM_VERSION_1_9_1              0x10901

struct bnxt_plcmodes_cfg {
        uint32_t        flags;
        uint16_t        jumbo_thresh;
        uint16_t        hds_offset;
        uint16_t        hds_threshold;
};

static int page_getenum(size_t size)
{
        if (size <= 1 << 4)
                return 4;
        if (size <= 1 << 12)
                return 12;
        if (size <= 1 << 13)
                return 13;
        if (size <= 1 << 16)
                return 16;
        if (size <= 1 << 21)
                return 21;
        if (size <= 1 << 22)
                return 22;
        if (size <= 1 << 30)
                return 30;
        PMD_DRV_LOG(ERR, "Page size %zu out of range\n", size);
        return sizeof(void *) * 8 - 1;
}

static int page_roundup(size_t size)
{
        return 1 << page_getenum(size);
}
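
/*
 * Example: page_getenum(3000) returns 12 and page_roundup(3000) returns
 * 4096, i.e. sizes are rounded up to the next supported page size
 * (16B, 4K, 8K, 64K, 2M, 4M, 1G).
 */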

/*
 * HWRM Functions (sent to HWRM)
 * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
 * fails (i.e. a timeout), and a positive non-zero HWRM error code if the
 * command is failed by the ChiMP.
 */

static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
                                        uint32_t msg_len)
{
        unsigned int i;
        struct input *req = msg;
        struct output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t *data = msg;
        uint8_t *bar;
        uint8_t *valid;
        uint16_t max_req_len = bp->max_req_len;
        struct hwrm_short_input short_input = { 0 };

        if (bp->flags & BNXT_FLAG_SHORT_CMD) {
                void *short_cmd_req = bp->hwrm_short_cmd_req_addr;

                memset(short_cmd_req, 0, bp->max_req_len);
                memcpy(short_cmd_req, req, msg_len);

                short_input.req_type = rte_cpu_to_le_16(req->req_type);
                short_input.signature = rte_cpu_to_le_16(
                                        HWRM_SHORT_INPUT_SIGNATURE_SHORT_CMD);
                short_input.size = rte_cpu_to_le_16(msg_len);
                short_input.req_addr =
                        rte_cpu_to_le_64(bp->hwrm_short_cmd_req_dma_addr);

                data = (uint32_t *)&short_input;
                msg_len = sizeof(short_input);

                /* Sync memory write before updating doorbell */
                rte_wmb();

                max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
        }
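
        /*
         * In short-command mode, only the small hwrm_short_input structure
         * is written to BAR0 below; the full request was copied into the
         * DMA buffer above and is fetched by the firmware through
         * short_input.req_addr.
         */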

        /* Write request msg to hwrm channel */
        for (i = 0; i < msg_len; i += 4) {
                bar = (uint8_t *)bp->bar0 + i;
                rte_write32(*data, bar);
                data++;
        }

        /* Zero the rest of the request space */
        for (; i < max_req_len; i += 4) {
                bar = (uint8_t *)bp->bar0 + i;
                rte_write32(0, bar);
        }

        /* Ring channel doorbell */
        bar = (uint8_t *)bp->bar0 + 0x100;
        rte_write32(1, bar);

        /* Poll for the valid bit */
        for (i = 0; i < HWRM_CMD_TIMEOUT; i++) {
                /* Sanity check on the resp->resp_len */
                rte_rmb();
                if (resp->resp_len && resp->resp_len <=
                                bp->max_resp_len) {
                        /* Last byte of resp contains the valid key */
                        valid = (uint8_t *)resp + resp->resp_len - 1;
                        if (*valid == HWRM_RESP_VALID_KEY)
                                break;
                }
                rte_delay_us(600);
        }

        if (i >= HWRM_CMD_TIMEOUT) {
                PMD_DRV_LOG(ERR, "Error sending msg 0x%04x\n",
                        req->req_type);
                goto err_ret;
        }
        return 0;

err_ret:
        return -1;
}

/*
 * HWRM_PREP() should be used to prepare *ALL* HWRM commands. It grabs the
 * spinlock and does the initial processing.
 *
 * HWRM_CHECK_RESULT() returns early on failure and releases the spinlock
 * before returning. If a function does not use the regular int return
 * codes, HWRM_CHECK_RESULT() should not be used directly; instead it
 * should be copied and modified to suit that function.
 *
 * HWRM_UNLOCK() must be called after all response processing is completed.
 */
#define HWRM_PREP(req, type) do { \
        rte_spinlock_lock(&bp->hwrm_lock); \
        memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
        req.req_type = rte_cpu_to_le_16(HWRM_##type); \
        req.cmpl_ring = rte_cpu_to_le_16(-1); \
        req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
        req.target_id = rte_cpu_to_le_16(0xffff); \
        req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr); \
} while (0)

#define HWRM_CHECK_RESULT() do {\
        if (rc) { \
                PMD_DRV_LOG(ERR, "failed rc:%d\n", rc); \
                rte_spinlock_unlock(&bp->hwrm_lock); \
                return rc; \
        } \
        if (resp->error_code) { \
                rc = rte_le_to_cpu_16(resp->error_code); \
                if (resp->resp_len >= 16) { \
                        struct hwrm_err_output *tmp_hwrm_err_op = \
                                                (void *)resp; \
                        PMD_DRV_LOG(ERR, \
                                "error %d:%d:%08x:%04x\n", \
                                rc, tmp_hwrm_err_op->cmd_err, \
                                rte_le_to_cpu_32(\
                                        tmp_hwrm_err_op->opaque_0), \
                                rte_le_to_cpu_16(\
                                        tmp_hwrm_err_op->opaque_1)); \
                } else { \
                        PMD_DRV_LOG(ERR, "error %d\n", rc); \
                } \
                rte_spinlock_unlock(&bp->hwrm_lock); \
                return rc; \
        } \
} while (0)

#define HWRM_UNLOCK()           rte_spinlock_unlock(&bp->hwrm_lock)
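
/*
 * Minimal usage sketch of the macro protocol (assumes local variables
 * named rc, req and resp, as in every bnxt_hwrm_*() function below;
 * SOME_CMD is a placeholder command name):
 *
 *      HWRM_PREP(req, SOME_CMD);
 *      ...fill req fields...
 *      rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 *      HWRM_CHECK_RESULT();    .. may return early, dropping the lock
 *      ...read resp fields...
 *      HWRM_UNLOCK();
 */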

int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, CFA_L2_SET_RX_MASK);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
        req.mask = 0;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
                                 struct bnxt_vnic_info *vnic,
                                 uint16_t vlan_count,
                                 struct bnxt_vlan_table_entry *vlan_table)
{
        int rc = 0;
        struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t mask = 0;

        if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
                return rc;

        HWRM_PREP(req, CFA_L2_SET_RX_MASK);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

        /* FIXME add the multicast flag once multicast add options are
         * supported by ethtool.
         */
        if (vnic->flags & BNXT_VNIC_INFO_BCAST)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST;
        if (vnic->flags & BNXT_VNIC_INFO_UNTAGGED)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN;
        if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
        if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
        if (vnic->flags & BNXT_VNIC_INFO_MCAST)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
        if (vnic->mc_addr_cnt) {
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
                req.num_mc_entries = rte_cpu_to_le_32(vnic->mc_addr_cnt);
                req.mc_tbl_addr = rte_cpu_to_le_64(vnic->mc_list_dma_addr);
        }
        if (vlan_table) {
                if (!(mask & HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN))
                        mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
                req.vlan_tag_tbl_addr = rte_cpu_to_le_64(
                         rte_mem_virt2iova(vlan_table));
                req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count);
        }
        req.mask = rte_cpu_to_le_32(mask);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
                        uint16_t vlan_count,
                        struct bnxt_vlan_antispoof_table_entry *vlan_table)
{
        int rc = 0;
        struct hwrm_cfa_vlan_antispoof_cfg_input req = {.req_type = 0 };
        struct hwrm_cfa_vlan_antispoof_cfg_output *resp =
                                                bp->hwrm_cmd_resp_addr;

        /*
         * Older HWRM versions did not support this command, and the set_rx_mask
         * list was used for anti-spoof. In 1.8.0, the TX path configuration was
         * removed from the set_rx_mask call, and this command was added.
         *
         * This command is also present from 1.7.8.11 and higher,
         * as well as 1.7.8.0
         */
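        /*
         * bp->fw_ver packs the firmware version as
         * (maj << 24) | (min << 16) | (bld << 8) | rsvd (see
         * bnxt_hwrm_ver_get() below), so e.g. 1.7.8.11 is
         * (1 << 24) | (7 << 16) | (8 << 8) | 11.
         */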
        if (bp->fw_ver < ((1 << 24) | (8 << 16))) {
                if (bp->fw_ver != ((1 << 24) | (7 << 16) | (8 << 8))) {
                        if (bp->fw_ver < ((1 << 24) | (7 << 16) | (8 << 8) |
                                        (11)))
                                return 0;
                }
        }
        HWRM_PREP(req, CFA_VLAN_ANTISPOOF_CFG);
        req.fid = rte_cpu_to_le_16(fid);

        req.vlan_tag_mask_tbl_addr =
                rte_cpu_to_le_64(rte_mem_virt2iova(vlan_table));
        req.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_clear_l2_filter(struct bnxt *bp,
                           struct bnxt_filter_info *filter)
{
        int rc = 0;
        struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;

        if (filter->fw_l2_filter_id == UINT64_MAX)
                return 0;

        HWRM_PREP(req, CFA_L2_FILTER_FREE);

        req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        filter->fw_l2_filter_id = UINT64_MAX;

        return 0;
}

int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
                         uint16_t dst_id,
                         struct bnxt_filter_info *filter)
{
        int rc = 0;
        struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
        struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
        const struct rte_eth_vmdq_rx_conf *conf =
                    &dev_conf->rx_adv_conf.vmdq_rx_conf;
        uint32_t enables = 0;
        uint16_t j = dst_id - 1;

        //TODO: Is there a better way to add VLANs to each VNIC in case of VMDQ
        if ((dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) &&
            conf->pool_map[j].pools & (1UL << j)) {
                PMD_DRV_LOG(DEBUG,
                        "Add vlan %u to vmdq pool %u\n",
                        conf->pool_map[j].vlan_id, j);

                filter->l2_ivlan = conf->pool_map[j].vlan_id;
                filter->enables |=
                        HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN |
                        HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK;
        }

        if (filter->fw_l2_filter_id != UINT64_MAX)
                bnxt_hwrm_clear_l2_filter(bp, filter);

        HWRM_PREP(req, CFA_L2_FILTER_ALLOC);

        req.flags = rte_cpu_to_le_32(filter->flags);

        enables = filter->enables |
              HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
        req.dst_id = rte_cpu_to_le_16(dst_id);

        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
                memcpy(req.l2_addr, filter->l2_addr,
                       ETHER_ADDR_LEN);
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
                memcpy(req.l2_addr_mask, filter->l2_addr_mask,
                       ETHER_ADDR_LEN);
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
                req.l2_ovlan = filter->l2_ovlan;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN)
                req.l2_ivlan = filter->l2_ivlan;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
                req.l2_ovlan_mask = filter->l2_ovlan_mask;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK)
                req.l2_ivlan_mask = filter->l2_ivlan_mask;
        if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID)
                req.src_id = rte_cpu_to_le_32(filter->src_id);
        if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE)
                req.src_type = filter->src_type;

        req.enables = rte_cpu_to_le_32(enables);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
{
        struct hwrm_port_mac_cfg_input req = {.req_type = 0};
        struct hwrm_port_mac_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
        uint32_t flags = 0;
        int rc;

        if (!ptp)
                return 0;

        HWRM_PREP(req, PORT_MAC_CFG);

        if (ptp->rx_filter)
                flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_ENABLE;
        else
                flags |=
                        HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_DISABLE;
        if (ptp->tx_tstamp_en)
                flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_ENABLE;
        else
                flags |=
                        HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_DISABLE;
        req.flags = rte_cpu_to_le_32(flags);
        req.enables = rte_cpu_to_le_32
                (HWRM_PORT_MAC_CFG_INPUT_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE);
        req.rx_ts_capture_ptp_msg_type = rte_cpu_to_le_16(ptp->rxctl);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

static int bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_port_mac_ptp_qcfg_input req = {.req_type = 0};
        struct hwrm_port_mac_ptp_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
        struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;

/*      if (bp->hwrm_spec_code < 0x10801 || ptp)  TBD  */
        if (ptp)
                return 0;

        HWRM_PREP(req, PORT_MAC_PTP_QCFG);

        req.port_id = rte_cpu_to_le_16(bp->pf.port_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        if (!(resp->flags &
              HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_DIRECT_ACCESS)) {
                HWRM_UNLOCK();
                return 0;
        }

        ptp = rte_zmalloc("ptp_cfg", sizeof(*ptp), 0);
        if (!ptp) {
                HWRM_UNLOCK();
                return -ENOMEM;
        }

        ptp->rx_regs[BNXT_PTP_RX_TS_L] =
                rte_le_to_cpu_32(resp->rx_ts_reg_off_lower);
        ptp->rx_regs[BNXT_PTP_RX_TS_H] =
                rte_le_to_cpu_32(resp->rx_ts_reg_off_upper);
        ptp->rx_regs[BNXT_PTP_RX_SEQ] =
                rte_le_to_cpu_32(resp->rx_ts_reg_off_seq_id);
        ptp->rx_regs[BNXT_PTP_RX_FIFO] =
                rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo);
        ptp->rx_regs[BNXT_PTP_RX_FIFO_ADV] =
                rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo_adv);
        ptp->tx_regs[BNXT_PTP_TX_TS_L] =
                rte_le_to_cpu_32(resp->tx_ts_reg_off_lower);
        ptp->tx_regs[BNXT_PTP_TX_TS_H] =
                rte_le_to_cpu_32(resp->tx_ts_reg_off_upper);
        ptp->tx_regs[BNXT_PTP_TX_SEQ] =
                rte_le_to_cpu_32(resp->tx_ts_reg_off_seq_id);
        ptp->tx_regs[BNXT_PTP_TX_FIFO] =
                rte_le_to_cpu_32(resp->tx_ts_reg_off_fifo);

        /* Response processing is complete; drop the HWRM lock. */
        HWRM_UNLOCK();

        ptp->bp = bp;
        bp->ptp_cfg = ptp;

        return 0;
}

static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_func_qcaps_input req = {.req_type = 0 };
        struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
        uint16_t new_max_vfs;
        uint32_t flags;
        int i;

        HWRM_PREP(req, FUNC_QCAPS);

        req.fid = rte_cpu_to_le_16(0xffff);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
        flags = rte_le_to_cpu_32(resp->flags);
        if (BNXT_PF(bp)) {
                bp->pf.port_id = resp->port_id;
                bp->pf.first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
                bp->pf.total_vfs = rte_le_to_cpu_16(resp->max_vfs);
                new_max_vfs = bp->pdev->max_vfs;
                if (new_max_vfs != bp->pf.max_vfs) {
                        if (bp->pf.vf_info)
                                rte_free(bp->pf.vf_info);
                        bp->pf.vf_info = rte_malloc("bnxt_vf_info",
                            sizeof(bp->pf.vf_info[0]) * new_max_vfs, 0);
                        bp->pf.max_vfs = new_max_vfs;
                        for (i = 0; i < new_max_vfs; i++) {
                                bp->pf.vf_info[i].fid = bp->pf.first_vf_id + i;
                                bp->pf.vf_info[i].vlan_table =
                                        rte_zmalloc("VF VLAN table",
                                                    getpagesize(),
                                                    getpagesize());
                                if (bp->pf.vf_info[i].vlan_table == NULL)
                                        PMD_DRV_LOG(ERR,
                                        "Failed to alloc VLAN table for VF %d\n",
                                        i);
                                else
                                        rte_mem_lock_page(
                                                bp->pf.vf_info[i].vlan_table);
                                bp->pf.vf_info[i].vlan_as_table =
                                        rte_zmalloc("VF VLAN AS table",
                                                    getpagesize(),
                                                    getpagesize());
                                if (bp->pf.vf_info[i].vlan_as_table == NULL)
                                        PMD_DRV_LOG(ERR,
                                        "Failed to alloc VLAN AS table for VF %d\n",
                                        i);
                                else
                                        rte_mem_lock_page(
                                               bp->pf.vf_info[i].vlan_as_table);
                                STAILQ_INIT(&bp->pf.vf_info[i].filter);
                        }
                }
        }

        bp->fw_fid = rte_le_to_cpu_32(resp->fid);
        memcpy(bp->dflt_mac_addr, &resp->mac_address, ETHER_ADDR_LEN);
        bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
        bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
        bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
        bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
        bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
        /* TODO: For now, do not support VMDq/RFS on VFs. */
        if (BNXT_PF(bp)) {
                if (bp->pf.max_vfs)
                        bp->max_vnics = 1;
                else
                        bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
        } else {
                bp->max_vnics = 1;
        }
        bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
        if (BNXT_PF(bp)) {
                bp->pf.total_vnics = rte_le_to_cpu_16(resp->max_vnics);
                if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED) {
                        bp->flags |= BNXT_FLAG_PTP_SUPPORTED;
                        PMD_DRV_LOG(INFO, "PTP SUPPORTED\n");
                        HWRM_UNLOCK();
                        bnxt_hwrm_ptp_qcfg(bp);
                        return rc; /* the lock was already released above */
                }
        }

        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
        int rc;

        rc = __bnxt_hwrm_func_qcaps(bp);
        if (!rc && bp->hwrm_spec_code >= HWRM_SPEC_CODE_1_8_3) {
                rc = bnxt_hwrm_func_resc_qcaps(bp);
                if (!rc)
                        bp->flags |= BNXT_FLAG_NEW_RM;
        }

        return rc;
}

int bnxt_hwrm_func_reset(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_func_reset_input req = {.req_type = 0 };
        struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, FUNC_RESET);

        req.enables = rte_cpu_to_le_32(0);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_func_driver_register(struct bnxt *bp)
{
        int rc;
        struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
        struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;

        if (bp->flags & BNXT_FLAG_REGISTERED)
                return 0;

        HWRM_PREP(req, FUNC_DRV_RGTR);
        req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
                        HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
        req.ver_maj = RTE_VER_YEAR;
        req.ver_min = RTE_VER_MONTH;
        req.ver_upd = RTE_VER_MINOR;

        if (BNXT_PF(bp)) {
                req.enables |= rte_cpu_to_le_32(
                        HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_REQ_FWD);
                memcpy(req.vf_req_fwd, bp->pf.vf_req_fwd,
                       RTE_MIN(sizeof(req.vf_req_fwd),
                               sizeof(bp->pf.vf_req_fwd)));

                /*
                 * A PF can sniff HWRM API calls issued by its VFs. This can
                 * be set up by the Linux driver and inherited by the DPDK
                 * PF driver. Clear this HWRM sniffer list in FW because the
                 * DPDK PF driver does not support it.
                 */
                req.flags =
                rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FWD_NONE_MODE);
        }

        req.async_event_fwd[0] |=
                rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_LINK_STATUS_CHANGE |
                                 ASYNC_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED |
                                 ASYNC_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE);
        req.async_event_fwd[1] |=
                rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_PF_DRVR_UNLOAD |
                                 ASYNC_CMPL_EVENT_ID_VF_CFG_CHANGE);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        bp->flags |= BNXT_FLAG_REGISTERED;

        return rc;
}

int bnxt_hwrm_func_reserve_vf_resc(struct bnxt *bp)
{
        int rc;
        struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        struct hwrm_func_vf_cfg_input req = {0};

        HWRM_PREP(req, FUNC_VF_CFG);

        req.enables = rte_cpu_to_le_32
                        (HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RX_RINGS  |
                        HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_TX_RINGS   |
                        HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_STAT_CTXS  |
                        HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
                        HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);

        req.num_tx_rings = rte_cpu_to_le_16(bp->tx_nr_rings);
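        /*
         * Each Rx ring is paired with an aggregation ring, hence the
         * multiplier on the Rx ring count (AGG_RING_MULTIPLIER is defined
         * elsewhere in the driver).
         */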
        req.num_rx_rings = rte_cpu_to_le_16(bp->rx_nr_rings *
                                            AGG_RING_MULTIPLIER);
        req.num_stat_ctxs = rte_cpu_to_le_16(bp->rx_nr_rings + bp->tx_nr_rings);
        req.num_cmpl_rings = rte_cpu_to_le_16(bp->rx_nr_rings +
                                              bp->tx_nr_rings);
        req.num_hw_ring_grps = rte_cpu_to_le_16(bp->rx_nr_rings);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();
        return rc;
}

int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp)
{
        int rc;
        struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
        struct hwrm_func_resource_qcaps_input req = {0};

        HWRM_PREP(req, FUNC_RESOURCE_QCAPS);
        req.fid = rte_cpu_to_le_16(0xffff);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        if (BNXT_VF(bp)) {
                bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
                bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
                bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
                bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
                bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
                bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
                bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
                bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
        }

        HWRM_UNLOCK();
        return rc;
}

int bnxt_hwrm_ver_get(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_ver_get_input req = {.req_type = 0 };
        struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t my_version;
        uint32_t fw_version;
        uint16_t max_resp_len;
        char type[RTE_MEMZONE_NAMESIZE];
        uint32_t dev_caps_cfg;

        bp->max_req_len = HWRM_MAX_REQ_LEN;
        HWRM_PREP(req, VER_GET);

        req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
        req.hwrm_intf_min = HWRM_VERSION_MINOR;
        req.hwrm_intf_upd = HWRM_VERSION_UPDATE;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        PMD_DRV_LOG(INFO, "%d.%d.%d:%d.%d.%d\n",
                resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
                resp->hwrm_intf_upd_8b, resp->hwrm_fw_maj_8b,
                resp->hwrm_fw_min_8b, resp->hwrm_fw_bld_8b);
        bp->fw_ver = (resp->hwrm_fw_maj_8b << 24) |
                     (resp->hwrm_fw_min_8b << 16) |
                     (resp->hwrm_fw_bld_8b << 8) |
                     resp->hwrm_fw_rsvd_8b;
        PMD_DRV_LOG(INFO, "Driver HWRM version: %d.%d.%d\n",
                HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);

        my_version = HWRM_VERSION_MAJOR << 16;
        my_version |= HWRM_VERSION_MINOR << 8;
        my_version |= HWRM_VERSION_UPDATE;

        fw_version = resp->hwrm_intf_maj_8b << 16;
        fw_version |= resp->hwrm_intf_min_8b << 8;
        fw_version |= resp->hwrm_intf_upd_8b;
        bp->hwrm_spec_code = fw_version;

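        /*
         * The spec code packs maj.min.upd as (maj << 16) | (min << 8) | upd,
         * e.g. HWRM_SPEC_CODE_1_8_3 above is 0x10803 == 1.8.3.
         */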
        if (resp->hwrm_intf_maj_8b != HWRM_VERSION_MAJOR) {
                PMD_DRV_LOG(ERR, "Unsupported firmware API version\n");
                rc = -EINVAL;
                goto error;
        }

        if (my_version != fw_version) {
                PMD_DRV_LOG(INFO, "BNXT Driver/HWRM API mismatch.\n");
                if (my_version < fw_version) {
                        PMD_DRV_LOG(INFO,
                                "Firmware API version is newer than driver.\n");
                        PMD_DRV_LOG(INFO,
                                "The driver may be missing features.\n");
                } else {
                        PMD_DRV_LOG(INFO,
                                "Firmware API version is older than driver.\n");
                        PMD_DRV_LOG(INFO,
                                "Not all driver features may be functional.\n");
                }
        }

        if (bp->max_req_len > resp->max_req_win_len) {
                PMD_DRV_LOG(ERR, "Unsupported request length\n");
                rc = -EINVAL;
        }
        bp->max_req_len = rte_le_to_cpu_16(resp->max_req_win_len);
        max_resp_len = rte_le_to_cpu_16(resp->max_resp_len);
        dev_caps_cfg = rte_le_to_cpu_32(resp->dev_caps_cfg);

        /* Name the DMA buffers after the PCI address; the same tag is also
         * used for the short command buffer below.
         */
        sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x",
                bp->pdev->addr.domain, bp->pdev->addr.bus,
                bp->pdev->addr.devid, bp->pdev->addr.function);

        if (bp->max_resp_len != max_resp_len) {
                rte_free(bp->hwrm_cmd_resp_addr);

                bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
                if (bp->hwrm_cmd_resp_addr == NULL) {
                        rc = -ENOMEM;
                        goto error;
                }
                rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
                bp->hwrm_cmd_resp_dma_addr =
                        rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
                if (bp->hwrm_cmd_resp_dma_addr == 0) {
                        PMD_DRV_LOG(ERR,
                        "Unable to map response buffer to physical memory.\n");
                        rc = -ENOMEM;
                        goto error;
                }
                bp->max_resp_len = max_resp_len;
        }

        if ((dev_caps_cfg &
                HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
            (dev_caps_cfg &
             HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) {
                PMD_DRV_LOG(DEBUG, "Short command supported\n");

                rte_free(bp->hwrm_short_cmd_req_addr);

                bp->hwrm_short_cmd_req_addr = rte_malloc(type,
                                                        bp->max_req_len, 0);
                if (bp->hwrm_short_cmd_req_addr == NULL) {
                        rc = -ENOMEM;
                        goto error;
                }
                rte_mem_lock_page(bp->hwrm_short_cmd_req_addr);
                bp->hwrm_short_cmd_req_dma_addr =
                        rte_mem_virt2iova(bp->hwrm_short_cmd_req_addr);
                if (bp->hwrm_short_cmd_req_dma_addr == 0) {
                        rte_free(bp->hwrm_short_cmd_req_addr);
                        PMD_DRV_LOG(ERR,
                                "Unable to map buffer to physical memory.\n");
                        rc = -ENOMEM;
                        goto error;
                }

                bp->flags |= BNXT_FLAG_SHORT_CMD;
        }

error:
        HWRM_UNLOCK();
        return rc;
}

int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
{
        int rc;
        struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
        struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;

        if (!(bp->flags & BNXT_FLAG_REGISTERED))
                return 0;

        HWRM_PREP(req, FUNC_DRV_UNRGTR);
        req.flags = flags;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        bp->flags &= ~BNXT_FLAG_REGISTERED;

        return rc;
}

static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
{
        int rc = 0;
        struct hwrm_port_phy_cfg_input req = {0};
        struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t enables = 0;

        HWRM_PREP(req, PORT_PHY_CFG);

        if (conf->link_up) {
                /* Setting a fixed speed while autoneg is on, so disable it */
                if (bp->link_info.auto_mode && conf->link_speed) {
                        req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE;
                        PMD_DRV_LOG(DEBUG, "Disabling AutoNeg\n");
                }

                req.flags = rte_cpu_to_le_32(conf->phy_flags);
                req.force_link_speed = rte_cpu_to_le_16(conf->link_speed);
                enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
                /*
                 * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
                 * any auto mode, even "none".
                 */
                if (!conf->link_speed) {
                        /* No speeds specified. Enable AutoNeg - all speeds */
                        req.auto_mode =
                                HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS;
                }
                /* AutoNeg - advertise the specified speeds. */
                if (conf->auto_link_speed_mask &&
                    !(conf->phy_flags & HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE)) {
                        req.auto_mode =
                                HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
                        req.auto_link_speed_mask =
                                conf->auto_link_speed_mask;
                        enables |=
                        HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
                }

                req.auto_duplex = conf->duplex;
                enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
                req.auto_pause = conf->auto_pause;
                req.force_pause = conf->force_pause;
                /* Set force_pause if there is no auto or if there is a force */
                if (req.auto_pause && !req.force_pause)
                        enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
                else
                        enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;

                req.enables = rte_cpu_to_le_32(enables);
        } else {
                req.flags =
                rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN);
                PMD_DRV_LOG(INFO, "Force Link Down\n");
        }

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
                                   struct bnxt_link_info *link_info)
{
        int rc = 0;
        struct hwrm_port_phy_qcfg_input req = {0};
        struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, PORT_PHY_QCFG);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        link_info->phy_link_status = resp->link;
        link_info->link_up =
                (link_info->phy_link_status ==
                 HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) ? 1 : 0;
        link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
        link_info->duplex = resp->duplex_cfg;
        link_info->pause = resp->pause;
        link_info->auto_pause = resp->auto_pause;
        link_info->force_pause = resp->force_pause;
        link_info->auto_mode = resp->auto_mode;
        link_info->phy_type = resp->phy_type;
        link_info->media_type = resp->media_type;

        link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
        link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
        link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
        link_info->force_link_speed = rte_le_to_cpu_16(resp->force_link_speed);
        link_info->phy_ver[0] = resp->phy_maj;
        link_info->phy_ver[1] = resp->phy_min;
        link_info->phy_ver[2] = resp->phy_bld;

        HWRM_UNLOCK();

        PMD_DRV_LOG(DEBUG, "Link Speed %d\n", link_info->link_speed);
        PMD_DRV_LOG(DEBUG, "Auto Mode %d\n", link_info->auto_mode);
        PMD_DRV_LOG(DEBUG, "Support Speeds %x\n", link_info->support_speeds);
        PMD_DRV_LOG(DEBUG, "Auto Link Speed %x\n", link_info->auto_link_speed);
        PMD_DRV_LOG(DEBUG, "Auto Link Speed Mask %x\n",
                    link_info->auto_link_speed_mask);
        PMD_DRV_LOG(DEBUG, "Forced Link Speed %x\n",
                    link_info->force_link_speed);

        return rc;
}

int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
        struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
        int i;

        HWRM_PREP(req, QUEUE_QPORTCFG);

        req.flags = HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX;
        /* HWRM Version >= 1.9.1 */
        if (bp->hwrm_spec_code >= HWRM_VERSION_1_9_1)
                req.drv_qmap_cap =
                        HWRM_QUEUE_QPORTCFG_INPUT_DRV_QMAP_CAP_ENABLED;
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

#define GET_QUEUE_INFO(x) \
        bp->cos_queue[x].id = resp->queue_id##x; \
        bp->cos_queue[x].profile = resp->queue_id##x##_service_profile

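        /*
         * Token pasting expands each invocation in place; e.g.
         * GET_QUEUE_INFO(0) becomes:
         *      bp->cos_queue[0].id = resp->queue_id0;
         *      bp->cos_queue[0].profile = resp->queue_id0_service_profile;
         */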
        GET_QUEUE_INFO(0);
        GET_QUEUE_INFO(1);
        GET_QUEUE_INFO(2);
        GET_QUEUE_INFO(3);
        GET_QUEUE_INFO(4);
        GET_QUEUE_INFO(5);
        GET_QUEUE_INFO(6);
        GET_QUEUE_INFO(7);

        HWRM_UNLOCK();

        if (bp->hwrm_spec_code < HWRM_VERSION_1_9_1) {
                bp->tx_cosq_id = bp->cos_queue[0].id;
        } else {
                /* Iterate and find the COSq profile to use for Tx */
                for (i = 0; i < BNXT_COS_QUEUE_COUNT; i++) {
                        if (bp->cos_queue[i].profile ==
                                HWRM_QUEUE_SERVICE_PROFILE_LOSSY) {
                                bp->tx_cosq_id = bp->cos_queue[i].id;
                                break;
                        }
                }
        }
        PMD_DRV_LOG(DEBUG, "Tx Cos Queue to use: %d\n", bp->tx_cosq_id);

        return rc;
}

int bnxt_hwrm_ring_alloc(struct bnxt *bp,
                         struct bnxt_ring *ring,
                         uint32_t ring_type, uint32_t map_index,
                         uint32_t stats_ctx_id, uint32_t cmpl_ring_id)
{
        int rc = 0;
        uint32_t enables = 0;
        struct hwrm_ring_alloc_input req = {.req_type = 0 };
        struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_ALLOC);

        req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
        req.fbo = rte_cpu_to_le_32(0);
        /* Association of ring index with doorbell index */
        req.logical_id = rte_cpu_to_le_16(map_index);
        req.length = rte_cpu_to_le_32(ring->ring_size);

        switch (ring_type) {
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
                req.queue_id = rte_cpu_to_le_16(bp->tx_cosq_id);
                /* FALLTHROUGH */
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
                req.ring_type = ring_type;
                req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
                req.stat_ctx_id = rte_cpu_to_le_16(stats_ctx_id);
                if (stats_ctx_id != INVALID_STATS_CTX_ID)
                        enables |=
                        HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
                break;
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
                req.ring_type = ring_type;
                /*
                 * TODO: Some HWRM versions crash with
                 * HWRM_RING_ALLOC_INPUT_INT_MODE_POLL
                 */
                req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
                break;
        default:
                PMD_DRV_LOG(ERR, "hwrm alloc invalid ring type %d\n",
                        ring_type);
                HWRM_UNLOCK();
                return -1;
        }
        req.enables = rte_cpu_to_le_32(enables);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        if (rc || resp->error_code) {
                if (rc == 0 && resp->error_code)
                        rc = rte_le_to_cpu_16(resp->error_code);
                switch (ring_type) {
                case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
                        PMD_DRV_LOG(ERR,
                                "hwrm_ring_alloc cp failed. rc:%d\n", rc);
                        HWRM_UNLOCK();
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
                        PMD_DRV_LOG(ERR,
                                "hwrm_ring_alloc rx failed. rc:%d\n", rc);
                        HWRM_UNLOCK();
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
                        PMD_DRV_LOG(ERR,
                                "hwrm_ring_alloc tx failed. rc:%d\n", rc);
                        HWRM_UNLOCK();
                        return rc;
                default:
                        PMD_DRV_LOG(ERR, "Invalid ring. rc:%d\n", rc);
                        HWRM_UNLOCK();
                        return rc;
                }
        }

        ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
        HWRM_UNLOCK();
        return rc;
}

int bnxt_hwrm_ring_free(struct bnxt *bp,
                        struct bnxt_ring *ring, uint32_t ring_type)
{
        int rc;
        struct hwrm_ring_free_input req = {.req_type = 0 };
        struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_FREE);

        req.ring_type = ring_type;
        req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        if (rc || resp->error_code) {
                if (rc == 0 && resp->error_code)
                        rc = rte_le_to_cpu_16(resp->error_code);
                HWRM_UNLOCK();

                switch (ring_type) {
                case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
                        PMD_DRV_LOG(ERR, "hwrm_ring_free cp failed. rc:%d\n",
                                rc);
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
                        PMD_DRV_LOG(ERR, "hwrm_ring_free rx failed. rc:%d\n",
                                rc);
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
                        PMD_DRV_LOG(ERR, "hwrm_ring_free tx failed. rc:%d\n",
                                rc);
                        return rc;
                default:
                        PMD_DRV_LOG(ERR, "Invalid ring, rc:%d\n", rc);
                        return rc;
                }
        }
        HWRM_UNLOCK();
        return 0;
}

int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
{
        int rc = 0;
        struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
        struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_GRP_ALLOC);

        req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
        req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
        req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
        req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        bp->grp_info[idx].fw_grp_id =
            rte_le_to_cpu_16(resp->ring_group_id);

        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
{
        int rc;
        struct hwrm_ring_grp_free_input req = {.req_type = 0 };
        struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_GRP_FREE);

        req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
        return rc;
}

int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
{
        int rc = 0;
        struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
        struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;

        if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
                return rc;

        HWRM_PREP(req, STAT_CTX_CLR_STATS);

        req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
                                unsigned int idx __rte_unused)
{
        int rc;
        struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
        struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, STAT_CTX_ALLOC);

        req.update_period_ms = rte_cpu_to_le_32(0);

        req.stats_dma_addr =
            rte_cpu_to_le_64(cpr->hw_stats_map);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);

        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
                                unsigned int idx __rte_unused)
{
        int rc;
        struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
        struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, STAT_CTX_FREE);

        req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0, i, j;
        struct hwrm_vnic_alloc_input req = { 0 };
        struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        /* map ring groups to this vnic */
        PMD_DRV_LOG(DEBUG, "Alloc VNIC. Start %x, End %x\n",
                vnic->start_grp_id, vnic->end_grp_id);
        for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++)
                vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
        vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
        vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
        vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
        vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
        vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +
                                ETHER_CRC_LEN + VLAN_TAG_SIZE;
        HWRM_PREP(req, VNIC_ALLOC);

        if (vnic->func_default)
                req.flags =
                        rte_cpu_to_le_32(HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT);
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
        HWRM_UNLOCK();
        PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
        return rc;
}

static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
                                        struct bnxt_vnic_info *vnic,
                                        struct bnxt_plcmodes_cfg *pmode)
{
        int rc = 0;
        struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 };
        struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_PLCMODES_QCFG);

        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        pmode->flags = rte_le_to_cpu_32(resp->flags);
        /* dflt_vnic bit doesn't exist in the _cfg command */
        pmode->flags &= ~(HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC);
        pmode->jumbo_thresh = rte_le_to_cpu_16(resp->jumbo_thresh);
        pmode->hds_offset = rte_le_to_cpu_16(resp->hds_offset);
        pmode->hds_threshold = rte_le_to_cpu_16(resp->hds_threshold);

        HWRM_UNLOCK();

        return rc;
}

static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
                                       struct bnxt_vnic_info *vnic,
                                       struct bnxt_plcmodes_cfg *pmode)
{
        int rc = 0;
        struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
        struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_PLCMODES_CFG);

        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
        req.flags = rte_cpu_to_le_32(pmode->flags);
        req.jumbo_thresh = rte_cpu_to_le_16(pmode->jumbo_thresh);
        req.hds_offset = rte_cpu_to_le_16(pmode->hds_offset);
        req.hds_threshold = rte_cpu_to_le_16(pmode->hds_threshold);
        req.enables = rte_cpu_to_le_32(
            HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID |
            HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID |
            HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID
        );

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}
1348
1349 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1350 {
1351         int rc = 0;
1352         struct hwrm_vnic_cfg_input req = {.req_type = 0 };
1353         struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1354         uint32_t ctx_enable_flag = 0;
1355         struct bnxt_plcmodes_cfg pmodes;
1356
1357         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1358                 PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
1359                 return rc;
1360         }
1361
1362         rc = bnxt_hwrm_vnic_plcmodes_qcfg(bp, vnic, &pmodes);
1363         if (rc)
1364                 return rc;
1365
1366         HWRM_PREP(req, VNIC_CFG);
1367
1368         /* Only RSS support for now TBD: COS & LB */
1369         req.enables =
1370             rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP);
1371         if (vnic->lb_rule != 0xffff)
1372                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE;
1373         if (vnic->cos_rule != 0xffff)
1374                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE;
1375         if (vnic->rss_rule != 0xffff) {
1376                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_MRU;
1377                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
1378         }
1379         req.enables |= rte_cpu_to_le_32(ctx_enable_flag);
1380         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1381         req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);
1382         req.rss_rule = rte_cpu_to_le_16(vnic->rss_rule);
1383         req.cos_rule = rte_cpu_to_le_16(vnic->cos_rule);
1384         req.lb_rule = rte_cpu_to_le_16(vnic->lb_rule);
1385         req.mru = rte_cpu_to_le_16(vnic->mru);
1386         if (vnic->func_default)
1387                 req.flags |=
1388                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
1389         if (vnic->vlan_strip)
1390                 req.flags |=
1391                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
1392         if (vnic->bd_stall)
1393                 req.flags |=
1394                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
1395         if (vnic->roce_dual)
1396                 req.flags |= rte_cpu_to_le_32(
1397                         HWRM_VNIC_CFG_INPUT_FLAGS_ROCE_DUAL_VNIC_MODE);
1398         if (vnic->roce_only)
1399                 req.flags |= rte_cpu_to_le_32(
1400                         HWRM_VNIC_CFG_INPUT_FLAGS_ROCE_ONLY_VNIC_MODE);
1401         if (vnic->rss_dflt_cr)
1402                 req.flags |= rte_cpu_to_le_32(
1403                         HWRM_VNIC_CFG_INPUT_FLAGS_RSS_DFLT_CR_MODE);
1404
1405         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1406
1407         HWRM_CHECK_RESULT();
1408         HWRM_UNLOCK();
1409
1410         rc = bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes);
1411
1412         return rc;
1413 }
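
/*
 * Example (illustrative sketch, not part of the driver): the usual VNIC
 * bring-up order with the helpers in this file -- allocate an RSS/COS/LB
 * context, configure the VNIC, then program the RSS table.  Assumes
 * bnxt_hwrm_vnic_alloc() has already assigned vnic->fw_vnic_id; the
 * function name below is made up and the block is compiled out.
 */
#ifdef BNXT_HWRM_EXAMPLES
static int example_vnic_setup(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc;

        rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic);        /* sets vnic->rss_rule */
        if (rc)
                return rc;

        rc = bnxt_hwrm_vnic_cfg(bp, vnic);              /* rings, MRU, flags */
        if (rc)
                return rc;

        return bnxt_hwrm_vnic_rss_cfg(bp, vnic);        /* hash type and table */
}
#endif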
1414
1415 int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
1416                 int16_t fw_vf_id)
1417 {
1418         int rc = 0;
1419         struct hwrm_vnic_qcfg_input req = {.req_type = 0 };
1420         struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1421
1422         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1423                 PMD_DRV_LOG(DEBUG, "VNIC QCFG ID %d\n", vnic->fw_vnic_id);
1424                 return rc;
1425         }
1426         HWRM_PREP(req, VNIC_QCFG);
1427
1428         req.enables =
1429                 rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID);
1430         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1431         req.vf_id = rte_cpu_to_le_16(fw_vf_id);
1432
1433         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1434
1435         HWRM_CHECK_RESULT();
1436
1437         vnic->dflt_ring_grp = rte_le_to_cpu_16(resp->dflt_ring_grp);
1438         vnic->rss_rule = rte_le_to_cpu_16(resp->rss_rule);
1439         vnic->cos_rule = rte_le_to_cpu_16(resp->cos_rule);
1440         vnic->lb_rule = rte_le_to_cpu_16(resp->lb_rule);
1441         vnic->mru = rte_le_to_cpu_16(resp->mru);
1442         vnic->func_default = rte_le_to_cpu_32(
1443                         resp->flags) & HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT;
1444         vnic->vlan_strip = rte_le_to_cpu_32(resp->flags) &
1445                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE;
1446         vnic->bd_stall = rte_le_to_cpu_32(resp->flags) &
1447                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE;
1448         vnic->roce_dual = rte_le_to_cpu_32(resp->flags) &
1449                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE;
1450         vnic->roce_only = rte_le_to_cpu_32(resp->flags) &
1451                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE;
1452         vnic->rss_dflt_cr = rte_le_to_cpu_32(resp->flags) &
1453                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE;
1454
1455         HWRM_UNLOCK();
1456
1457         return rc;
1458 }
1459
1460 int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1461 {
1462         int rc = 0;
1463         struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
1464         struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
1465                                                 bp->hwrm_cmd_resp_addr;
1466
1467         HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC);
1468
1469         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1470
1471         HWRM_CHECK_RESULT();
1472
1473         vnic->rss_rule = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
1474         HWRM_UNLOCK();
1475         PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
1476
1477         return rc;
1478 }
1479
1480 int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1481 {
1482         int rc = 0;
1483         struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
1484         struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
1485                                                 bp->hwrm_cmd_resp_addr;
1486
1487         if (vnic->rss_rule == 0xffff) {
1488                 PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
1489                 return rc;
1490         }
1491         HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE);
1492
1493         req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->rss_rule);
1494
1495         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1496
1497         HWRM_CHECK_RESULT();
1498         HWRM_UNLOCK();
1499
1500         vnic->rss_rule = INVALID_HW_RING_ID;
1501
1502         return rc;
1503 }
1504
1505 int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1506 {
1507         int rc = 0;
1508         struct hwrm_vnic_free_input req = {.req_type = 0 };
1509         struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;
1510
1511         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1512                 PMD_DRV_LOG(DEBUG, "VNIC FREE ID %x\n", vnic->fw_vnic_id);
1513                 return rc;
1514         }
1515
1516         HWRM_PREP(req, VNIC_FREE);
1517
1518         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1519
1520         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1521
1522         HWRM_CHECK_RESULT();
1523         HWRM_UNLOCK();
1524
1525         vnic->fw_vnic_id = INVALID_HW_RING_ID;
1526         return rc;
1527 }
1528
1529 int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
1530                            struct bnxt_vnic_info *vnic)
1531 {
1532         int rc = 0;
1533         struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
1534         struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1535
1536         HWRM_PREP(req, VNIC_RSS_CFG);
1537
1538         req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
1539         req.hash_mode_flags = vnic->hash_mode;
1540
1541         req.ring_grp_tbl_addr =
1542             rte_cpu_to_le_64(vnic->rss_table_dma_addr);
1543         req.hash_key_tbl_addr =
1544             rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
1545         req.rss_ctx_idx = rte_cpu_to_le_16(vnic->rss_rule);
1546
1547         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1548
1549         HWRM_CHECK_RESULT();
1550         HWRM_UNLOCK();
1551
1552         return rc;
1553 }
1554
1555 int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
1556                         struct bnxt_vnic_info *vnic)
1557 {
1558         int rc = 0;
1559         struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
1560         struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1561         uint16_t size;
1562
1563         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1564                 PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
1565                 return rc;
1566         }
1567
1568         HWRM_PREP(req, VNIC_PLCMODES_CFG);
1569
1570         req.flags = rte_cpu_to_le_32(
1571                         HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);
1572
1573         req.enables = rte_cpu_to_le_32(
1574                 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID);
1575
1576         size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
1577         size -= RTE_PKTMBUF_HEADROOM;
1578
1579         req.jumbo_thresh = rte_cpu_to_le_16(size);
1580         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1581
1582         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1583
1584         HWRM_CHECK_RESULT();
1585         HWRM_UNLOCK();
1586
1587         return rc;
1588 }
1589
1590 int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
1591                         struct bnxt_vnic_info *vnic, bool enable)
1592 {
1593         int rc = 0;
1594         struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
1595         struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1596
1597         HWRM_PREP(req, VNIC_TPA_CFG);
1598
1599         if (enable) {
1600                 req.enables = rte_cpu_to_le_32(
1601                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
1602                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS |
1603                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN);
1604                 req.flags = rte_cpu_to_le_32(
1605                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
1606                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
1607                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE |
1608                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO |
1609                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
1610                         HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ);
1611                 req.max_agg_segs = rte_cpu_to_le_16(5);
1612                 req.max_aggs =
1613                         rte_cpu_to_le_16(HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX);
1614                 req.min_agg_len = rte_cpu_to_le_32(512);
1615         }
1616         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1617
1618         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1619
1620         HWRM_CHECK_RESULT();
1621         HWRM_UNLOCK();
1622
1623         return rc;
1624 }
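
/*
 * Example (illustrative sketch, not part of the driver): driving
 * bnxt_hwrm_vnic_tpa_cfg() from the standard ethdev Rx offload flags.
 * The helper name is made up and the block is compiled out.
 */
#ifdef BNXT_HWRM_EXAMPLES
static int example_apply_lro(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        struct rte_eth_conf *conf = &bp->eth_dev->data->dev_conf;
        bool enable = !!(conf->rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO);

        return bnxt_hwrm_vnic_tpa_cfg(bp, vnic, enable);
}
#endif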
1625
1626 int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
1627 {
1628         struct hwrm_func_cfg_input req = {0};
1629         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1630         int rc;
1631
1632         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
1633         req.enables = rte_cpu_to_le_32(
1634                         HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
1635         memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
1636         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
1637
1638         HWRM_PREP(req, FUNC_CFG);
1639
1640         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1641         HWRM_CHECK_RESULT();
1642         HWRM_UNLOCK();
1643
1644         bp->pf.vf_info[vf].random_mac = false;
1645
1646         return rc;
1647 }
1648
1649 int bnxt_hwrm_func_qstats_tx_drop(struct bnxt *bp, uint16_t fid,
1650                                   uint64_t *dropped)
1651 {
1652         int rc = 0;
1653         struct hwrm_func_qstats_input req = {.req_type = 0};
1654         struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
1655
1656         HWRM_PREP(req, FUNC_QSTATS);
1657
1658         req.fid = rte_cpu_to_le_16(fid);
1659
1660         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1661
1662         HWRM_CHECK_RESULT();
1663
1664         if (dropped)
1665                 *dropped = rte_le_to_cpu_64(resp->tx_drop_pkts);
1666
1667         HWRM_UNLOCK();
1668
1669         return rc;
1670 }
1671
1672 int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
1673                           struct rte_eth_stats *stats)
1674 {
1675         int rc = 0;
1676         struct hwrm_func_qstats_input req = {.req_type = 0};
1677         struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
1678
1679         HWRM_PREP(req, FUNC_QSTATS);
1680
1681         req.fid = rte_cpu_to_le_16(fid);
1682
1683         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1684
1685         HWRM_CHECK_RESULT();
1686
1687         stats->ipackets = rte_le_to_cpu_64(resp->rx_ucast_pkts);
1688         stats->ipackets += rte_le_to_cpu_64(resp->rx_mcast_pkts);
1689         stats->ipackets += rte_le_to_cpu_64(resp->rx_bcast_pkts);
1690         stats->ibytes = rte_le_to_cpu_64(resp->rx_ucast_bytes);
1691         stats->ibytes += rte_le_to_cpu_64(resp->rx_mcast_bytes);
1692         stats->ibytes += rte_le_to_cpu_64(resp->rx_bcast_bytes);
1693
1694         stats->opackets = rte_le_to_cpu_64(resp->tx_ucast_pkts);
1695         stats->opackets += rte_le_to_cpu_64(resp->tx_mcast_pkts);
1696         stats->opackets += rte_le_to_cpu_64(resp->tx_bcast_pkts);
1697         stats->obytes = rte_le_to_cpu_64(resp->tx_ucast_bytes);
1698         stats->obytes += rte_le_to_cpu_64(resp->tx_mcast_bytes);
1699         stats->obytes += rte_le_to_cpu_64(resp->tx_bcast_bytes);
1700
1701         stats->imissed = rte_le_to_cpu_64(resp->rx_discard_pkts);
1702         stats->ierrors = rte_le_to_cpu_64(resp->rx_drop_pkts);
1703         stats->oerrors = rte_le_to_cpu_64(resp->tx_discard_pkts);
1704
1705         HWRM_UNLOCK();
1706
1707         return rc;
1708 }
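
/*
 * Example (illustrative sketch, not part of the driver): an ethdev
 * stats_get callback forwarding to bnxt_hwrm_func_qstats().  The callback
 * name is made up; 0xffff as the FID means "this function", matching the
 * convention used elsewhere in this file.  Compiled out by default.
 */
#ifdef BNXT_HWRM_EXAMPLES
static int example_stats_get(struct rte_eth_dev *eth_dev,
                             struct rte_eth_stats *stats)
{
        struct bnxt *bp = eth_dev->data->dev_private;

        return bnxt_hwrm_func_qstats(bp, 0xffff, stats);
}
#endif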
1709
1710 int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid)
1711 {
1712         int rc = 0;
1713         struct hwrm_func_clr_stats_input req = {.req_type = 0};
1714         struct hwrm_func_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
1715
1716         HWRM_PREP(req, FUNC_CLR_STATS);
1717
1718         req.fid = rte_cpu_to_le_16(fid);
1719
1720         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1721
1722         HWRM_CHECK_RESULT();
1723         HWRM_UNLOCK();
1724
1725         return rc;
1726 }
1727
1728 /*
1729  * HWRM utility functions
1730  */
1731
1732 int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
1733 {
1734         unsigned int i;
1735         int rc = 0;
1736
1737         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1738                 struct bnxt_tx_queue *txq;
1739                 struct bnxt_rx_queue *rxq;
1740                 struct bnxt_cp_ring_info *cpr;
1741
1742                 if (i >= bp->rx_cp_nr_rings) {
1743                         txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
1744                         cpr = txq->cp_ring;
1745                 } else {
1746                         rxq = bp->rx_queues[i];
1747                         cpr = rxq->cp_ring;
1748                 }
1749
1750                 rc = bnxt_hwrm_stat_clear(bp, cpr);
1751                 if (rc)
1752                         return rc;
1753         }
1754         return 0;
1755 }
1756
1757 int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
1758 {
1759         int rc;
1760         unsigned int i;
1761         struct bnxt_cp_ring_info *cpr;
1762
1763         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1764
1765                 if (i >= bp->rx_cp_nr_rings) {
1766                         cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
1767                 } else {
1768                         cpr = bp->rx_queues[i]->cp_ring;
1769                         bp->grp_info[i].fw_stats_ctx = -1;
1770                 }
1771                 if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
1772                         rc = bnxt_hwrm_stat_ctx_free(bp, cpr, i);
1773                         cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
1774                         if (rc)
1775                                 return rc;
1776                 }
1777         }
1778         return 0;
1779 }
1780
1781 int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
1782 {
1783         unsigned int i;
1784         int rc = 0;
1785
1786         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1787                 struct bnxt_tx_queue *txq;
1788                 struct bnxt_rx_queue *rxq;
1789                 struct bnxt_cp_ring_info *cpr;
1790
1791                 if (i >= bp->rx_cp_nr_rings) {
1792                         txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
1793                         cpr = txq->cp_ring;
1794                 } else {
1795                         rxq = bp->rx_queues[i];
1796                         cpr = rxq->cp_ring;
1797                 }
1798
1799                 rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, i);
1800
1801                 if (rc)
1802                         return rc;
1803         }
1804         return rc;
1805 }
1806
1807 int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
1808 {
1809         uint16_t idx;
1810         int rc = 0;
1811
1812         for (idx = 0; idx < bp->rx_cp_nr_rings; idx++) {
1813
1814                 if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID)
1815                         continue;
1816
1817                 rc = bnxt_hwrm_ring_grp_free(bp, idx);
1818
1819                 if (rc)
1820                         return rc;
1821         }
1822         return rc;
1823 }
1824
1825 static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
1826 {
1827         struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
1828
1829         bnxt_hwrm_ring_free(bp, cp_ring,
1830                         HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL);
1831         cp_ring->fw_ring_id = INVALID_HW_RING_ID;
1832         memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
1833                         sizeof(*cpr->cp_desc_ring));
1834         cpr->cp_raw_cons = 0;
1835 }
1836
1837 void bnxt_free_hwrm_rx_ring(struct bnxt *bp, int queue_index)
1838 {
1839         struct bnxt_rx_queue *rxq = bp->rx_queues[queue_index];
1840         struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
1841         struct bnxt_ring *ring = rxr->rx_ring_struct;
1842         struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
1843
1844         if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1845                 bnxt_hwrm_ring_free(bp, ring,
1846                                     HWRM_RING_FREE_INPUT_RING_TYPE_RX);
1847                 ring->fw_ring_id = INVALID_HW_RING_ID;
1848                 bp->grp_info[queue_index].rx_fw_ring_id = INVALID_HW_RING_ID;
1849                 memset(rxr->rx_desc_ring, 0,
1850                        rxr->rx_ring_struct->ring_size *
1851                        sizeof(*rxr->rx_desc_ring));
1852                 memset(rxr->rx_buf_ring, 0,
1853                        rxr->rx_ring_struct->ring_size *
1854                        sizeof(*rxr->rx_buf_ring));
1855                 rxr->rx_prod = 0;
1856         }
1857         ring = rxr->ag_ring_struct;
1858         if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1859                 bnxt_hwrm_ring_free(bp, ring,
1860                                     HWRM_RING_FREE_INPUT_RING_TYPE_RX);
1861                 ring->fw_ring_id = INVALID_HW_RING_ID;
1862                 memset(rxr->ag_buf_ring, 0,
1863                        rxr->ag_ring_struct->ring_size *
1864                        sizeof(*rxr->ag_buf_ring));
1865                 rxr->ag_prod = 0;
1866                 bp->grp_info[queue_index].ag_fw_ring_id = INVALID_HW_RING_ID;
1867         }
1868         if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
1869                 bnxt_free_cp_ring(bp, cpr);
1870
1871         bp->grp_info[queue_index].cp_fw_ring_id = INVALID_HW_RING_ID;
1872 }
1873
1874 int bnxt_free_all_hwrm_rings(struct bnxt *bp)
1875 {
1876         unsigned int i;
1877
1878         for (i = 0; i < bp->tx_cp_nr_rings; i++) {
1879                 struct bnxt_tx_queue *txq = bp->tx_queues[i];
1880                 struct bnxt_tx_ring_info *txr = txq->tx_ring;
1881                 struct bnxt_ring *ring = txr->tx_ring_struct;
1882                 struct bnxt_cp_ring_info *cpr = txq->cp_ring;
1883
1884                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1885                         bnxt_hwrm_ring_free(bp, ring,
1886                                         HWRM_RING_FREE_INPUT_RING_TYPE_TX);
1887                         ring->fw_ring_id = INVALID_HW_RING_ID;
1888                         memset(txr->tx_desc_ring, 0,
1889                                         txr->tx_ring_struct->ring_size *
1890                                         sizeof(*txr->tx_desc_ring));
1891                         memset(txr->tx_buf_ring, 0,
1892                                         txr->tx_ring_struct->ring_size *
1893                                         sizeof(*txr->tx_buf_ring));
1894                         txr->tx_prod = 0;
1895                         txr->tx_cons = 0;
1896                 }
1897                 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1898                         bnxt_free_cp_ring(bp, cpr);
1899                         cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
1900                 }
1901         }
1902
1903         for (i = 0; i < bp->rx_cp_nr_rings; i++)
1904                 bnxt_free_hwrm_rx_ring(bp, i);
1905
1906         return 0;
1907 }
1908
1909 int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
1910 {
1911         uint16_t i;
1912         int rc = 0;
1913
1914         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
1915                 rc = bnxt_hwrm_ring_grp_alloc(bp, i);
1916                 if (rc)
1917                         return rc;
1918         }
1919         return rc;
1920 }
1921
1922 void bnxt_free_hwrm_resources(struct bnxt *bp)
1923 {
1924         /* Release memzone */
1925         rte_free(bp->hwrm_cmd_resp_addr);
1926         rte_free(bp->hwrm_short_cmd_req_addr);
1927         bp->hwrm_cmd_resp_addr = NULL;
1928         bp->hwrm_short_cmd_req_addr = NULL;
1929         bp->hwrm_cmd_resp_dma_addr = 0;
1930         bp->hwrm_short_cmd_req_dma_addr = 0;
1931 }
1932
1933 int bnxt_alloc_hwrm_resources(struct bnxt *bp)
1934 {
1935         struct rte_pci_device *pdev = bp->pdev;
1936         char type[RTE_MEMZONE_NAMESIZE];
1937
1938         snprintf(type, sizeof(type), "bnxt_hwrm_%04x:%02x:%02x:%02x",
1939                  pdev->addr.domain, pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
1940         bp->max_resp_len = HWRM_MAX_RESP_LEN;
1941         bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
1942         if (bp->hwrm_cmd_resp_addr == NULL)
1943                 return -ENOMEM;
1944         rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
1945         bp->hwrm_cmd_resp_dma_addr =
1946                 rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
1947         if (bp->hwrm_cmd_resp_dma_addr == 0) {
1948                 PMD_DRV_LOG(ERR,
1949                         "unable to map response address to physical memory\n");
1950                 return -ENOMEM;
1951         }
1952         rte_spinlock_init(&bp->hwrm_lock);
1953
1954         return 0;
1955 }
1956
1957 int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1958 {
1959         struct bnxt_filter_info *filter;
1960         int rc = 0;
1961
1962         STAILQ_FOREACH(filter, &vnic->filter, next) {
1963                 if (filter->filter_type == HWRM_CFA_EM_FILTER)
1964                         rc = bnxt_hwrm_clear_em_filter(bp, filter);
1965                 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
1966                         rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
1967                 else
1968                         rc = bnxt_hwrm_clear_l2_filter(bp, filter);
1969                 /* Deliberately ignore errors here so the remaining
1970                  * filters are still cleared. */
1971         }
1972         return rc;
1973 }
1974
1975 static int
1976 bnxt_clear_hwrm_vnic_flows(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1977 {
1978         struct bnxt_filter_info *filter;
1979         struct rte_flow *flow;
1980         int rc = 0;
1981
1982         STAILQ_FOREACH(flow, &vnic->flow_list, next) {
1983                 filter = flow->filter;
1984                 PMD_DRV_LOG(DEBUG, "filter type %d\n", filter->filter_type);
1985                 if (filter->filter_type == HWRM_CFA_EM_FILTER)
1986                         rc = bnxt_hwrm_clear_em_filter(bp, filter);
1987                 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
1988                         rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
1989                 else
1990                         rc = bnxt_hwrm_clear_l2_filter(bp, filter);
1991
1992                 STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
1993                 rte_free(flow);
1994                 /* Deliberately ignore errors here so the remaining
1995                  * flows are still removed. */
1996         }
1997         return rc;
1998 }
1999
2000 int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2001 {
2002         struct bnxt_filter_info *filter;
2003         int rc = 0;
2004
2005         STAILQ_FOREACH(filter, &vnic->filter, next) {
2006                 if (filter->filter_type == HWRM_CFA_EM_FILTER)
2007                         rc = bnxt_hwrm_set_em_filter(bp, filter->dst_id,
2008                                                      filter);
2009                 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
2010                         rc = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id,
2011                                                          filter);
2012                 else
2013                         rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
2014                                                      filter);
2015                 if (rc)
2016                         break;
2017         }
2018         return rc;
2019 }
2020
2021 void bnxt_free_tunnel_ports(struct bnxt *bp)
2022 {
2023         if (bp->vxlan_port_cnt)
2024                 bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id,
2025                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN);
2026         bp->vxlan_port = 0;
2027         if (bp->geneve_port_cnt)
2028                 bnxt_hwrm_tunnel_dst_port_free(bp, bp->geneve_fw_dst_port_id,
2029                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE);
2030         bp->geneve_port = 0;
2031 }
2032
2033 void bnxt_free_all_hwrm_resources(struct bnxt *bp)
2034 {
2035         int i;
2036
2037         if (bp->vnic_info == NULL)
2038                 return;
2039
2040         /*
2041          * Clean up VNICs in reverse order, to make sure the L2 filter
2042          * from vnic0 is the last to be cleaned up.
2043          */
2044         for (i = bp->nr_vnics - 1; i >= 0; i--) {
2045                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
2046
2047                 bnxt_clear_hwrm_vnic_flows(bp, vnic);
2048
2049                 bnxt_clear_hwrm_vnic_filters(bp, vnic);
2050
2051                 bnxt_hwrm_vnic_ctx_free(bp, vnic);
2052
2053                 bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);
2054
2055                 bnxt_hwrm_vnic_free(bp, vnic);
2056         }
2057         /* Ring resources */
2058         bnxt_free_all_hwrm_rings(bp);
2059         bnxt_free_all_hwrm_ring_grps(bp);
2060         bnxt_free_all_hwrm_stat_ctxs(bp);
2061         bnxt_free_tunnel_ports(bp);
2062 }
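
/*
 * Example (illustrative sketch, not part of the driver): a close/uninit
 * path tears HWRM state down in the order the helpers above assume --
 * per-VNIC state and rings first, the HWRM channel buffers last.  The
 * function name is made up and the block is compiled out.
 */
#ifdef BNXT_HWRM_EXAMPLES
static void example_dev_close(struct bnxt *bp)
{
        bnxt_free_all_hwrm_resources(bp);       /* filters, VNICs, rings */
        bnxt_free_hwrm_resources(bp);           /* HWRM cmd/resp DMA buffers */
}
#endif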
2063
2064 static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
2065 {
2066         uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
2067
2068         if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
2069                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
2070
2071         switch (conf_link_speed) {
2072         case ETH_LINK_SPEED_10M_HD:
2073         case ETH_LINK_SPEED_100M_HD:
2074                 /* FALLTHROUGH */
2075                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
2076         }
2077         return hw_link_duplex;
2078 }
2079
2080 static uint16_t bnxt_check_eth_link_autoneg(uint32_t conf_link)
2081 {
2082         return (conf_link & ETH_LINK_SPEED_FIXED) ? 0 : 1;
2083 }
2084
2085 static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
2086 {
2087         uint16_t eth_link_speed = 0;
2088
2089         if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
2090                 return ETH_LINK_SPEED_AUTONEG;
2091
2092         switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
2093         case ETH_LINK_SPEED_100M:
2094         case ETH_LINK_SPEED_100M_HD:
2095                 /* FALLTHROUGH */
2096                 eth_link_speed =
2097                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
2098                 break;
2099         case ETH_LINK_SPEED_1G:
2100                 eth_link_speed =
2101                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
2102                 break;
2103         case ETH_LINK_SPEED_2_5G:
2104                 eth_link_speed =
2105                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
2106                 break;
2107         case ETH_LINK_SPEED_10G:
2108                 eth_link_speed =
2109                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
2110                 break;
2111         case ETH_LINK_SPEED_20G:
2112                 eth_link_speed =
2113                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
2114                 break;
2115         case ETH_LINK_SPEED_25G:
2116                 eth_link_speed =
2117                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
2118                 break;
2119         case ETH_LINK_SPEED_40G:
2120                 eth_link_speed =
2121                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
2122                 break;
2123         case ETH_LINK_SPEED_50G:
2124                 eth_link_speed =
2125                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
2126                 break;
2127         case ETH_LINK_SPEED_100G:
2128                 eth_link_speed =
2129                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB;
2130                 break;
2131         default:
2132                 PMD_DRV_LOG(ERR,
2133                         "Unsupported link speed %d; default to AUTO\n",
2134                         conf_link_speed);
2135                 break;
2136         }
2137         return eth_link_speed;
2138 }
2139
2140 #define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
2141                 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
2142                 ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
2143                 ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G)
2144
2145 static int bnxt_valid_link_speed(uint32_t link_speed, uint16_t port_id)
2146 {
2147         uint32_t one_speed;
2148
2149         if (link_speed == ETH_LINK_SPEED_AUTONEG)
2150                 return 0;
2151
2152         if (link_speed & ETH_LINK_SPEED_FIXED) {
2153                 one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
2154
2155                 if (one_speed & (one_speed - 1)) {
2156                         PMD_DRV_LOG(ERR,
2157                                 "Invalid advertised speeds (%u) for port %u\n",
2158                                 link_speed, port_id);
2159                         return -EINVAL;
2160                 }
2161                 if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
2162                         PMD_DRV_LOG(ERR,
2163                                 "Unsupported advertised speed (%u) for port %u\n",
2164                                 link_speed, port_id);
2165                         return -EINVAL;
2166                 }
2167         } else {
2168                 if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
2169                         PMD_DRV_LOG(ERR,
2170                                 "Unsupported advertised speeds (%u) for port %u\n",
2171                                 link_speed, port_id);
2172                         return -EINVAL;
2173                 }
2174         }
2175         return 0;
2176 }
2177
2178 static uint16_t
2179 bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed)
2180 {
2181         uint16_t ret = 0;
2182
2183         if (link_speed == ETH_LINK_SPEED_AUTONEG) {
2184                 if (bp->link_info.support_speeds)
2185                         return bp->link_info.support_speeds;
2186                 link_speed = BNXT_SUPPORTED_SPEEDS;
2187         }
2188
2189         if (link_speed & ETH_LINK_SPEED_100M)
2190                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
2191         if (link_speed & ETH_LINK_SPEED_100M_HD)
2192                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
2193         if (link_speed & ETH_LINK_SPEED_1G)
2194                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
2195         if (link_speed & ETH_LINK_SPEED_2_5G)
2196                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
2197         if (link_speed & ETH_LINK_SPEED_10G)
2198                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
2199         if (link_speed & ETH_LINK_SPEED_20G)
2200                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
2201         if (link_speed & ETH_LINK_SPEED_25G)
2202                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
2203         if (link_speed & ETH_LINK_SPEED_40G)
2204                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
2205         if (link_speed & ETH_LINK_SPEED_50G)
2206                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
2207         if (link_speed & ETH_LINK_SPEED_100G)
2208                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100GB;
2209         return ret;
2210 }
2211
2212 static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
2213 {
2214         uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;
2215
2216         switch (hw_link_speed) {
2217         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
2218                 eth_link_speed = ETH_SPEED_NUM_100M;
2219                 break;
2220         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
2221                 eth_link_speed = ETH_SPEED_NUM_1G;
2222                 break;
2223         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
2224                 eth_link_speed = ETH_SPEED_NUM_2_5G;
2225                 break;
2226         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
2227                 eth_link_speed = ETH_SPEED_NUM_10G;
2228                 break;
2229         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
2230                 eth_link_speed = ETH_SPEED_NUM_20G;
2231                 break;
2232         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
2233                 eth_link_speed = ETH_SPEED_NUM_25G;
2234                 break;
2235         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
2236                 eth_link_speed = ETH_SPEED_NUM_40G;
2237                 break;
2238         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
2239                 eth_link_speed = ETH_SPEED_NUM_50G;
2240                 break;
2241         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB:
2242                 eth_link_speed = ETH_SPEED_NUM_100G;
2243                 break;
2244         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
2245         default:
2246                 PMD_DRV_LOG(ERR, "HWRM link speed %d not defined\n",
2247                         hw_link_speed);
2248                 break;
2249         }
2250         return eth_link_speed;
2251 }
2252
2253 static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
2254 {
2255         uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;
2256
2257         switch (hw_link_duplex) {
2258         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
2259         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
2260                 /* FALLTHROUGH */
2261                 eth_link_duplex = ETH_LINK_FULL_DUPLEX;
2262                 break;
2263         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
2264                 eth_link_duplex = ETH_LINK_HALF_DUPLEX;
2265                 break;
2266         default:
2267                 PMD_DRV_LOG(ERR, "HWRM link duplex %d not defined\n",
2268                         hw_link_duplex);
2269                 break;
2270         }
2271         return eth_link_duplex;
2272 }
2273
2274 int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
2275 {
2276         int rc = 0;
2277         struct bnxt_link_info *link_info = &bp->link_info;
2278
2279         rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
2280         if (rc) {
2281                 PMD_DRV_LOG(ERR,
2282                         "Get link config failed with rc %d\n", rc);
2283                 goto exit;
2284         }
2285         if (link_info->link_speed)
2286                 link->link_speed =
2287                         bnxt_parse_hw_link_speed(link_info->link_speed);
2288         else
2289                 link->link_speed = ETH_SPEED_NUM_NONE;
2290         link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
2291         link->link_status = link_info->link_up;
2292         link->link_autoneg = link_info->auto_mode ==
2293                 HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
2294                 ETH_LINK_FIXED : ETH_LINK_AUTONEG;
2295 exit:
2296         return rc;
2297 }
2298
2299 int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
2300 {
2301         int rc = 0;
2302         struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
2303         struct bnxt_link_info link_req;
2304         uint16_t speed, autoneg;
2305
2306         if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp))
2307                 return 0;
2308
2309         rc = bnxt_valid_link_speed(dev_conf->link_speeds,
2310                         bp->eth_dev->data->port_id);
2311         if (rc)
2312                 goto error;
2313
2314         memset(&link_req, 0, sizeof(link_req));
2315         link_req.link_up = link_up;
2316         if (!link_up)
2317                 goto port_phy_cfg;
2318
2319         autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
2320         speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
2321         link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
2322         /* Autoneg can be used only when the FW allows it */
2323         if (autoneg == 1 && !(bp->link_info.auto_link_speed ||
2324                                 bp->link_info.force_link_speed)) {
2325                 link_req.phy_flags |=
2326                                 HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
2327                 link_req.auto_link_speed_mask =
2328                         bnxt_parse_eth_link_speed_mask(bp,
2329                                                        dev_conf->link_speeds);
2330         } else {
2331                 if (bp->link_info.phy_type ==
2332                     HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET ||
2333                     bp->link_info.phy_type ==
2334                     HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE ||
2335                     bp->link_info.media_type ==
2336                     HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP) {
2337                         PMD_DRV_LOG(ERR, "10GBase-T devices must autoneg\n");
2338                         return -EINVAL;
2339                 }
2340
2341                 link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
2342                 /* If the user wants a particular speed, try that first. */
2343                 if (speed)
2344                         link_req.link_speed = speed;
2345                 else if (bp->link_info.force_link_speed)
2346                         link_req.link_speed = bp->link_info.force_link_speed;
2347                 else
2348                         link_req.link_speed = bp->link_info.auto_link_speed;
2349         }
2350         link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
2351         link_req.auto_pause = bp->link_info.auto_pause;
2352         link_req.force_pause = bp->link_info.force_pause;
2353
2354 port_phy_cfg:
2355         rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
2356         if (rc) {
2357                 PMD_DRV_LOG(ERR,
2358                         "Set link config failed with rc %d\n", rc);
2359         }
2360
2361 error:
2362         return rc;
2363 }
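
/*
 * Example (illustrative sketch, not part of the driver): requesting a
 * fixed 25G link through the standard ethdev configuration that
 * bnxt_set_hwrm_link_config() later consumes.  Queue counts are
 * placeholders; the function name is made up and the block is compiled out.
 */
#ifdef BNXT_HWRM_EXAMPLES
static int example_force_25g(uint16_t port_id)
{
        struct rte_eth_conf conf = { 0 };

        conf.link_speeds = ETH_LINK_SPEED_25G | ETH_LINK_SPEED_FIXED;
        return rte_eth_dev_configure(port_id, 1, 1, &conf);
}
#endif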
2364
2365 /* JIRA 22088 */
2366 int bnxt_hwrm_func_qcfg(struct bnxt *bp)
2367 {
2368         struct hwrm_func_qcfg_input req = {0};
2369         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2370         uint16_t flags;
2371         int rc = 0;
2372
2373         HWRM_PREP(req, FUNC_QCFG);
2374         req.fid = rte_cpu_to_le_16(0xffff);
2375
2376         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2377
2378         HWRM_CHECK_RESULT();
2379
2380         /* Hard-coded 0xfff VLAN ID mask */
2381         bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
2382         flags = rte_le_to_cpu_16(resp->flags);
2383         if (BNXT_PF(bp) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_HOST))
2384                 bp->flags |= BNXT_FLAG_MULTI_HOST;
2385
2386         switch (resp->port_partition_type) {
2387         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
2388         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
2389         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
2390                 /* FALLTHROUGH */
2391                 bp->port_partition_type = resp->port_partition_type;
2392                 break;
2393         default:
2394                 bp->port_partition_type = 0;
2395                 break;
2396         }
2397
2398         HWRM_UNLOCK();
2399
2400         return rc;
2401 }
2402
2403 static void copy_func_cfg_to_qcaps(struct hwrm_func_cfg_input *fcfg,
2404                                    struct hwrm_func_qcaps_output *qcaps)
2405 {
2406         qcaps->max_rsscos_ctx = fcfg->num_rsscos_ctxs;
2407         memcpy(qcaps->mac_address, fcfg->dflt_mac_addr,
2408                sizeof(qcaps->mac_address));
2409         qcaps->max_l2_ctxs = fcfg->num_l2_ctxs;
2410         qcaps->max_rx_rings = fcfg->num_rx_rings;
2411         qcaps->max_tx_rings = fcfg->num_tx_rings;
2412         qcaps->max_cmpl_rings = fcfg->num_cmpl_rings;
2413         qcaps->max_stat_ctx = fcfg->num_stat_ctxs;
2414         qcaps->max_vfs = 0;
2415         qcaps->first_vf_id = 0;
2416         qcaps->max_vnics = fcfg->num_vnics;
2417         qcaps->max_decap_records = 0;
2418         qcaps->max_encap_records = 0;
2419         qcaps->max_tx_wm_flows = 0;
2420         qcaps->max_tx_em_flows = 0;
2421         qcaps->max_rx_wm_flows = 0;
2422         qcaps->max_rx_em_flows = 0;
2423         qcaps->max_flow_id = 0;
2424         qcaps->max_mcast_filters = fcfg->num_mcast_filters;
2425         qcaps->max_sp_tx_rings = 0;
2426         qcaps->max_hw_ring_grps = fcfg->num_hw_ring_grps;
2427 }
2428
2429 static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
2430 {
2431         struct hwrm_func_cfg_input req = {0};
2432         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2433         int rc;
2434
2435         req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
2436                         HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
2437                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
2438                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
2439                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
2440                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
2441                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
2442                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
2443                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
2444                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
2445         req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
2446         req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);
2447         req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2448                                    ETHER_CRC_LEN + VLAN_TAG_SIZE *
2449                                    BNXT_NUM_VLANS);
2450         req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
2451         req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx);
2452         req.num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings);
2453         req.num_tx_rings = rte_cpu_to_le_16(tx_rings);
2454         req.num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings);
2455         req.num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx);
2456         req.num_vnics = rte_cpu_to_le_16(bp->max_vnics);
2457         req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps);
2458         req.fid = rte_cpu_to_le_16(0xffff);
2459
2460         HWRM_PREP(req, FUNC_CFG);
2461
2462         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2463
2464         HWRM_CHECK_RESULT();
2465         HWRM_UNLOCK();
2466
2467         return rc;
2468 }
2469
2470 static void populate_vf_func_cfg_req(struct bnxt *bp,
2471                                      struct hwrm_func_cfg_input *req,
2472                                      int num_vfs)
2473 {
2474         req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
2475                         HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
2476                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
2477                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
2478                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
2479                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
2480                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
2481                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
2482                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
2483                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
2484
2485         req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2486                                     ETHER_CRC_LEN + VLAN_TAG_SIZE *
2487                                     BNXT_NUM_VLANS);
2488         req->mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2489                                     ETHER_CRC_LEN + VLAN_TAG_SIZE *
2490                                     BNXT_NUM_VLANS);
2491         req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx /
2492                                                 (num_vfs + 1));
2493         req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
2494         req->num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings /
2495                                                (num_vfs + 1));
2496         req->num_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
2497         req->num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
2498         req->num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
2499         /* TODO: For now, do not support VMDq/RFS on VFs. */
2500         req->num_vnics = rte_cpu_to_le_16(1);
2501         req->num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
2502                                                  (num_vfs + 1));
2503 }
2504
2505 static void add_random_mac_if_needed(struct bnxt *bp,
2506                                      struct hwrm_func_cfg_input *cfg_req,
2507                                      int vf)
2508 {
2509         struct ether_addr mac;
2510
2511         if (bnxt_hwrm_func_qcfg_vf_default_mac(bp, vf, &mac))
2512                 return;
2513
2514         if (memcmp(mac.addr_bytes, "\x00\x00\x00\x00\x00\x00", 6) == 0) {
2515                 cfg_req->enables |=
2516                 rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2517                 eth_random_addr(cfg_req->dflt_mac_addr);
2518                 bp->pf.vf_info[vf].random_mac = true;
2519         } else {
2520                 memcpy(cfg_req->dflt_mac_addr, mac.addr_bytes, ETHER_ADDR_LEN);
2521         }
2522 }
2523
2524 static void reserve_resources_from_vf(struct bnxt *bp,
2525                                       struct hwrm_func_cfg_input *cfg_req,
2526                                       int vf)
2527 {
2528         struct hwrm_func_qcaps_input req = {0};
2529         struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
2530         int rc;
2531
2532         /* Get the actual allocated values now */
2533         HWRM_PREP(req, FUNC_QCAPS);
2534         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2535         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2536
2537         if (rc) {
2538                 PMD_DRV_LOG(ERR, "hwrm_func_qcaps failed rc:%d\n", rc);
2539                 copy_func_cfg_to_qcaps(cfg_req, resp);
2540         } else if (resp->error_code) {
2541                 rc = rte_le_to_cpu_16(resp->error_code);
2542                 PMD_DRV_LOG(ERR, "hwrm_func_qcaps error %d\n", rc);
2543                 copy_func_cfg_to_qcaps(cfg_req, resp);
2544         }
2545
2546         bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->max_rsscos_ctx);
2547         bp->max_stat_ctx -= rte_le_to_cpu_16(resp->max_stat_ctx);
2548         bp->max_cp_rings -= rte_le_to_cpu_16(resp->max_cmpl_rings);
2549         bp->max_tx_rings -= rte_le_to_cpu_16(resp->max_tx_rings);
2550         bp->max_rx_rings -= rte_le_to_cpu_16(resp->max_rx_rings);
2551         bp->max_l2_ctx -= rte_le_to_cpu_16(resp->max_l2_ctxs);
2552         /*
2553          * TODO: VMDq is not supported on VFs, so max_vnics is always
2554          * forced to 1 in this case; nothing to subtract here yet.
2555          */
2556         //bp->max_vnics -= rte_le_to_cpu_16(resp->max_vnics);
2557         bp->max_ring_grps -= rte_le_to_cpu_16(resp->max_hw_ring_grps);
2558
2559         HWRM_UNLOCK();
2560 }
2561
2562 int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
2563 {
2564         struct hwrm_func_qcfg_input req = {0};
2565         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2566         int rc;
2567
2568         /* Query the VLAN currently configured on this VF */
2569         HWRM_PREP(req, FUNC_QCFG);
2570         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2571         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2572         if (rc) {
2573                 PMD_DRV_LOG(ERR, "hwrm_func_qcfg failed rc:%d\n", rc);
2574                 rc = -1;
2575         } else if (resp->error_code) {
2576                 PMD_DRV_LOG(ERR, "hwrm_func_qcfg error %d\n",
2577                             rte_le_to_cpu_16(resp->error_code));
2578                 rc = -1;
2579         } else
2580                 rc = rte_le_to_cpu_16(resp->vlan);
2581
2582         HWRM_UNLOCK();
2583
2584         return rc;
2585 }
2586
2587 static int update_pf_resource_max(struct bnxt *bp)
2588 {
2589         struct hwrm_func_qcfg_input req = {0};
2590         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2591         int rc;
2592
2593         /* And copy the allocated numbers into the pf struct */
2594         HWRM_PREP(req, FUNC_QCFG);
2595         req.fid = rte_cpu_to_le_16(0xffff);
2596         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2597         HWRM_CHECK_RESULT();
2598
2599         /* TODO: Only the TX ring value reflects the actual allocation. */
2600         bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
2601         bp->pf.evb_mode = resp->evb_mode;
2602
2603         HWRM_UNLOCK();
2604
2605         return rc;
2606 }
2607
2608 int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
2609 {
2610         int rc;
2611
2612         if (!BNXT_PF(bp)) {
2613                 PMD_DRV_LOG(ERR, "Attempt to allcoate VFs on a VF!\n");
2614                 return -1;
2615         }
2616
2617         rc = bnxt_hwrm_func_qcaps(bp);
2618         if (rc)
2619                 return rc;
2620
2621         bp->pf.func_cfg_flags &=
2622                 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
2623                   HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
2624         bp->pf.func_cfg_flags |=
2625                 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
2626         rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
2627         return rc;
2628 }
2629
2630 int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
2631 {
2632         struct hwrm_func_cfg_input req = {0};
2633         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2634         int i;
2635         size_t sz;
2636         int rc = 0;
2637         size_t req_buf_sz;
2638
2639         if (!BNXT_PF(bp)) {
2640                 PMD_DRV_LOG(ERR, "Attempt to allcoate VFs on a VF!\n");
2641                 return -1;
2642         }
2643
2644         rc = bnxt_hwrm_func_qcaps(bp);
2645
2646         if (rc)
2647                 return rc;
2648
2649         bp->pf.active_vfs = num_vfs;
2650
2651         /*
2652          * First, configure the PF to only use one TX ring.  This ensures that
2653          * there are enough rings for all VFs.
2654          *
2655          * If we don't do this, when we call func_alloc() later, we will lock
2656          * extra rings to the PF that won't be available during func_cfg() of
2657          * the VFs.
2658          *
2659          * This has been fixed with firmware versions above 20.6.54.
2660          */
2661         bp->pf.func_cfg_flags &=
2662                 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
2663                   HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
2664         bp->pf.func_cfg_flags |=
2665                 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
2666         rc = bnxt_hwrm_pf_func_cfg(bp, 1);
2667         if (rc)
2668                 return rc;
2669
2670         /*
2671          * Now, create and register a buffer to hold forwarded VF requests
2672          */
2673         req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;
2674         bp->pf.vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
2675                 page_roundup(num_vfs * HWRM_MAX_REQ_LEN));
2676         if (bp->pf.vf_req_buf == NULL) {
2677                 rc = -ENOMEM;
2678                 goto error_free;
2679         }
2680         for (sz = 0; sz < req_buf_sz; sz += getpagesize())
2681                 rte_mem_lock_page(((char *)bp->pf.vf_req_buf) + sz);
2682         for (i = 0; i < num_vfs; i++)
2683                 bp->pf.vf_info[i].req_buf = ((char *)bp->pf.vf_req_buf) +
2684                                         (i * HWRM_MAX_REQ_LEN);
2685
2686         rc = bnxt_hwrm_func_buf_rgtr(bp);
2687         if (rc)
2688                 goto error_free;
2689
2690         populate_vf_func_cfg_req(bp, &req, num_vfs);
2691
2692         bp->pf.active_vfs = 0;
2693         for (i = 0; i < num_vfs; i++) {
2694                 add_random_mac_if_needed(bp, &req, i);
2695
2696                 HWRM_PREP(req, FUNC_CFG);
2697                 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[i].func_cfg_flags);
2698                 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[i].fid);
2699                 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2700
2701                 /* Clear enable flag for next pass */
2702                 req.enables &= ~rte_cpu_to_le_32(
2703                                 HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2704
2705                 if (rc || resp->error_code) {
2706                         PMD_DRV_LOG(ERR,
2707                                 "Failed to initizlie VF %d\n", i);
2708                         PMD_DRV_LOG(ERR,
2709                                 "Not all VFs available. (%d, %d)\n",
2710                                 rc, resp->error_code);
2711                         HWRM_UNLOCK();
2712                         break;
2713                 }
2714
2715                 HWRM_UNLOCK();
2716
2717                 reserve_resources_from_vf(bp, &req, i);
2718                 bp->pf.active_vfs++;
2719                 bnxt_hwrm_func_clr_stats(bp, bp->pf.vf_info[i].fid);
2720         }
2721
2722         /*
2723          * Now configure the PF to use "the rest" of the resources
2724          * We're using STD_TX_RING_MODE here though which will limit the TX
2725          * rings.  This will allow QoS to function properly.  Not setting this
2726          * will cause PF rings to break bandwidth settings.
2727          */
2728         rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
2729         if (rc)
2730                 goto error_free;
2731
2732         rc = update_pf_resource_max(bp);
2733         if (rc)
2734                 goto error_free;
2735
2736         return rc;
2737
2738 error_free:
2739         bnxt_hwrm_func_buf_unrgtr(bp);
2740         return rc;
2741 }
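
/*
 * Example (illustrative sketch, not part of the driver): a PF probe path
 * choosing between the two allocation helpers above based on how many VFs
 * were requested.  The function name is made up and the block is compiled out.
 */
#ifdef BNXT_HWRM_EXAMPLES
static int example_pf_resource_init(struct bnxt *bp, int num_vfs)
{
        if (num_vfs > 0)
                return bnxt_hwrm_allocate_vfs(bp, num_vfs);

        return bnxt_hwrm_allocate_pf_only(bp);
}
#endif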
2742
2743 int bnxt_hwrm_pf_evb_mode(struct bnxt *bp)
2744 {
2745         struct hwrm_func_cfg_input req = {0};
2746         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2747         int rc;
2748
2749         HWRM_PREP(req, FUNC_CFG);
2750
2751         req.fid = rte_cpu_to_le_16(0xffff);
2752         req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE);
2753         req.evb_mode = bp->pf.evb_mode;
2754
2755         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2756         HWRM_CHECK_RESULT();
2757         HWRM_UNLOCK();
2758
2759         return rc;
2760 }
2761
2762 int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
2763                                 uint8_t tunnel_type)
2764 {
2765         struct hwrm_tunnel_dst_port_alloc_input req = {0};
2766         struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
2767         int rc = 0;
2768
2769         HWRM_PREP(req, TUNNEL_DST_PORT_ALLOC);
2770         req.tunnel_type = tunnel_type;
2771         req.tunnel_dst_port_val = port;
2772         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2773         HWRM_CHECK_RESULT();
2774
2775         switch (tunnel_type) {
2776         case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN:
2777                 bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
2778                 bp->vxlan_port = port;
2779                 break;
2780         case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE:
                bp->geneve_fw_dst_port_id =
                        rte_le_to_cpu_16(resp->tunnel_dst_port_id);
2782                 bp->geneve_port = port;
2783                 break;
2784         default:
2785                 break;
2786         }
2787
2788         HWRM_UNLOCK();
2789
2790         return rc;
2791 }
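
/*
 * A minimal usage sketch for the tunnel port helper above: register the
 * IANA-assigned VXLAN UDP port (4789).  The wrapper name and the fixed
 * port number are illustrative assumptions, not part of the driver.
 */
static __rte_unused int bnxt_example_add_vxlan_port(struct bnxt *bp)
{
        /* The port is passed in CPU byte order; the helper converts it. */
        return bnxt_hwrm_tunnel_dst_port_alloc(bp, 4789,
                        HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN);
}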
2792
2793 int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
2794                                 uint8_t tunnel_type)
2795 {
2796         struct hwrm_tunnel_dst_port_free_input req = {0};
2797         struct hwrm_tunnel_dst_port_free_output *resp = bp->hwrm_cmd_resp_addr;
2798         int rc = 0;
2799
2800         HWRM_PREP(req, TUNNEL_DST_PORT_FREE);
2801
2802         req.tunnel_type = tunnel_type;
2803         req.tunnel_dst_port_id = rte_cpu_to_be_16(port);
2804         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2805
2806         HWRM_CHECK_RESULT();
2807         HWRM_UNLOCK();
2808
2809         return rc;
2810 }
2811
2812 int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf,
2813                                         uint32_t flags)
2814 {
2815         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2816         struct hwrm_func_cfg_input req = {0};
2817         int rc;
2818
2819         HWRM_PREP(req, FUNC_CFG);
2820
2821         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2822         req.flags = rte_cpu_to_le_32(flags);
2823         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2824
2825         HWRM_CHECK_RESULT();
2826         HWRM_UNLOCK();
2827
2828         return rc;
2829 }
2830
2831 void vf_vnic_set_rxmask_cb(struct bnxt_vnic_info *vnic, void *flagp)
2832 {
2833         uint32_t *flag = flagp;
2834
2835         vnic->flags = *flag;
2836 }
2837
2838 int bnxt_set_rx_mask_no_vlan(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2839 {
2840         return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
2841 }
2842
2843 int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
2844 {
2845         int rc = 0;
2846         struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
2847         struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
2848
2849         HWRM_PREP(req, FUNC_BUF_RGTR);
2850
2851         req.req_buf_num_pages = rte_cpu_to_le_16(1);
2852         req.req_buf_page_size = rte_cpu_to_le_16(
2853                          page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));
2854         req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
2855         req.req_buf_page_addr0 =
2856                 rte_cpu_to_le_64(rte_mem_virt2iova(bp->pf.vf_req_buf));
        if (req.req_buf_page_addr0 == 0) {
                /* HWRM_PREP() above took the HWRM lock; drop it on error. */
                HWRM_UNLOCK();
                PMD_DRV_LOG(ERR,
                        "unable to map buffer address to physical memory\n");
                return -ENOMEM;
        }
2862
2863         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2864
2865         HWRM_CHECK_RESULT();
2866         HWRM_UNLOCK();
2867
2868         return rc;
2869 }
2870
2871 int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp)
2872 {
2873         int rc = 0;
2874         struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 };
2875         struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
2876
2877         HWRM_PREP(req, FUNC_BUF_UNRGTR);
2878
2879         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2880
2881         HWRM_CHECK_RESULT();
2882         HWRM_UNLOCK();
2883
2884         return rc;
2885 }
2886
2887 int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
2888 {
2889         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2890         struct hwrm_func_cfg_input req = {0};
2891         int rc;
2892
2893         HWRM_PREP(req, FUNC_CFG);
2894
2895         req.fid = rte_cpu_to_le_16(0xffff);
2896         req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
2897         req.enables = rte_cpu_to_le_32(
2898                         HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
2899         req.async_event_cr = rte_cpu_to_le_16(
2900                         bp->def_cp_ring->cp_ring_struct->fw_ring_id);
2901         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2902
2903         HWRM_CHECK_RESULT();
2904         HWRM_UNLOCK();
2905
2906         return rc;
2907 }
2908
2909 int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
2910 {
2911         struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2912         struct hwrm_func_vf_cfg_input req = {0};
2913         int rc;
2914
2915         HWRM_PREP(req, FUNC_VF_CFG);
2916
2917         req.enables = rte_cpu_to_le_32(
2918                         HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
2919         req.async_event_cr = rte_cpu_to_le_16(
2920                         bp->def_cp_ring->cp_ring_struct->fw_ring_id);
2921         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2922
2923         HWRM_CHECK_RESULT();
2924         HWRM_UNLOCK();
2925
2926         return rc;
2927 }
2928
2929 int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf)
2930 {
2931         struct hwrm_func_cfg_input req = {0};
2932         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2933         uint16_t dflt_vlan, fid;
2934         uint32_t func_cfg_flags;
2935         int rc = 0;
2936
2937         HWRM_PREP(req, FUNC_CFG);
2938
2939         if (is_vf) {
2940                 dflt_vlan = bp->pf.vf_info[vf].dflt_vlan;
2941                 fid = bp->pf.vf_info[vf].fid;
2942                 func_cfg_flags = bp->pf.vf_info[vf].func_cfg_flags;
2943         } else {
2944                 fid = rte_cpu_to_le_16(0xffff);
2945                 func_cfg_flags = bp->pf.func_cfg_flags;
2946                 dflt_vlan = bp->vlan;
2947         }
2948
2949         req.flags = rte_cpu_to_le_32(func_cfg_flags);
2950         req.fid = rte_cpu_to_le_16(fid);
2951         req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
2952         req.dflt_vlan = rte_cpu_to_le_16(dflt_vlan);
2953
2954         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2955
2956         HWRM_CHECK_RESULT();
2957         HWRM_UNLOCK();
2958
2959         return rc;
2960 }
2961
2962 int bnxt_hwrm_func_bw_cfg(struct bnxt *bp, uint16_t vf,
2963                         uint16_t max_bw, uint16_t enables)
2964 {
2965         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2966         struct hwrm_func_cfg_input req = {0};
2967         int rc;
2968
2969         HWRM_PREP(req, FUNC_CFG);
2970
2971         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2972         req.enables |= rte_cpu_to_le_32(enables);
2973         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
2974         req.max_bw = rte_cpu_to_le_32(max_bw);
2975         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2976
2977         HWRM_CHECK_RESULT();
2978         HWRM_UNLOCK();
2979
2980         return rc;
2981 }
2982
2983 int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf)
2984 {
2985         struct hwrm_func_cfg_input req = {0};
2986         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2987         int rc = 0;
2988
2989         HWRM_PREP(req, FUNC_CFG);
2990
2991         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
2992         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2993         req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
2994         req.dflt_vlan = rte_cpu_to_le_16(bp->pf.vf_info[vf].dflt_vlan);
2995
2996         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2997
2998         HWRM_CHECK_RESULT();
2999         HWRM_UNLOCK();
3000
3001         return rc;
3002 }
3003
3004 int bnxt_hwrm_set_async_event_cr(struct bnxt *bp)
3005 {
3006         int rc;
3007
3008         if (BNXT_PF(bp))
3009                 rc = bnxt_hwrm_func_cfg_def_cp(bp);
3010         else
3011                 rc = bnxt_hwrm_vf_func_cfg_def_cp(bp);
3012
3013         return rc;
3014 }
3015
3016 int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
3017                               void *encaped, size_t ec_size)
3018 {
3019         int rc = 0;
3020         struct hwrm_reject_fwd_resp_input req = {.req_type = 0};
3021         struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
3022
3023         if (ec_size > sizeof(req.encap_request))
3024                 return -1;
3025
3026         HWRM_PREP(req, REJECT_FWD_RESP);
3027
3028         req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
3029         memcpy(req.encap_request, encaped, ec_size);
3030
3031         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3032
3033         HWRM_CHECK_RESULT();
3034         HWRM_UNLOCK();
3035
3036         return rc;
3037 }
3038
3039 int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
3040                                        struct ether_addr *mac)
3041 {
3042         struct hwrm_func_qcfg_input req = {0};
3043         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3044         int rc;
3045
3046         HWRM_PREP(req, FUNC_QCFG);
3047
3048         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3049         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3050
3051         HWRM_CHECK_RESULT();
3052
3053         memcpy(mac->addr_bytes, resp->mac_address, ETHER_ADDR_LEN);
3054
3055         HWRM_UNLOCK();
3056
3057         return rc;
3058 }
3059
3060 int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
3061                             void *encaped, size_t ec_size)
3062 {
3063         int rc = 0;
3064         struct hwrm_exec_fwd_resp_input req = {.req_type = 0};
3065         struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
3066
3067         if (ec_size > sizeof(req.encap_request))
3068                 return -1;
3069
3070         HWRM_PREP(req, EXEC_FWD_RESP);
3071
3072         req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
3073         memcpy(req.encap_request, encaped, ec_size);
3074
3075         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3076
3077         HWRM_CHECK_RESULT();
3078         HWRM_UNLOCK();
3079
3080         return rc;
3081 }
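
/*
 * A minimal sketch of how the two forwarded-response helpers above are
 * meant to be paired: a PF validates an encapsulated VF request and either
 * forwards it for execution or rejects it.  The validation flag is an
 * illustrative assumption.
 */
static __rte_unused int bnxt_example_complete_fwd_request(struct bnxt *bp,
                uint16_t vf_target_id, void *encap_req, size_t req_len,
                bool allow)
{
        if (allow)
                return bnxt_hwrm_exec_fwd_resp(bp, vf_target_id,
                                               encap_req, req_len);
        return bnxt_hwrm_reject_fwd_resp(bp, vf_target_id,
                                         encap_req, req_len);
}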
3082
3083 int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
3084                          struct rte_eth_stats *stats, uint8_t rx)
3085 {
3086         int rc = 0;
3087         struct hwrm_stat_ctx_query_input req = {.req_type = 0};
3088         struct hwrm_stat_ctx_query_output *resp = bp->hwrm_cmd_resp_addr;
3089
3090         HWRM_PREP(req, STAT_CTX_QUERY);
3091
3092         req.stat_ctx_id = rte_cpu_to_le_32(cid);
3093
3094         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3095
3096         HWRM_CHECK_RESULT();
3097
3098         if (rx) {
3099                 stats->q_ipackets[idx] = rte_le_to_cpu_64(resp->rx_ucast_pkts);
3100                 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_mcast_pkts);
3101                 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_bcast_pkts);
3102                 stats->q_ibytes[idx] = rte_le_to_cpu_64(resp->rx_ucast_bytes);
3103                 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_mcast_bytes);
3104                 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_bcast_bytes);
3105                 stats->q_errors[idx] = rte_le_to_cpu_64(resp->rx_err_pkts);
3106                 stats->q_errors[idx] += rte_le_to_cpu_64(resp->rx_drop_pkts);
3107         } else {
3108                 stats->q_opackets[idx] = rte_le_to_cpu_64(resp->tx_ucast_pkts);
3109                 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_mcast_pkts);
3110                 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_bcast_pkts);
3111                 stats->q_obytes[idx] = rte_le_to_cpu_64(resp->tx_ucast_bytes);
3112                 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_mcast_bytes);
3113                 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_bcast_bytes);
3114                 stats->q_errors[idx] += rte_le_to_cpu_64(resp->tx_err_pkts);
3115         }
3116
3118         HWRM_UNLOCK();
3119
3120         return rc;
3121 }
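
/*
 * A minimal sketch of how per-ring queue stats are aggregated with the
 * helper above, assuming the bp->rx_queues/cp_ring layout used elsewhere
 * in this driver.
 */
static __rte_unused int bnxt_example_collect_rx_qstats(struct bnxt *bp,
                struct rte_eth_stats *stats)
{
        unsigned int i;
        int rc = 0;

        for (i = 0; i < bp->rx_cp_nr_rings; i++) {
                struct bnxt_rx_queue *rxq = bp->rx_queues[i];
                struct bnxt_cp_ring_info *cpr = rxq->cp_ring;

                /* rx=1 fills the q_ipackets/q_ibytes/q_errors slots. */
                rc = bnxt_hwrm_ctx_qstats(bp, cpr->hw_stats_ctx_id, i,
                                          stats, 1);
                if (rc)
                        break;
        }
        return rc;
}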
3122
3123 int bnxt_hwrm_port_qstats(struct bnxt *bp)
3124 {
3125         struct hwrm_port_qstats_input req = {0};
3126         struct hwrm_port_qstats_output *resp = bp->hwrm_cmd_resp_addr;
3127         struct bnxt_pf_info *pf = &bp->pf;
3128         int rc;
3129
3130         HWRM_PREP(req, PORT_QSTATS);
3131
3132         req.port_id = rte_cpu_to_le_16(pf->port_id);
3133         req.tx_stat_host_addr = rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
3134         req.rx_stat_host_addr = rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
3135         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3136
3137         HWRM_CHECK_RESULT();
3138         HWRM_UNLOCK();
3139
3140         return rc;
3141 }
3142
3143 int bnxt_hwrm_port_clr_stats(struct bnxt *bp)
3144 {
3145         struct hwrm_port_clr_stats_input req = {0};
3146         struct hwrm_port_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
3147         struct bnxt_pf_info *pf = &bp->pf;
3148         int rc;
3149
        /* Not allowed on NS2 device, NPAR, MultiHost, VF, or with VFs configured */
3151         if (!(bp->flags & BNXT_FLAG_PORT_STATS) || BNXT_VF(bp) ||
3152             BNXT_NPAR(bp) || BNXT_MH(bp) || BNXT_TOTAL_VFS(bp))
3153                 return 0;
3154
3155         HWRM_PREP(req, PORT_CLR_STATS);
3156
3157         req.port_id = rte_cpu_to_le_16(pf->port_id);
3158         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3159
3160         HWRM_CHECK_RESULT();
3161         HWRM_UNLOCK();
3162
3163         return rc;
3164 }
3165
3166 int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
3167 {
3168         struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
3169         struct hwrm_port_led_qcaps_input req = {0};
3170         int rc;
3171
3172         if (BNXT_VF(bp))
3173                 return 0;
3174
3175         HWRM_PREP(req, PORT_LED_QCAPS);
3176         req.port_id = bp->pf.port_id;
3177         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3178
3179         HWRM_CHECK_RESULT();
3180
3181         if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
3182                 unsigned int i;
3183
3184                 bp->num_leds = resp->num_leds;
3185                 memcpy(bp->leds, &resp->led0_id,
3186                         sizeof(bp->leds[0]) * bp->num_leds);
3187                 for (i = 0; i < bp->num_leds; i++) {
3188                         struct bnxt_led_info *led = &bp->leds[i];
3189
3190                         uint16_t caps = led->led_state_caps;
3191
3192                         if (!led->led_group_id ||
3193                                 !BNXT_LED_ALT_BLINK_CAP(caps)) {
3194                                 bp->num_leds = 0;
3195                                 break;
3196                         }
3197                 }
3198         }
3199
3200         HWRM_UNLOCK();
3201
3202         return rc;
3203 }
3204
3205 int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on)
3206 {
3207         struct hwrm_port_led_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3208         struct hwrm_port_led_cfg_input req = {0};
3209         struct bnxt_led_cfg *led_cfg;
3210         uint8_t led_state = HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_DEFAULT;
3211         uint16_t duration = 0;
3212         int rc, i;
3213
3214         if (!bp->num_leds || BNXT_VF(bp))
3215                 return -EOPNOTSUPP;
3216
3217         HWRM_PREP(req, PORT_LED_CFG);
3218
3219         if (led_on) {
3220                 led_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT;
3221                 duration = rte_cpu_to_le_16(500);
3222         }
3223         req.port_id = bp->pf.port_id;
3224         req.num_leds = bp->num_leds;
3225         led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
3226         for (i = 0; i < bp->num_leds; i++, led_cfg++) {
3227                 req.enables |= BNXT_LED_DFLT_ENABLES(i);
3228                 led_cfg->led_id = bp->leds[i].led_id;
3229                 led_cfg->led_state = led_state;
3230                 led_cfg->led_blink_on = duration;
3231                 led_cfg->led_blink_off = duration;
3232                 led_cfg->led_group_id = bp->leds[i].led_group_id;
3233         }
3234
3235         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3236
3237         HWRM_CHECK_RESULT();
3238         HWRM_UNLOCK();
3239
3240         return rc;
3241 }
3242
3243 int bnxt_hwrm_nvm_get_dir_info(struct bnxt *bp, uint32_t *entries,
3244                                uint32_t *length)
3245 {
3246         int rc;
3247         struct hwrm_nvm_get_dir_info_input req = {0};
3248         struct hwrm_nvm_get_dir_info_output *resp = bp->hwrm_cmd_resp_addr;
3249
3250         HWRM_PREP(req, NVM_GET_DIR_INFO);
3251
3252         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3253
3254         HWRM_CHECK_RESULT();
3255         HWRM_UNLOCK();
3256
3257         if (!rc) {
3258                 *entries = rte_le_to_cpu_32(resp->entries);
3259                 *length = rte_le_to_cpu_32(resp->entry_length);
3260         }
3261         return rc;
3262 }
3263
3264 int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data)
3265 {
3266         int rc;
3267         uint32_t dir_entries;
3268         uint32_t entry_length;
3269         uint8_t *buf;
3270         size_t buflen;
3271         rte_iova_t dma_handle;
3272         struct hwrm_nvm_get_dir_entries_input req = {0};
3273         struct hwrm_nvm_get_dir_entries_output *resp = bp->hwrm_cmd_resp_addr;
3274
3275         rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length);
3276         if (rc != 0)
3277                 return rc;
3278
3279         *data++ = dir_entries;
3280         *data++ = entry_length;
3281         len -= 2;
3282         memset(data, 0xff, len);
3283
3284         buflen = dir_entries * entry_length;
        buf = rte_malloc("nvm_dir", buflen, 0);
        if (buf == NULL)
                return -ENOMEM;
        rte_mem_lock_page(buf);
        dma_handle = rte_mem_virt2iova(buf);
        if (dma_handle == 0) {
                PMD_DRV_LOG(ERR,
                        "unable to map response address to physical memory\n");
                rte_free(buf);
                return -ENOMEM;
        }
3295         HWRM_PREP(req, NVM_GET_DIR_ENTRIES);
3296         req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
3297         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3298
3299         HWRM_CHECK_RESULT();
3300         HWRM_UNLOCK();
3301
3302         if (rc == 0)
3303                 memcpy(data, buf, len > buflen ? buflen : len);
3304
3305         rte_free(buf);
3306
3307         return rc;
3308 }
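
/*
 * A sketch of sizing and fetching the NVM directory with the two helpers
 * above.  The two extra header bytes match the layout consumed by
 * bnxt_get_nvram_directory(); the wrapper name is an illustrative
 * assumption.
 */
static __rte_unused int bnxt_example_read_nvm_dir(struct bnxt *bp)
{
        uint32_t entries, entry_length, len;
        uint8_t *data;
        int rc;

        rc = bnxt_hwrm_nvm_get_dir_info(bp, &entries, &entry_length);
        if (rc)
                return rc;

        len = entries * entry_length + 2;
        data = rte_zmalloc("nvm_dir_copy", len, 0);
        if (data == NULL)
                return -ENOMEM;

        rc = bnxt_get_nvram_directory(bp, len, data);
        rte_free(data);
        return rc;
}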
3309
3310 int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index,
3311                              uint32_t offset, uint32_t length,
3312                              uint8_t *data)
3313 {
3314         int rc;
3315         uint8_t *buf;
3316         rte_iova_t dma_handle;
3317         struct hwrm_nvm_read_input req = {0};
3318         struct hwrm_nvm_read_output *resp = bp->hwrm_cmd_resp_addr;
3319
        buf = rte_malloc("nvm_item", length, 0);
        if (!buf)
                return -ENOMEM;
        rte_mem_lock_page(buf);

        dma_handle = rte_mem_virt2iova(buf);
        if (dma_handle == 0) {
                PMD_DRV_LOG(ERR,
                        "unable to map response address to physical memory\n");
                rte_free(buf);
                return -ENOMEM;
        }
3331         HWRM_PREP(req, NVM_READ);
3332         req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
3333         req.dir_idx = rte_cpu_to_le_16(index);
3334         req.offset = rte_cpu_to_le_32(offset);
3335         req.len = rte_cpu_to_le_32(length);
3336         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3337         HWRM_CHECK_RESULT();
3338         HWRM_UNLOCK();
3339         if (rc == 0)
3340                 memcpy(data, buf, length);
3341
3342         rte_free(buf);
3343         return rc;
3344 }
3345
3346 int bnxt_hwrm_erase_nvram_directory(struct bnxt *bp, uint8_t index)
3347 {
3348         int rc;
3349         struct hwrm_nvm_erase_dir_entry_input req = {0};
3350         struct hwrm_nvm_erase_dir_entry_output *resp = bp->hwrm_cmd_resp_addr;
3351
3352         HWRM_PREP(req, NVM_ERASE_DIR_ENTRY);
3353         req.dir_idx = rte_cpu_to_le_16(index);
3354         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3355         HWRM_CHECK_RESULT();
3356         HWRM_UNLOCK();
3357
3358         return rc;
3359 }
3360
3362 int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
3363                           uint16_t dir_ordinal, uint16_t dir_ext,
3364                           uint16_t dir_attr, const uint8_t *data,
3365                           size_t data_len)
3366 {
3367         int rc;
3368         struct hwrm_nvm_write_input req = {0};
3369         struct hwrm_nvm_write_output *resp = bp->hwrm_cmd_resp_addr;
3370         rte_iova_t dma_handle;
3371         uint8_t *buf;
3372
3373         HWRM_PREP(req, NVM_WRITE);
3374
3375         req.dir_type = rte_cpu_to_le_16(dir_type);
3376         req.dir_ordinal = rte_cpu_to_le_16(dir_ordinal);
3377         req.dir_ext = rte_cpu_to_le_16(dir_ext);
3378         req.dir_attr = rte_cpu_to_le_16(dir_attr);
3379         req.dir_data_length = rte_cpu_to_le_32(data_len);
3380
        buf = rte_malloc("nvm_write", data_len, 0);
        if (!buf) {
                /* HWRM_PREP() above took the HWRM lock; drop it on error. */
                HWRM_UNLOCK();
                return -ENOMEM;
        }
        rte_mem_lock_page(buf);

        dma_handle = rte_mem_virt2iova(buf);
        if (dma_handle == 0) {
                HWRM_UNLOCK();
                PMD_DRV_LOG(ERR,
                        "unable to map response address to physical memory\n");
                rte_free(buf);
                return -ENOMEM;
        }
3392         memcpy(buf, data, data_len);
3393         req.host_src_addr = rte_cpu_to_le_64(dma_handle);
3394
3395         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3396
3397         HWRM_CHECK_RESULT();
3398         HWRM_UNLOCK();
3399
3400         rte_free(buf);
3401         return rc;
3402 }
3403
3404 static void
3405 bnxt_vnic_count(struct bnxt_vnic_info *vnic __rte_unused, void *cbdata)
3406 {
3407         uint32_t *count = cbdata;
3408
3409         *count = *count + 1;
3410 }
3411
3412 static int bnxt_vnic_count_hwrm_stub(struct bnxt *bp __rte_unused,
3413                                      struct bnxt_vnic_info *vnic __rte_unused)
3414 {
3415         return 0;
3416 }
3417
3418 int bnxt_vf_vnic_count(struct bnxt *bp, uint16_t vf)
3419 {
3420         uint32_t count = 0;
3421
3422         bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf, bnxt_vnic_count,
3423             &count, bnxt_vnic_count_hwrm_stub);
3424
3425         return count;
3426 }
3427
3428 static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
3429                                         uint16_t *vnic_ids)
3430 {
3431         struct hwrm_func_vf_vnic_ids_query_input req = {0};
3432         struct hwrm_func_vf_vnic_ids_query_output *resp =
3433                                                 bp->hwrm_cmd_resp_addr;
3434         int rc;
3435
3436         /* First query all VNIC ids */
3437         HWRM_PREP(req, FUNC_VF_VNIC_IDS_QUERY);
3438
3439         req.vf_id = rte_cpu_to_le_16(bp->pf.first_vf_id + vf);
3440         req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics);
3441         req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2iova(vnic_ids));
3442
3443         if (req.vnic_id_tbl_addr == 0) {
3444                 HWRM_UNLOCK();
3445                 PMD_DRV_LOG(ERR,
3446                 "unable to map VNIC ID table address to physical memory\n");
3447                 return -ENOMEM;
3448         }
3449         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3450         if (rc) {
3451                 HWRM_UNLOCK();
3452                 PMD_DRV_LOG(ERR, "hwrm_func_vf_vnic_query failed rc:%d\n", rc);
3453                 return -1;
3454         } else if (resp->error_code) {
3455                 rc = rte_le_to_cpu_16(resp->error_code);
3456                 HWRM_UNLOCK();
3457                 PMD_DRV_LOG(ERR, "hwrm_func_vf_vnic_query error %d\n", rc);
3458                 return -1;
3459         }
3460         rc = rte_le_to_cpu_32(resp->vnic_id_cnt);
3461
3462         HWRM_UNLOCK();
3463
3464         return rc;
3465 }
3466
/*
 * This function queries the VNIC IDs for a specified VF.  For each VNIC it
 * calls vnic_cb to update the necessary field in vnic_info with cbdata,
 * then calls hwrm_cb to program the new VNIC configuration.  A usage
 * sketch follows the function body below.
 */
3472 int bnxt_hwrm_func_vf_vnic_query_and_config(struct bnxt *bp, uint16_t vf,
3473         void (*vnic_cb)(struct bnxt_vnic_info *, void *), void *cbdata,
3474         int (*hwrm_cb)(struct bnxt *bp, struct bnxt_vnic_info *vnic))
3475 {
3476         struct bnxt_vnic_info vnic;
3477         int rc = 0;
3478         int i, num_vnic_ids;
3479         uint16_t *vnic_ids;
3480         size_t vnic_id_sz;
3481         size_t sz;
3482
3483         /* First query all VNIC ids */
3484         vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
3485         vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
3486                         RTE_CACHE_LINE_SIZE);
3487         if (vnic_ids == NULL) {
3488                 rc = -ENOMEM;
3489                 return rc;
3490         }
3491         for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
3492                 rte_mem_lock_page(((char *)vnic_ids) + sz);
3493
3494         num_vnic_ids = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
3495
        if (num_vnic_ids < 0) {
                rte_free(vnic_ids);
                return num_vnic_ids;
        }
3498
        /* Retrieve each VNIC, apply vnic_cb to it, then reprogram it via hwrm_cb */
3500
3501         for (i = 0; i < num_vnic_ids; i++) {
3502                 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
3503                 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
3504                 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, bp->pf.first_vf_id + vf);
3505                 if (rc)
3506                         break;
3507                 if (vnic.mru <= 4)      /* Indicates unallocated */
3508                         continue;
3509
3510                 vnic_cb(&vnic, cbdata);
3511
3512                 rc = hwrm_cb(bp, &vnic);
3513                 if (rc)
3514                         break;
3515         }
3516
3517         rte_free(vnic_ids);
3518
3519         return rc;
3520 }
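
/*
 * Usage sketch for the query-and-config helper above, reusing
 * vf_vnic_set_rxmask_cb and bnxt_set_rx_mask_no_vlan from earlier in this
 * file; clearing the VNIC flags this way is only an illustration.
 */
static __rte_unused int bnxt_example_clear_vf_vnic_flags(struct bnxt *bp,
                uint16_t vf)
{
        uint32_t flags = 0;

        return bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf,
                        vf_vnic_set_rxmask_cb, &flags,
                        bnxt_set_rx_mask_no_vlan);
}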
3521
3522 int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf,
3523                                               bool on)
3524 {
3525         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3526         struct hwrm_func_cfg_input req = {0};
3527         int rc;
3528
3529         HWRM_PREP(req, FUNC_CFG);
3530
3531         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3532         req.enables |= rte_cpu_to_le_32(
3533                         HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE);
3534         req.vlan_antispoof_mode = on ?
3535                 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN :
3536                 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK;
3537         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3538
3539         HWRM_CHECK_RESULT();
3540         HWRM_UNLOCK();
3541
3542         return rc;
3543 }
3544
3545 int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf)
3546 {
3547         struct bnxt_vnic_info vnic;
3548         uint16_t *vnic_ids;
3549         size_t vnic_id_sz;
3550         int num_vnic_ids, i;
3551         size_t sz;
3552         int rc;
3553
3554         vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
3555         vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
3556                         RTE_CACHE_LINE_SIZE);
3557         if (vnic_ids == NULL) {
3558                 rc = -ENOMEM;
3559                 return rc;
3560         }
3561
3562         for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
3563                 rte_mem_lock_page(((char *)vnic_ids) + sz);
3564
3565         rc = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
3566         if (rc <= 0)
3567                 goto exit;
3568         num_vnic_ids = rc;
3569
3570         /*
3571          * Loop through to find the default VNIC ID.
3572          * TODO: The easier way would be to obtain the resp->dflt_vnic_id
3573          * by sending the hwrm_func_qcfg command to the firmware.
3574          */
3575         for (i = 0; i < num_vnic_ids; i++) {
3576                 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
3577                 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
3578                 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic,
3579                                         bp->pf.first_vf_id + vf);
3580                 if (rc)
3581                         goto exit;
3582                 if (vnic.func_default) {
3583                         rte_free(vnic_ids);
3584                         return vnic.fw_vnic_id;
3585                 }
3586         }
3587         /* Could not find a default VNIC. */
3588         PMD_DRV_LOG(ERR, "No default VNIC\n");
3589 exit:
3590         rte_free(vnic_ids);
3591         return -1;
3592 }
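
/*
 * A sketch of the simpler approach suggested by the TODO above: query the
 * default VNIC ID directly with HWRM_FUNC_QCFG.  This assumes the
 * dflt_vnic_id field of struct hwrm_func_qcfg_output in
 * hsi_struct_def_dpdk.h; it is not the path the driver currently takes.
 */
static __rte_unused int bnxt_example_qcfg_dflt_vnic_id(struct bnxt *bp, int vf)
{
        struct hwrm_func_qcfg_input req = {0};
        struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
        int rc;

        HWRM_PREP(req, FUNC_QCFG);
        req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
        HWRM_CHECK_RESULT();

        rc = rte_le_to_cpu_16(resp->dflt_vnic_id);
        HWRM_UNLOCK();

        return rc;
}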
3593
3594 int bnxt_hwrm_set_em_filter(struct bnxt *bp,
3595                          uint16_t dst_id,
3596                          struct bnxt_filter_info *filter)
3597 {
3598         int rc = 0;
3599         struct hwrm_cfa_em_flow_alloc_input req = {.req_type = 0 };
3600         struct hwrm_cfa_em_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3601         uint32_t enables = 0;
3602
3603         if (filter->fw_em_filter_id != UINT64_MAX)
3604                 bnxt_hwrm_clear_em_filter(bp, filter);
3605
3606         HWRM_PREP(req, CFA_EM_FLOW_ALLOC);
3607
3608         req.flags = rte_cpu_to_le_32(filter->flags);
3609
3610         enables = filter->enables |
3611               HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_ID;
3612         req.dst_id = rte_cpu_to_le_16(dst_id);
3613
3614         if (filter->ip_addr_type) {
3615                 req.ip_addr_type = filter->ip_addr_type;
3616                 enables |= HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
3617         }
3618         if (enables &
3619             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
3620                 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
3621         if (enables &
3622             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_MACADDR)
3623                 memcpy(req.src_macaddr, filter->src_macaddr,
3624                        ETHER_ADDR_LEN);
3625         if (enables &
3626             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_MACADDR)
3627                 memcpy(req.dst_macaddr, filter->dst_macaddr,
3628                        ETHER_ADDR_LEN);
3629         if (enables &
3630             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_OVLAN_VID)
3631                 req.ovlan_vid = filter->l2_ovlan;
3632         if (enables &
3633             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IVLAN_VID)
3634                 req.ivlan_vid = filter->l2_ivlan;
3635         if (enables &
3636             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ETHERTYPE)
3637                 req.ethertype = rte_cpu_to_be_16(filter->ethertype);
3638         if (enables &
3639             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
3640                 req.ip_protocol = filter->ip_protocol;
3641         if (enables &
3642             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_IPADDR)
3643                 req.src_ipaddr[0] = rte_cpu_to_be_32(filter->src_ipaddr[0]);
3644         if (enables &
3645             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_IPADDR)
3646                 req.dst_ipaddr[0] = rte_cpu_to_be_32(filter->dst_ipaddr[0]);
3647         if (enables &
3648             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_PORT)
3649                 req.src_port = rte_cpu_to_be_16(filter->src_port);
3650         if (enables &
3651             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_PORT)
3652                 req.dst_port = rte_cpu_to_be_16(filter->dst_port);
3653         if (enables &
3654             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
3655                 req.mirror_vnic_id = filter->mirror_vnic_id;
3656
3657         req.enables = rte_cpu_to_le_32(enables);
3658
3659         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3660
3661         HWRM_CHECK_RESULT();
3662
3663         filter->fw_em_filter_id = rte_le_to_cpu_64(resp->em_filter_id);
3664         HWRM_UNLOCK();
3665
3666         return rc;
3667 }
3668
3669 int bnxt_hwrm_clear_em_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
3670 {
3671         int rc = 0;
3672         struct hwrm_cfa_em_flow_free_input req = {.req_type = 0 };
3673         struct hwrm_cfa_em_flow_free_output *resp = bp->hwrm_cmd_resp_addr;
3674
3675         if (filter->fw_em_filter_id == UINT64_MAX)
3676                 return 0;
3677
        PMD_DRV_LOG(DEBUG, "Clear EM filter\n");
3679         HWRM_PREP(req, CFA_EM_FLOW_FREE);
3680
3681         req.em_filter_id = rte_cpu_to_le_64(filter->fw_em_filter_id);
3682
3683         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3684
3685         HWRM_CHECK_RESULT();
3686         HWRM_UNLOCK();
3687
3688         filter->fw_em_filter_id = UINT64_MAX;
3689         filter->fw_l2_filter_id = UINT64_MAX;
3690
3691         return 0;
3692 }
3693
3694 int bnxt_hwrm_set_ntuple_filter(struct bnxt *bp,
3695                          uint16_t dst_id,
3696                          struct bnxt_filter_info *filter)
3697 {
3698         int rc = 0;
3699         struct hwrm_cfa_ntuple_filter_alloc_input req = {.req_type = 0 };
3700         struct hwrm_cfa_ntuple_filter_alloc_output *resp =
3701                                                 bp->hwrm_cmd_resp_addr;
3702         uint32_t enables = 0;
3703
3704         if (filter->fw_ntuple_filter_id != UINT64_MAX)
3705                 bnxt_hwrm_clear_ntuple_filter(bp, filter);
3706
3707         HWRM_PREP(req, CFA_NTUPLE_FILTER_ALLOC);
3708
3709         req.flags = rte_cpu_to_le_32(filter->flags);
3710
3711         enables = filter->enables |
3712               HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
3713         req.dst_id = rte_cpu_to_le_16(dst_id);
3714
3716         if (filter->ip_addr_type) {
3717                 req.ip_addr_type = filter->ip_addr_type;
3718                 enables |=
3719                         HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
3720         }
3721         if (enables &
3722             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
3723                 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
3724         if (enables &
3725             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR)
3726                 memcpy(req.src_macaddr, filter->src_macaddr,
3727                        ETHER_ADDR_LEN);
        /*
         * DST_MACADDR matching is currently left unprogrammed:
         * if (enables &
         *     HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_MACADDR)
         *        memcpy(req.dst_macaddr, filter->dst_macaddr,
         *               ETHER_ADDR_LEN);
         */
3732         if (enables &
3733             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE)
3734                 req.ethertype = rte_cpu_to_be_16(filter->ethertype);
3735         if (enables &
3736             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
3737                 req.ip_protocol = filter->ip_protocol;
3738         if (enables &
3739             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR)
3740                 req.src_ipaddr[0] = rte_cpu_to_le_32(filter->src_ipaddr[0]);
3741         if (enables &
3742             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR_MASK)
3743                 req.src_ipaddr_mask[0] =
3744                         rte_cpu_to_le_32(filter->src_ipaddr_mask[0]);
3745         if (enables &
3746             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR)
3747                 req.dst_ipaddr[0] = rte_cpu_to_le_32(filter->dst_ipaddr[0]);
3748         if (enables &
3749             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR_MASK)
3750                 req.dst_ipaddr_mask[0] =
3751                         rte_cpu_to_be_32(filter->dst_ipaddr_mask[0]);
3752         if (enables &
3753             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT)
3754                 req.src_port = rte_cpu_to_le_16(filter->src_port);
3755         if (enables &
3756             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT_MASK)
3757                 req.src_port_mask = rte_cpu_to_le_16(filter->src_port_mask);
3758         if (enables &
3759             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT)
3760                 req.dst_port = rte_cpu_to_le_16(filter->dst_port);
3761         if (enables &
3762             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT_MASK)
3763                 req.dst_port_mask = rte_cpu_to_le_16(filter->dst_port_mask);
3764         if (enables &
3765             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
3766                 req.mirror_vnic_id = filter->mirror_vnic_id;
3767
3768         req.enables = rte_cpu_to_le_32(enables);
3769
3770         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3771
3772         HWRM_CHECK_RESULT();
3773
3774         filter->fw_ntuple_filter_id = rte_le_to_cpu_64(resp->ntuple_filter_id);
3775         HWRM_UNLOCK();
3776
3777         return rc;
3778 }
3779
3780 int bnxt_hwrm_clear_ntuple_filter(struct bnxt *bp,
3781                                 struct bnxt_filter_info *filter)
3782 {
3783         int rc = 0;
3784         struct hwrm_cfa_ntuple_filter_free_input req = {.req_type = 0 };
3785         struct hwrm_cfa_ntuple_filter_free_output *resp =
3786                                                 bp->hwrm_cmd_resp_addr;
3787
3788         if (filter->fw_ntuple_filter_id == UINT64_MAX)
3789                 return 0;
3790
3791         HWRM_PREP(req, CFA_NTUPLE_FILTER_FREE);
3792
3793         req.ntuple_filter_id = rte_cpu_to_le_64(filter->fw_ntuple_filter_id);
3794
3795         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3796
3797         HWRM_CHECK_RESULT();
3798         HWRM_UNLOCK();
3799
3800         filter->fw_ntuple_filter_id = UINT64_MAX;
3801
3802         return 0;
3803 }
3804
3805 int bnxt_vnic_rss_configure(struct bnxt *bp, struct bnxt_vnic_info *vnic)
3806 {
3807         unsigned int rss_idx, fw_idx, i;
3808
3809         if (vnic->rss_table && vnic->hash_type) {
3810                 /*
3811                  * Fill the RSS hash & redirection table with
3812                  * ring group ids for all VNICs
3813                  */
3814                 for (rss_idx = 0, fw_idx = 0; rss_idx < HW_HASH_INDEX_SIZE;
3815                         rss_idx++, fw_idx++) {
3816                         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
3817                                 fw_idx %= bp->rx_cp_nr_rings;
3818                                 if (vnic->fw_grp_ids[fw_idx] !=
3819                                     INVALID_HW_RING_ID)
3820                                         break;
3821                                 fw_idx++;
3822                         }
3823                         if (i == bp->rx_cp_nr_rings)
3824                                 return 0;
3825                         vnic->rss_table[rss_idx] =
3826                                 vnic->fw_grp_ids[fw_idx];
3827                 }
3828                 return bnxt_hwrm_vnic_rss_cfg(bp, vnic);
3829         }
3830         return 0;
3831 }
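
/*
 * Worked example for the redirection-table fill above: with three Rx rings
 * whose group IDs are {5, INVALID_HW_RING_ID, 9}, the HW_HASH_INDEX_SIZE
 * entries become 5, 9, 5, 9, ... since invalid groups are skipped and
 * fw_idx wraps around the Rx ring count.
 */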
3832
3833 static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal,
3834         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
3835 {
3836         uint16_t flags;
3837
3838         req->num_cmpl_aggr_int = rte_cpu_to_le_16(hw_coal->num_cmpl_aggr_int);
3839
        /* This is a 6-bit value and must not be 0, or we will get non-stop IRQs */
3841         req->num_cmpl_dma_aggr = rte_cpu_to_le_16(hw_coal->num_cmpl_dma_aggr);
3842
        /* This is a 6-bit value and must not be 0, or we will get non-stop IRQs */
3844         req->num_cmpl_dma_aggr_during_int =
3845                 rte_cpu_to_le_16(hw_coal->num_cmpl_dma_aggr_during_int);
3846
3847         req->int_lat_tmr_max = rte_cpu_to_le_16(hw_coal->int_lat_tmr_max);
3848
3849         /* min timer set to 1/2 of interrupt timer */
3850         req->int_lat_tmr_min = rte_cpu_to_le_16(hw_coal->int_lat_tmr_min);
3851
3852         /* buf timer set to 1/4 of interrupt timer */
3853         req->cmpl_aggr_dma_tmr = rte_cpu_to_le_16(hw_coal->cmpl_aggr_dma_tmr);
3854
3855         req->cmpl_aggr_dma_tmr_during_int =
3856                 rte_cpu_to_le_16(hw_coal->cmpl_aggr_dma_tmr_during_int);
3857
3858         flags = HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET |
3859                 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_RING_IDLE;
3860         req->flags = rte_cpu_to_le_16(flags);
3861 }
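
/*
 * Illustrative defaults for struct bnxt_coal, consistent with the ratios
 * documented in the comments above (min timer at 1/2 of the interrupt
 * timer, buf timer at 1/4).  The absolute values are assumptions for
 * demonstration, not values taken from the driver.
 */
static __rte_unused void bnxt_example_init_coal(struct bnxt_coal *coal)
{
        coal->num_cmpl_aggr_int = 4;
        /* 6-bit fields; must not be 0 to avoid non-stop IRQs */
        coal->num_cmpl_dma_aggr = 4;
        coal->num_cmpl_dma_aggr_during_int = 4;
        coal->int_lat_tmr_max = 32;
        coal->int_lat_tmr_min = 16;             /* 1/2 of int_lat_tmr_max */
        coal->cmpl_aggr_dma_tmr = 8;            /* 1/4 of int_lat_tmr_max */
        coal->cmpl_aggr_dma_tmr_during_int = 8;
}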
3862
3863 int bnxt_hwrm_set_ring_coal(struct bnxt *bp,
3864                         struct bnxt_coal *coal, uint16_t ring_id)
3865 {
3866         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
3867         struct hwrm_ring_cmpl_ring_cfg_aggint_params_output *resp =
3868                                                 bp->hwrm_cmd_resp_addr;
3869         int rc;
3870
3871         /* Set ring coalesce parameters only for Stratus 100G NIC */
3872         if (!bnxt_stratus_device(bp))
3873                 return 0;
3874
3875         HWRM_PREP(req, RING_CMPL_RING_CFG_AGGINT_PARAMS);
3876         bnxt_hwrm_set_coal_params(coal, &req);
3877         req.ring_id = rte_cpu_to_le_16(ring_id);
3878         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3879         HWRM_CHECK_RESULT();
3880         HWRM_UNLOCK();
        return rc;
3882 }
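
/*
 * Usage sketch: apply the illustrative coalescing profile above to one
 * completion ring.  Note that bnxt_hwrm_set_ring_coal() is a no-op unless
 * the device is a Stratus NIC.
 */
static __rte_unused int bnxt_example_apply_coal(struct bnxt *bp,
                uint16_t cmpl_ring_id)
{
        struct bnxt_coal coal;

        memset(&coal, 0, sizeof(coal));
        bnxt_example_init_coal(&coal);
        return bnxt_hwrm_set_ring_coal(bp, &coal, cmpl_ring_id);
}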