/* drivers/net/bnxt/bnxt_cpr.h — bnxt completion-ring definitions */
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2014-2021 Broadcom
3  * All rights reserved.
4  */
5
6 #ifndef _BNXT_CPR_H_
7 #define _BNXT_CPR_H_
8 #include <stdbool.h>
9
10 #include <rte_io.h>
11 #include "hsi_struct_def_dpdk.h"
12
13 struct bnxt_db_info;
14
/* Extract the completion type field from a completion ring entry. */
#define CMP_TYPE(cmp)						\
	(((struct cmpl_base *)cmp)->type & CMPL_BASE_TYPE_MASK)

/* Get completion length from completion type, in 16-byte units. */
#define CMP_LEN(cmp_type) (((cmp_type) & 1) + 1)


/* Advance a raw (unmasked) consumer index by "n" entries. */
#define ADV_RAW_CMP(idx, n)	((idx) + (n))
#define NEXT_RAW_CMP(idx)	ADV_RAW_CMP(idx, 1)
/* Mask a raw index down to a valid offset within the ring. */
#define RING_CMP(ring, idx)	((idx) & (ring)->ring_mask)
#define RING_CMPL(ring_mask, idx)	((idx) & (ring_mask))
/*
 * Bug fix: the previous definition expanded to RING_CMP() with a single
 * argument, but RING_CMP() takes two — any use of NEXT_CMP() was a
 * guaranteed compile error. Take the ring explicitly so the macro is usable.
 */
#define NEXT_CMP(ring, idx)	RING_CMP(ring, ADV_RAW_CMP(idx, 1))
27
/* Doorbell flag sets written to the completion-ring doorbell.
 * REARM omits DB_IRQ_DIS, so writing it leaves the interrupt enabled;
 * DB_CP_FLAGS includes DB_IRQ_DIS to keep the interrupt masked.
 * NOTE(review): DB_KEY_CP/DB_IDX_VALID/DB_IRQ_DIS come from
 * hsi_struct_def_dpdk.h.
 */
#define DB_CP_REARM_FLAGS       (DB_KEY_CP | DB_IDX_VALID)
#define DB_CP_FLAGS             (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)

/* Write the current consumer index with the interrupt re-armed. */
#define B_CP_DB_REARM(cpr, raw_cons)                                    \
        rte_write32((DB_CP_REARM_FLAGS |                                \
                    DB_RING_IDX(&((cpr)->cp_db), raw_cons)),            \
                    ((cpr)->cp_db.doorbell))

/* Arm the completion doorbell without updating the consumer index. */
#define B_CP_DB_ARM(cpr)        rte_write32((DB_KEY_CP),                \
                                            ((cpr)->cp_db.doorbell))

/* Disarm: plain MMIO store (no rte_write32 barrier) with DB_IRQ_DIS set. */
#define B_CP_DB_DISARM(cpr)     (*(uint32_t *)((cpr)->cp_db.doorbell) = \
                                 DB_KEY_CP | DB_IRQ_DIS)

/* Update the consumer index with the interrupt left disabled.
 * Uses a relaxed-ordering write on the hot path; any required barrier
 * is the caller's responsibility.
 */
#define B_CP_DIS_DB(cpr, raw_cons)                                      \
        rte_write32_relaxed((DB_CP_FLAGS |                              \
                    DB_RING_IDX(&((cpr)->cp_db), raw_cons)),            \
                    ((cpr)->cp_db.doorbell))
46
/* Doorbell register state for one ring.
 * Field order is preserved — this struct's layout may be relied upon
 * elsewhere in the driver.
 */
struct bnxt_db_info {
        void                    *doorbell;      /* mapped doorbell register address */
        union {
                uint64_t        db_key64;       /* key for 64-bit doorbell writes */
                uint32_t        db_key32;       /* key for 32-bit doorbell writes */
        };
        bool                    db_64;          /* true when doorbell writes are 64-bit */
        uint32_t                db_ring_mask;   /* masks a raw index to a ring offset (DB_RING_IDX) */
        uint32_t                db_epoch_mask;  /* selects the epoch bit of the raw index (DB_EPOCH) */
        uint32_t                db_epoch_shift; /* doorbell bit position of the epoch bit (DB_EPOCH) */
};
58
/* DB_EPOCH: extract the epoch bit from a raw index and shift it to its
 * doorbell position. DB_RING_IDX: combine the masked ring offset with
 * that epoch bit to form the doorbell index field.
 */
#define DB_EPOCH(db, idx)       (((idx) & (db)->db_epoch_mask) <<       \
                                 ((db)->db_epoch_shift))
#define DB_RING_IDX(db, idx)    (((idx) & (db)->db_ring_mask) |         \
                                 DB_EPOCH(db, idx))
63
struct bnxt_ring;
/* Per-queue completion ring state.
 * Field order is preserved — layout may be relied upon elsewhere.
 */
struct bnxt_cp_ring_info {
        uint32_t                cp_raw_cons;    /* raw (unmasked) consumer index */

        struct cmpl_base        *cp_desc_ring;  /* completion descriptor array (CPU address) */
        struct bnxt_db_info     cp_db;          /* doorbell used to ack consumed completions */
        rte_iova_t              cp_desc_mapping; /* IO address of cp_desc_ring */

        struct ctx_hw_stats     *hw_stats;      /* HW statistics block (CPU address) */
        rte_iova_t              hw_stats_map;   /* IO address of hw_stats */
        uint32_t                hw_stats_ctx_id; /* FW id of the statistics context */

        struct bnxt_ring        *cp_ring_struct; /* generic ring bookkeeping */
};
78
/* RX completion error bits (buffer errors or bad CRC) that mark a
 * received L2 frame as unusable.
 */
#define RX_CMP_L2_ERRORS                                                \
        (RX_PKT_CMPL_ERRORS_BUFFER_ERROR_MASK | RX_PKT_CMPL_ERRORS_CRC_ERROR)
81
struct bnxt;
/* Handlers for async events, forwarded requests and HWRM responses
 * arriving on a completion ring.
 */
void bnxt_handle_async_event(struct bnxt *bp, struct cmpl_base *cmp);
void bnxt_handle_fwd_req(struct bnxt *bp, struct cmpl_base *cmp);
int bnxt_event_hwrm_resp_handler(struct bnxt *bp, struct cmpl_base *cmp);
/* Error-recovery entry points. */
void bnxt_dev_reset_and_resume(void *arg);
void bnxt_wait_for_device_shutdown(struct bnxt *bp);

/* Shorter aliases for the verbose HSI async-event field definitions
 * (reset-notify reason codes and error-recovery flags).
 */
#define EVENT_DATA1_REASON_CODE_FW_EXCEPTION_FATAL     \
        HWRM_ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_FATAL
#define EVENT_DATA1_REASON_CODE_MASK                   \
        HWRM_ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_MASK

#define EVENT_DATA1_FLAGS_MASK                         \
        HWRM_ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_MASK

#define EVENT_DATA1_FLAGS_MASTER_FUNC                  \
        HWRM_ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_MASTER_FUNC

#define EVENT_DATA1_FLAGS_RECOVERY_ENABLED             \
        HWRM_ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_RECOVERY_ENABLED

bool bnxt_is_recovery_enabled(struct bnxt *bp);
bool bnxt_is_primary_func(struct bnxt *bp);

/* Stop RX/TX burst processing — presumably used around reset/recovery;
 * NOTE(review): confirm against callers in bnxt_ethdev.c.
 */
void bnxt_stop_rxtx(struct rte_eth_dev *eth_dev);
107
108 /**
109  * Check validity of a completion ring entry. If the entry is valid, include a
110  * C11 __ATOMIC_ACQUIRE fence to ensure that subsequent loads of fields in the
111  * completion are not hoisted by the compiler or by the CPU to come before the
112  * loading of the "valid" field.
113  *
114  * Note: the caller must not access any fields in the specified completion
115  * entry prior to calling this function.
116  *
117  * @param cmpl
118  *   Pointer to an entry in the completion ring.
119  * @param raw_cons
120  *   Raw consumer index of entry in completion ring.
121  * @param ring_size
122  *   Size of completion ring.
123  */
124 static __rte_always_inline bool
125 bnxt_cpr_cmp_valid(const void *cmpl, uint32_t raw_cons, uint32_t ring_size)
126 {
127         const struct cmpl_base *c = cmpl;
128         bool expected, valid;
129
130         expected = !(raw_cons & ring_size);
131         valid = !!(rte_le_to_cpu_32(c->info3_v) & CMPL_BASE_V);
132         if (valid == expected) {
133                 rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
134                 return true;
135         }
136         return false;
137 }
138 #endif