net/qede: initialize VF MAC and link
[dpdk.git] / drivers / net / qede / base / bcm_osal.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright (c) 2016 - 2018 Cavium Inc.
3  * All rights reserved.
4  * www.cavium.com
5  */
6
7 #include <rte_memzone.h>
8 #include <rte_errno.h>
9
10 #include "bcm_osal.h"
11 #include "ecore.h"
12 #include "ecore_hw.h"
13 #include "ecore_dev_api.h"
14 #include "ecore_iov_api.h"
15 #include "ecore_mcp_api.h"
16 #include "ecore_l2_api.h"
17 #include "../qede_sriov.h"
18
19 int osal_pf_vf_msg(struct ecore_hwfn *p_hwfn)
20 {
21         int rc;
22
23         rc = qed_schedule_iov(p_hwfn, QED_IOV_WQ_MSG_FLAG);
24         if (rc) {
25                 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
26                            "Failed to schedule alarm handler rc=%d\n", rc);
27         }
28
29         return rc;
30 }
31
32 void osal_poll_mode_dpc(osal_int_ptr_t hwfn_cookie)
33 {
34         struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)hwfn_cookie;
35
36         if (!p_hwfn)
37                 return;
38
39         OSAL_SPIN_LOCK(&p_hwfn->spq_lock);
40         ecore_int_sp_dpc((osal_int_ptr_t)(p_hwfn));
41         OSAL_SPIN_UNLOCK(&p_hwfn->spq_lock);
42 }
43
/* Array of memzone pointers; entries [0, ecore_mz_count) are live
 * allocations made by osal_dma_alloc_coherent*() and released through
 * osal_dma_free_mem(), which looks them up by IOVA.
 * NOTE(review): no lock guards this table — assumes alloc/free are
 * serialized by the caller; confirm.
 */
static const struct rte_memzone *ecore_mz_mapping[RTE_MAX_MEMZONE];
/* Counter to track current memzone allocated */
static uint16_t ecore_mz_count;
48
/* Round @n up to the nearest power of two; 0 maps to 0 and exact powers
 * of two are returned unchanged.
 */
unsigned long qede_log2_align(unsigned long n)
{
	unsigned long pow2;
	unsigned long rest;

	if (n == 0)
		return 0;

	/* Grow a single set bit once per bit of n beyond the lowest one,
	 * yielding the largest power of two <= n.
	 */
	pow2 = 1;
	for (rest = n >> 1; rest != 0; rest >>= 1)
		pow2 <<= 1;

	/* Bump to the next power of two when n is not an exact power. */
	if (pow2 < n)
		pow2 <<= 1;

	return pow2;
}
64
65 u32 qede_osal_log2(u32 val)
66 {
67         u32 log = 0;
68
69         while (val >>= 1)
70                 log++;
71
72         return log;
73 }
74
75 static inline u32 qede_ffb(unsigned long word)
76 {
77         unsigned long first_bit;
78
79         first_bit = __builtin_ffsl(word);
80         return first_bit ? (first_bit - 1) : OSAL_BITS_PER_UL;
81 }
82
83 inline u32 qede_find_first_bit(unsigned long *addr, u32 limit)
84 {
85         u32 i;
86         u32 nwords = 0;
87         OSAL_BUILD_BUG_ON(!limit);
88         nwords = (limit - 1) / OSAL_BITS_PER_UL + 1;
89         for (i = 0; i < nwords; i++)
90                 if (addr[i] != 0)
91                         break;
92
93         return (i == nwords) ? limit : i * OSAL_BITS_PER_UL + qede_ffb(addr[i]);
94 }
95
96 static inline u32 qede_ffz(unsigned long word)
97 {
98         unsigned long first_zero;
99
100         first_zero = __builtin_ffsl(~word);
101         return first_zero ? (first_zero - 1) : OSAL_BITS_PER_UL;
102 }
103
104 inline u32 qede_find_first_zero_bit(u32 *addr, u32 limit)
105 {
106         u32 i;
107         u32 nwords = 0;
108         OSAL_BUILD_BUG_ON(!limit);
109         nwords = (limit - 1) / OSAL_BITS_PER_UL + 1;
110         for (i = 0; i < nwords && ~(addr[i]) == 0; i++);
111         return (i == nwords) ? limit : i * OSAL_BITS_PER_UL + qede_ffz(addr[i]);
112 }
113
114 void qede_vf_fill_driver_data(struct ecore_hwfn *hwfn,
115                               __rte_unused struct vf_pf_resc_request *resc_req,
116                               struct ecore_vf_acquire_sw_info *vf_sw_info)
117 {
118         vf_sw_info->os_type = VFPF_ACQUIRE_OS_LINUX_USERSPACE;
119         vf_sw_info->override_fw_version = 1;
120 }
121
122 void *osal_dma_alloc_coherent(struct ecore_dev *p_dev,
123                               dma_addr_t *phys, size_t size)
124 {
125         const struct rte_memzone *mz;
126         char mz_name[RTE_MEMZONE_NAMESIZE];
127         uint32_t core_id = rte_lcore_id();
128         unsigned int socket_id;
129
130         if (ecore_mz_count >= RTE_MAX_MEMZONE) {
131                 DP_ERR(p_dev, "Memzone allocation count exceeds %u\n",
132                        RTE_MAX_MEMZONE);
133                 *phys = 0;
134                 return OSAL_NULL;
135         }
136
137         OSAL_MEM_ZERO(mz_name, sizeof(*mz_name));
138         snprintf(mz_name, sizeof(mz_name), "%lx",
139                                         (unsigned long)rte_get_timer_cycles());
140         if (core_id == (unsigned int)LCORE_ID_ANY)
141                 core_id = rte_get_master_lcore();
142         socket_id = rte_lcore_to_socket_id(core_id);
143         mz = rte_memzone_reserve_aligned(mz_name, size, socket_id,
144                         RTE_MEMZONE_IOVA_CONTIG, RTE_CACHE_LINE_SIZE);
145         if (!mz) {
146                 DP_ERR(p_dev, "Unable to allocate DMA memory "
147                        "of size %zu bytes - %s\n",
148                        size, rte_strerror(rte_errno));
149                 *phys = 0;
150                 return OSAL_NULL;
151         }
152         *phys = mz->iova;
153         ecore_mz_mapping[ecore_mz_count++] = mz;
154         DP_VERBOSE(p_dev, ECORE_MSG_SP,
155                    "Allocated dma memory size=%zu phys=0x%lx"
156                    " virt=%p core=%d\n",
157                    mz->len, (unsigned long)mz->iova, mz->addr, core_id);
158         return mz->addr;
159 }
160
161 void *osal_dma_alloc_coherent_aligned(struct ecore_dev *p_dev,
162                                       dma_addr_t *phys, size_t size, int align)
163 {
164         const struct rte_memzone *mz;
165         char mz_name[RTE_MEMZONE_NAMESIZE];
166         uint32_t core_id = rte_lcore_id();
167         unsigned int socket_id;
168
169         if (ecore_mz_count >= RTE_MAX_MEMZONE) {
170                 DP_ERR(p_dev, "Memzone allocation count exceeds %u\n",
171                        RTE_MAX_MEMZONE);
172                 *phys = 0;
173                 return OSAL_NULL;
174         }
175
176         OSAL_MEM_ZERO(mz_name, sizeof(*mz_name));
177         snprintf(mz_name, sizeof(mz_name), "%lx",
178                                         (unsigned long)rte_get_timer_cycles());
179         if (core_id == (unsigned int)LCORE_ID_ANY)
180                 core_id = rte_get_master_lcore();
181         socket_id = rte_lcore_to_socket_id(core_id);
182         mz = rte_memzone_reserve_aligned(mz_name, size, socket_id,
183                         RTE_MEMZONE_IOVA_CONTIG, align);
184         if (!mz) {
185                 DP_ERR(p_dev, "Unable to allocate DMA memory "
186                        "of size %zu bytes - %s\n",
187                        size, rte_strerror(rte_errno));
188                 *phys = 0;
189                 return OSAL_NULL;
190         }
191         *phys = mz->iova;
192         ecore_mz_mapping[ecore_mz_count++] = mz;
193         DP_VERBOSE(p_dev, ECORE_MSG_SP,
194                    "Allocated aligned dma memory size=%zu phys=0x%lx"
195                    " virt=%p core=%d\n",
196                    mz->len, (unsigned long)mz->iova, mz->addr, core_id);
197         return mz->addr;
198 }
199
200 void osal_dma_free_mem(struct ecore_dev *p_dev, dma_addr_t phys)
201 {
202         uint16_t j;
203
204         for (j = 0 ; j < ecore_mz_count; j++) {
205                 if (phys == ecore_mz_mapping[j]->iova) {
206                         DP_VERBOSE(p_dev, ECORE_MSG_SP,
207                                 "Free memzone %s\n", ecore_mz_mapping[j]->name);
208                         rte_memzone_free(ecore_mz_mapping[j]);
209                         while (j < ecore_mz_count - 1) {
210                                 ecore_mz_mapping[j] = ecore_mz_mapping[j + 1];
211                                 j++;
212                         }
213                         ecore_mz_count--;
214                         return;
215                 }
216         }
217
218         DP_ERR(p_dev, "Unexpected memory free request\n");
219 }
220
221 #ifdef CONFIG_ECORE_ZIPPED_FW
222 u32 qede_unzip_data(struct ecore_hwfn *p_hwfn, u32 input_len,
223                     u8 *input_buf, u32 max_size, u8 *unzip_buf)
224 {
225         int rc;
226
227         p_hwfn->stream->next_in = input_buf;
228         p_hwfn->stream->avail_in = input_len;
229         p_hwfn->stream->next_out = unzip_buf;
230         p_hwfn->stream->avail_out = max_size;
231
232         rc = inflateInit2(p_hwfn->stream, MAX_WBITS);
233
234         if (rc != Z_OK) {
235                 DP_ERR(p_hwfn,
236                            "zlib init failed, rc = %d\n", rc);
237                 return 0;
238         }
239
240         rc = inflate(p_hwfn->stream, Z_FINISH);
241         inflateEnd(p_hwfn->stream);
242
243         if (rc != Z_OK && rc != Z_STREAM_END) {
244                 DP_ERR(p_hwfn,
245                            "FW unzip error: %s, rc=%d\n", p_hwfn->stream->msg,
246                            rc);
247                 return 0;
248         }
249
250         return p_hwfn->stream->total_out / 4;
251 }
252 #endif
253
254 void
255 qede_get_mcp_proto_stats(struct ecore_dev *edev,
256                          enum ecore_mcp_protocol_type type,
257                          union ecore_mcp_protocol_stats *stats)
258 {
259         struct ecore_eth_stats lan_stats;
260
261         if (type == ECORE_MCP_LAN_STATS) {
262                 ecore_get_vport_stats(edev, &lan_stats);
263
264                 /* @DPDK */
265                 stats->lan_stats.ucast_rx_pkts = lan_stats.common.rx_ucast_pkts;
266                 stats->lan_stats.ucast_tx_pkts = lan_stats.common.tx_ucast_pkts;
267
268                 stats->lan_stats.fcs_err = -1;
269         } else {
270                 DP_INFO(edev, "Statistics request type %d not supported\n",
271                        type);
272         }
273 }
274
275 static void qede_hw_err_handler(void *dev, enum ecore_hw_err_type err_type)
276 {
277         struct ecore_dev *edev = dev;
278
279         switch (err_type) {
280         case ECORE_HW_ERR_FAN_FAIL:
281                 break;
282
283         case ECORE_HW_ERR_MFW_RESP_FAIL:
284         case ECORE_HW_ERR_HW_ATTN:
285         case ECORE_HW_ERR_DMAE_FAIL:
286         case ECORE_HW_ERR_RAMROD_FAIL:
287         case ECORE_HW_ERR_FW_ASSERT:
288                 OSAL_SAVE_FW_DUMP(0); /* Using port 0 as default port_id */
289                 break;
290
291         default:
292                 DP_NOTICE(edev, false, "Unknown HW error [%d]\n", err_type);
293                 return;
294         }
295 }
296
297 void
298 qede_hw_err_notify(struct ecore_hwfn *p_hwfn, enum ecore_hw_err_type err_type)
299 {
300         char err_str[64];
301
302         switch (err_type) {
303         case ECORE_HW_ERR_FAN_FAIL:
304                 strcpy(err_str, "Fan Failure");
305                 break;
306         case ECORE_HW_ERR_MFW_RESP_FAIL:
307                 strcpy(err_str, "MFW Response Failure");
308                 break;
309         case ECORE_HW_ERR_HW_ATTN:
310                 strcpy(err_str, "HW Attention");
311                 break;
312         case ECORE_HW_ERR_DMAE_FAIL:
313                 strcpy(err_str, "DMAE Failure");
314                 break;
315         case ECORE_HW_ERR_RAMROD_FAIL:
316                 strcpy(err_str, "Ramrod Failure");
317                 break;
318         case ECORE_HW_ERR_FW_ASSERT:
319                 strcpy(err_str, "FW Assertion");
320                 break;
321         default:
322                 strcpy(err_str, "Unknown");
323         }
324
325         DP_ERR(p_hwfn, "HW error occurred [%s]\n", err_str);
326
327         qede_hw_err_handler(p_hwfn->p_dev, err_type);
328
329         ecore_int_attn_clr_enable(p_hwfn->p_dev, true);
330 }
331
332 u32 qede_crc32(u32 crc, u8 *ptr, u32 length)
333 {
334         int i;
335
336         while (length--) {
337                 crc ^= *ptr++;
338                 for (i = 0; i < 8; i++)
339                         crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
340         }
341         return crc;
342 }
343
/* Fill @buf_str (capacity @buf_size, NUL included) with the platform
 * identification string: the DPDK version followed by a '.'; snprintf
 * truncates if the buffer is too small.  @p_hwfn is unused here.
 */
void qed_set_platform_str(struct ecore_hwfn *p_hwfn,
			  char *buf_str, u32 buf_size)
{
	snprintf(buf_str, buf_size, "%s.", rte_version());
}