1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright (c) 2016 - 2018 Cavium Inc.
7 #include <rte_memzone.h>
13 #include "ecore_dev_api.h"
14 #include "ecore_iov_api.h"
15 #include "ecore_mcp_api.h"
16 #include "ecore_l2_api.h"
17 #include "../qede_sriov.h"
/* PF-side hook invoked when a VF message arrives: hands the event to
 * qed_schedule_iov() with QED_IOV_WQ_MSG_FLAG (presumably deferring the
 * actual VF-message processing to the IOV work queue rather than the
 * calling interrupt context).
 * NOTE(review): fragmentary view — the declaration of rc, the braces
 * and the return statement are not visible in this chunk.
 */
19 int osal_pf_vf_msg(struct ecore_hwfn *p_hwfn)
23 rc = qed_schedule_iov(p_hwfn, QED_IOV_WQ_MSG_FLAG);
/* A scheduling failure is only reported at IOV verbosity. */
25 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
26 "Failed to schedule alarm handler rc=%d\n", rc);
/* Poll-mode wrapper around the slow-path DPC: recover the hwfn from the
 * opaque integer cookie and run ecore_int_sp_dpc() while holding
 * p_hwfn->spq_lock.
 * NOTE(review): fragmentary view — braces (and any early NULL check on
 * p_hwfn) are not visible in this chunk.
 */
32 void osal_poll_mode_dpc(osal_int_ptr_t hwfn_cookie)
34 struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)hwfn_cookie;
39 OSAL_SPIN_LOCK(&p_hwfn->spq_lock);
40 ecore_int_sp_dpc((osal_int_ptr_t)(p_hwfn));
41 OSAL_SPIN_UNLOCK(&p_hwfn->spq_lock);
44 /* Array of memzone pointers */
/* Registry of every memzone handed out by the osal_dma_alloc_coherent*()
 * helpers below; osal_dma_free_mem() searches it by IOVA to locate the
 * zone to release. */
45 static const struct rte_memzone *ecore_mz_mapping[RTE_MAX_MEMZONE];
46 /* Counter to track current memzone allocated */
/* NOTE(review): accesses to this counter and the array above are not
 * synchronized — confirm all allocations/frees occur in one context. */
47 static uint16_t ecore_mz_count;
/* Round @n to a power-of-two boundary ("log2 align"); ret starts at 0
 * when @n is 0 and at 1 otherwise, with _n pre-shifted for the scan.
 * NOTE(review): fragmentary view — the loop that consumes _n and scales
 * ret is not visible here, so the exact rounding direction (up vs. to
 * nearest) cannot be confirmed from this chunk.
 */
49 unsigned long qede_log2_align(unsigned long n)
51 unsigned long ret = n ? 1 : 0;
52 unsigned long _n = n >> 1;
/* Integer base-2 logarithm of @val.
 * NOTE(review): fragmentary view — the function body is not visible in
 * this chunk. */
65 u32 qede_osal_log2(u32 val)
75 static inline u32 qede_ffb(unsigned long word)
77 unsigned long first_bit;
79 first_bit = __builtin_ffsl(word);
80 return first_bit ? (first_bit - 1) : OSAL_BITS_PER_UL;
/* Return the index of the first set bit in the @limit-bit bitmap at
 * @addr, or @limit when no bit is set.
 * NOTE(review): fragmentary view — the loop body (presumably an early
 * break on the first non-zero word) and the declarations of i/nwords
 * are not visible in this chunk.
 */
83 inline u32 qede_find_first_bit(unsigned long *addr, u32 limit)
/* Compile-time guard: a zero-sized bitmap is a caller bug. */
87 OSAL_BUILD_BUG_ON(!limit);
/* Number of unsigned-long words needed to hold @limit bits. */
88 nwords = (limit - 1) / OSAL_BITS_PER_UL + 1;
89 for (i = 0; i < nwords; i++)
/* Falling off the end means "not found": report @limit. */
93 return (i == nwords) ? limit : i * OSAL_BITS_PER_UL + qede_ffb(addr[i]);
96 static inline u32 qede_ffz(unsigned long word)
98 unsigned long first_zero;
100 first_zero = __builtin_ffsl(~word);
101 return first_zero ? (first_zero - 1) : OSAL_BITS_PER_UL;
/* Return the index of the first clear bit in the @limit-bit bitmap at
 * @addr, or @limit when every bit is set.
 * NOTE(review): @addr is an array of 32-bit words, yet the word count
 * and the bit offset are computed with OSAL_BITS_PER_UL (bits per
 * unsigned long) and addr[i] is widened when passed to qede_ffz(). On
 * LP64 targets that scans 32-bit storage with 64-bit strides — confirm
 * callers only use limit <= 32, or that this mismatch is intended.
 * NOTE(review): fragmentary view — declarations of i/nwords and the
 * braces are not visible in this chunk.
 */
104 inline u32 qede_find_first_zero_bit(u32 *addr, u32 limit)
108 OSAL_BUILD_BUG_ON(!limit);
109 nwords = (limit - 1) / OSAL_BITS_PER_UL + 1;
/* Skip words that are all ones (no zero bit to report). */
110 for (i = 0; i < nwords && ~(addr[i]) == 0; i++);
111 return (i == nwords) ? limit : i * OSAL_BITS_PER_UL + qede_ffz(addr[i]);
114 void qede_vf_fill_driver_data(struct ecore_hwfn *hwfn,
115 __rte_unused struct vf_pf_resc_request *resc_req,
116 struct ecore_vf_acquire_sw_info *vf_sw_info)
118 vf_sw_info->os_type = VFPF_ACQUIRE_OS_LINUX_USERSPACE;
119 vf_sw_info->override_fw_version = 1;
/* Allocate a cache-line-aligned, IOVA-contiguous DMA region of @size
 * bytes from an rte_memzone on the caller's NUMA socket, recording the
 * zone in ecore_mz_mapping[] so osal_dma_free_mem() can later release
 * it by physical address.
 * NOTE(review): fragmentary view — the braces, the error-path returns,
 * the !mz check, and the assignment of *phys / return of the virtual
 * address are not visible in this chunk.
 */
122 void *osal_dma_alloc_coherent(struct ecore_dev *p_dev,
123 dma_addr_t *phys, size_t size)
125 const struct rte_memzone *mz;
126 char mz_name[RTE_MEMZONE_NAMESIZE];
127 uint32_t core_id = rte_lcore_id();
128 unsigned int socket_id;
/* Refuse to overflow the fixed-size tracking table. */
130 if (ecore_mz_count >= RTE_MAX_MEMZONE) {
131 DP_ERR(p_dev, "Memzone allocation count exceeds %u\n",
/* NOTE(review): sizeof(*mz_name) is sizeof(char) == 1, so only the
 * first byte is zeroed; sizeof(mz_name) was almost certainly intended.
 * Harmless in practice only because snprintf() below NUL-terminates. */
137 OSAL_MEM_ZERO(mz_name, sizeof(*mz_name));
/* Unique-ish zone name derived from the free-running timer counter. */
138 snprintf(mz_name, sizeof(mz_name), "%lx",
139 (unsigned long)rte_get_timer_cycles());
/* Non-EAL threads report LCORE_ID_ANY; fall back to the master lcore
 * so a valid NUMA socket can be chosen. */
140 if (core_id == (unsigned int)LCORE_ID_ANY)
141 core_id = rte_get_master_lcore();
142 socket_id = rte_lcore_to_socket_id(core_id);
143 mz = rte_memzone_reserve_aligned(mz_name, size, socket_id,
144 RTE_MEMZONE_IOVA_CONTIG, RTE_CACHE_LINE_SIZE);
/* (Allocation-failure check precedes this log; not visible here.) */
146 DP_ERR(p_dev, "Unable to allocate DMA memory "
147 "of size %zu bytes - %s\n",
148 size, rte_strerror(rte_errno));
/* Success: remember the zone for the free path. */
153 ecore_mz_mapping[ecore_mz_count++] = mz;
154 DP_VERBOSE(p_dev, ECORE_MSG_SP,
155 "Allocated dma memory size=%zu phys=0x%lx"
156 " virt=%p core=%d\n",
157 mz->len, (unsigned long)mz->iova, mz->addr, core_id);
/* Same as osal_dma_alloc_coherent() but with a caller-specified @align
 * instead of the default cache-line alignment.
 * NOTE(review): fragmentary view — braces, error-path returns, the !mz
 * check, and the *phys assignment / return are not visible here.
 */
161 void *osal_dma_alloc_coherent_aligned(struct ecore_dev *p_dev,
162 dma_addr_t *phys, size_t size, int align)
164 const struct rte_memzone *mz;
165 char mz_name[RTE_MEMZONE_NAMESIZE];
166 uint32_t core_id = rte_lcore_id();
167 unsigned int socket_id;
/* Refuse to overflow the fixed-size tracking table. */
169 if (ecore_mz_count >= RTE_MAX_MEMZONE) {
170 DP_ERR(p_dev, "Memzone allocation count exceeds %u\n",
/* NOTE(review): same defect as osal_dma_alloc_coherent() —
 * sizeof(*mz_name) zeroes a single byte; sizeof(mz_name) was almost
 * certainly intended. snprintf() below masks it by NUL-terminating. */
176 OSAL_MEM_ZERO(mz_name, sizeof(*mz_name));
/* Unique-ish zone name derived from the free-running timer counter. */
177 snprintf(mz_name, sizeof(mz_name), "%lx",
178 (unsigned long)rte_get_timer_cycles())
/* Non-EAL threads report LCORE_ID_ANY; use the master lcore's socket. */
179 if (core_id == (unsigned int)LCORE_ID_ANY)
180 core_id = rte_get_master_lcore();
181 socket_id = rte_lcore_to_socket_id(core_id);
182 mz = rte_memzone_reserve_aligned(mz_name, size, socket_id,
183 RTE_MEMZONE_IOVA_CONTIG, align);
/* (Allocation-failure check precedes this log; not visible here.) */
185 DP_ERR(p_dev, "Unable to allocate DMA memory "
186 "of size %zu bytes - %s\n",
187 size, rte_strerror(rte_errno));
/* Success: remember the zone for the free path. */
192 ecore_mz_mapping[ecore_mz_count++] = mz;
193 DP_VERBOSE(p_dev, ECORE_MSG_SP,
194 "Allocated aligned dma memory size=%zu phys=0x%lx"
195 " virt=%p core=%d\n",
196 mz->len, (unsigned long)mz->iova, mz->addr, core_id);
/* Free the DMA region previously returned for physical address @phys:
 * linear-search the tracking table for the memzone whose IOVA matches,
 * free it, and compact the table by shifting later entries down.
 * Falls through to an error log when @phys matches no tracked zone.
 * NOTE(review): fragmentary view — the shift-loop increment, the
 * ecore_mz_count decrement and the early return after a successful
 * free are not visible in this chunk.
 */
200 void osal_dma_free_mem(struct ecore_dev *p_dev, dma_addr_t phys)
204 for (j = 0 ; j < ecore_mz_count; j++) {
205 if (phys == ecore_mz_mapping[j]->iova) {
206 DP_VERBOSE(p_dev, ECORE_MSG_SP,
207 "Free memzone %s\n", ecore_mz_mapping[j]->name);
208 rte_memzone_free(ecore_mz_mapping[j]);
/* Keep the table dense: shift remaining entries down one slot. */
209 while (j < ecore_mz_count - 1) {
210 ecore_mz_mapping[j] = ecore_mz_mapping[j + 1];
/* Reached only when no tracked zone matched @phys. */
218 DP_ERR(p_dev, "Unexpected memory free request\n");
221 #ifdef CONFIG_ECORE_ZIPPED_FW
/* Decompress zlib-compressed firmware: inflate @input_len bytes from
 * @input_buf into @unzip_buf (capacity @max_size) using the per-hwfn
 * zlib stream. Returns the decompressed length in 32-bit dwords
 * (total_out / 4).
 * NOTE(review): fragmentary view — the declaration of rc and the
 * error-path return values are not visible in this chunk.
 */
222 u32 qede_unzip_data(struct ecore_hwfn *p_hwfn, u32 input_len,
223 u8 *input_buf, u32 max_size, u8 *unzip_buf)
227 p_hwfn->stream->next_in = input_buf;
228 p_hwfn->stream->avail_in = input_len;
229 p_hwfn->stream->next_out = unzip_buf;
230 p_hwfn->stream->avail_out = max_size;
/* MAX_WBITS selects the standard zlib window size/header handling. */
232 rc = inflateInit2(p_hwfn->stream, MAX_WBITS);
236 "zlib init failed, rc = %d\n", rc);
/* Single-shot decompression: Z_FINISH expects the whole input/output
 * to fit in the buffers supplied above. */
240 rc = inflate(p_hwfn->stream, Z_FINISH);
241 inflateEnd(p_hwfn->stream);
/* Z_STREAM_END is the normal completion status for Z_FINISH. */
243 if (rc != Z_OK && rc != Z_STREAM_END) {
245 "FW unzip error: %s, rc=%d\n", p_hwfn->stream->msg,
250 return p_hwfn->stream->total_out / 4;
/* OSAL callback serving the management FW's protocol-statistics
 * request. Only ECORE_MCP_LAN_STATS is supported: it is filled from
 * the vport statistics; any other type is logged as unsupported.
 * NOTE(review): fragmentary view — the return-type line, braces, and
 * some field copies between the visible ones are not in this chunk.
 */
255 qede_get_mcp_proto_stats(struct ecore_dev *edev,
256 enum ecore_mcp_protocol_type type,
257 union ecore_mcp_protocol_stats *stats)
259 struct ecore_eth_stats lan_stats;
261 if (type == ECORE_MCP_LAN_STATS) {
262 ecore_get_vport_stats(edev, &lan_stats);
265 stats->lan_stats.ucast_rx_pkts = lan_stats.common.rx_ucast_pkts;
266 stats->lan_stats.ucast_tx_pkts = lan_stats.common.tx_ucast_pkts;
/* FCS error count is presumably not tracked; -1 marks it unavailable
 * — confirm against the MFW consumer. */
268 stats->lan_stats.fcs_err = -1;
270 DP_INFO(edev, "Statistics request type %d not supported\n",
/* Device-level reaction to a HW error: the FW/HW fault types grouped
 * below all trigger a FW dump capture; unknown codes are only logged.
 * NOTE(review): fragmentary view — the switch statement line, the
 * break statements and the FAN_FAIL-specific body are not visible in
 * this chunk.
 */
275 static void qede_hw_err_handler(void *dev, enum ecore_hw_err_type err_type)
277 struct ecore_dev *edev = dev;
280 case ECORE_HW_ERR_FAN_FAIL:
/* These error types share one action: capture a FW dump. */
283 case ECORE_HW_ERR_MFW_RESP_FAIL:
284 case ECORE_HW_ERR_HW_ATTN:
285 case ECORE_HW_ERR_DMAE_FAIL:
286 case ECORE_HW_ERR_RAMROD_FAIL:
287 case ECORE_HW_ERR_FW_ASSERT:
288 OSAL_SAVE_FW_DUMP(0); /* Using port 0 as default port_id */
292 DP_NOTICE(edev, false, "Unknown HW error [%d]\n", err_type);
/* OSAL notification of a HW error on @p_hwfn: translate the error code
 * to a human-readable name, log it, run the device-level handler, then
 * call ecore_int_attn_clr_enable(..., true) — presumably to re-arm
 * attention interrupts so further errors keep being reported.
 * NOTE(review): fragmentary view — the err_str buffer declaration, the
 * switch line and the break statements are not visible here; the
 * strcpy() calls rely on that buffer being sized for these literals.
 */
298 qede_hw_err_notify(struct ecore_hwfn *p_hwfn, enum ecore_hw_err_type err_type)
303 case ECORE_HW_ERR_FAN_FAIL:
304 strcpy(err_str, "Fan Failure");
306 case ECORE_HW_ERR_MFW_RESP_FAIL:
307 strcpy(err_str, "MFW Response Failure");
309 case ECORE_HW_ERR_HW_ATTN:
310 strcpy(err_str, "HW Attention");
312 case ECORE_HW_ERR_DMAE_FAIL:
313 strcpy(err_str, "DMAE Failure");
315 case ECORE_HW_ERR_RAMROD_FAIL:
316 strcpy(err_str, "Ramrod Failure");
318 case ECORE_HW_ERR_FW_ASSERT:
319 strcpy(err_str, "FW Assertion");
322 strcpy(err_str, "Unknown");
325 DP_ERR(p_hwfn, "HW error occurred [%s]\n", err_str);
/* Delegate device-scope handling (FW dump capture etc.). */
327 qede_hw_err_handler(p_hwfn->p_dev, err_type);
329 ecore_int_attn_clr_enable(p_hwfn->p_dev, true);
/* Bitwise CRC-32 over @length bytes at @ptr, seeded with @crc. The
 * constant 0xedb88320 is the reflected IEEE 802.3 / zlib CRC-32
 * polynomial, applied LSB-first one bit per iteration.
 * NOTE(review): fragmentary view — the outer per-byte loop and the XOR
 * of each input byte into crc are not visible here; only the inner
 * 8-bit shift loop is shown.
 */
332 u32 qede_crc32(u32 crc, u8 *ptr, u32 length)
338 for (i = 0; i < 8; i++)
339 crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
344 void qed_set_platform_str(struct ecore_hwfn *p_hwfn,
345 char *buf_str, u32 buf_size)
347 snprintf(buf_str, buf_size, "%s.", rte_version());