/*******************************************************************************

Copyright (c) 2013 - 2015, Intel Corporation
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

 1. Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

 2. Redistributions in binary form must reproduce the above copyright
    notice, this list of conditions and the following disclaimer in the
    documentation and/or other materials provided with the distribution.

 3. Neither the name of the Intel Corporation nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

***************************************************************************/

#include "i40e_osdep.h"
#include "i40e_register.h"
#include "i40e_status.h"
#include "i40e_alloc.h"
#include "i40e_hmc.h"
#include "i40e_type.h"

/**
 * i40e_add_sd_table_entry - Adds a segment descriptor to the table
 * @hw: pointer to our hw struct
 * @hmc_info: pointer to the HMC configuration information struct
 * @sd_index: segment descriptor index to manipulate
 * @type: what type of segment descriptor we're manipulating
 * @direct_mode_sz: size to alloc in direct mode
 **/
enum i40e_status_code i40e_add_sd_table_entry(struct i40e_hw *hw,
					      struct i40e_hmc_info *hmc_info,
					      u32 sd_index,
					      enum i40e_sd_entry_type type,
					      u64 direct_mode_sz)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	struct i40e_hmc_sd_entry *sd_entry;
	enum i40e_memory_type mem_type;
	bool dma_mem_alloc_done = false;
	struct i40e_dma_mem mem;
	u64 alloc_len;

	if (NULL == hmc_info->sd_table.sd_entry) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_add_sd_table_entry: bad sd_entry\n");
		goto exit;
	}

	if (sd_index >= hmc_info->sd_table.sd_cnt) {
		ret_code = I40E_ERR_INVALID_SD_INDEX;
		DEBUGOUT("i40e_add_sd_table_entry: bad sd_index\n");
		goto exit;
	}

	sd_entry = &hmc_info->sd_table.sd_entry[sd_index];
	if (!sd_entry->valid) {
		if (I40E_SD_TYPE_PAGED == type) {
			mem_type = i40e_mem_pd;
			alloc_len = I40E_HMC_PAGED_BP_SIZE;
		} else {
			mem_type = i40e_mem_bp_jumbo;
			alloc_len = direct_mode_sz;
		}

		/* allocate a 4K pd page or 2M backing page */
		ret_code = i40e_allocate_dma_mem(hw, &mem, mem_type, alloc_len,
						 I40E_HMC_PD_BP_BUF_ALIGNMENT);
		if (ret_code)
			goto exit;
		dma_mem_alloc_done = true;
		if (I40E_SD_TYPE_PAGED == type) {
			ret_code = i40e_allocate_virt_mem(hw,
					&sd_entry->u.pd_table.pd_entry_virt_mem,
					sizeof(struct i40e_hmc_pd_entry) * 512);
			if (ret_code)
				goto exit;
			sd_entry->u.pd_table.pd_entry =
				(struct i40e_hmc_pd_entry *)
				sd_entry->u.pd_table.pd_entry_virt_mem.va;
			i40e_memcpy(&sd_entry->u.pd_table.pd_page_addr,
				    &mem, sizeof(struct i40e_dma_mem),
				    I40E_NONDMA_TO_NONDMA);
		} else {
			i40e_memcpy(&sd_entry->u.bp.addr,
				    &mem, sizeof(struct i40e_dma_mem),
				    I40E_NONDMA_TO_NONDMA);
			sd_entry->u.bp.sd_pd_index = sd_index;
		}
		/* initialize the sd entry */
		hmc_info->sd_table.sd_entry[sd_index].entry_type = type;

		/* increment the ref count */
		I40E_INC_SD_REFCNT(&hmc_info->sd_table);
	}
	/* Increment backing page reference count */
	if (I40E_SD_TYPE_DIRECT == sd_entry->entry_type)
		I40E_INC_BP_REFCNT(&sd_entry->u.bp);
exit:
	if (I40E_SUCCESS != ret_code)
		if (dma_mem_alloc_done)
			i40e_free_dma_mem(hw, &mem);

	return ret_code;
}
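
/*
 * Illustrative usage (a sketch, not driver code): a caller would normally
 * create the SD entry before hooking backing pages into it. The sd_idx and
 * pd_idx values are hypothetical; direct_mode_sz is only used for the
 * direct type, so any size may be passed for a paged SD:
 *
 *	ret_code = i40e_add_sd_table_entry(hw, hmc_info, sd_idx,
 *					   I40E_SD_TYPE_PAGED,
 *					   I40E_HMC_DIRECT_BP_SIZE);
 *	if (I40E_SUCCESS == ret_code)
 *		ret_code = i40e_add_pd_table_entry(hw, hmc_info, pd_idx,
 *						   NULL);
 */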

/**
 * i40e_add_pd_table_entry - Adds page descriptor to the specified table
 * @hw: pointer to our HW structure
 * @hmc_info: pointer to the HMC configuration information structure
 * @pd_index: which page descriptor index to manipulate
 * @rsrc_pg: if not NULL, use preallocated page instead of allocating new one.
 *
 * This function:
 *	1. Initializes the pd entry
 *	2. Adds the pd_entry to the pd_table
 *	3. Marks the entry valid in the i40e_hmc_pd_entry structure
 *	4. Initializes the pd_entry's ref count to 1
 * assumptions:
 *	1. The memory for pd should be pinned down, physically contiguous,
 *	   aligned on a 4K boundary, and zeroed.
 *	2. It should be 4K in size.
 **/
enum i40e_status_code i40e_add_pd_table_entry(struct i40e_hw *hw,
					      struct i40e_hmc_info *hmc_info,
					      u32 pd_index,
					      struct i40e_dma_mem *rsrc_pg)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	struct i40e_hmc_pd_table *pd_table;
	struct i40e_hmc_pd_entry *pd_entry;
	struct i40e_dma_mem mem;
	struct i40e_dma_mem *page = &mem;
	u32 sd_idx, rel_pd_idx;
	u64 page_desc;
	u64 *pd_addr;

	if (pd_index / I40E_HMC_PD_CNT_IN_SD >= hmc_info->sd_table.sd_cnt) {
		ret_code = I40E_ERR_INVALID_PAGE_DESC_INDEX;
		DEBUGOUT("i40e_add_pd_table_entry: bad pd_index\n");
		goto exit;
	}

	/* find corresponding sd */
	sd_idx = (pd_index / I40E_HMC_PD_CNT_IN_SD);
	if (I40E_SD_TYPE_PAGED !=
	    hmc_info->sd_table.sd_entry[sd_idx].entry_type)
		goto exit;

	rel_pd_idx = (pd_index % I40E_HMC_PD_CNT_IN_SD);
	pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
	pd_entry = &pd_table->pd_entry[rel_pd_idx];
	if (!pd_entry->valid) {
		if (rsrc_pg) {
			pd_entry->rsrc_pg = true;
			page = rsrc_pg;
		} else {
			/* allocate a 4K backing page */
			ret_code = i40e_allocate_dma_mem(hw, page, i40e_mem_bp,
						I40E_HMC_PAGED_BP_SIZE,
						I40E_HMC_PD_BP_BUF_ALIGNMENT);
			if (ret_code)
				goto exit;
			pd_entry->rsrc_pg = false;
		}

		i40e_memcpy(&pd_entry->bp.addr, page,
			    sizeof(struct i40e_dma_mem), I40E_NONDMA_TO_NONDMA);
		pd_entry->bp.sd_pd_index = pd_index;
		pd_entry->bp.entry_type = I40E_SD_TYPE_PAGED;
		/* Set page address and valid bit */
		page_desc = page->pa | 0x1;

		pd_addr = (u64 *)pd_table->pd_page_addr.va;
		pd_addr += rel_pd_idx;

		/* Add the backing page physical address in the pd entry */
		i40e_memcpy(pd_addr, &page_desc, sizeof(u64),
			    I40E_NONDMA_TO_DMA);

		pd_entry->sd_index = sd_idx;
		pd_entry->valid = true;
		I40E_INC_PD_REFCNT(pd_table);
	}
	I40E_INC_BP_REFCNT(&pd_entry->bp);
exit:
	return ret_code;
}
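
/*
 * Illustrative usage (a sketch, not driver code): a caller that owns a
 * suitable 4K page can pass it in through @rsrc_pg instead of having one
 * allocated here; "my_pg" is a hypothetical pinned, 4K-aligned, zeroed
 * struct i40e_dma_mem:
 *
 *	ret_code = i40e_add_pd_table_entry(hw, hmc_info, pd_idx, &my_pg);
 */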

/**
 * i40e_remove_pd_bp - remove a backing page from a page descriptor
 * @hw: pointer to our HW structure
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: the page index
 *
 * This function:
 *	1. Marks the entry in the pd table (for paged address mode) or in the
 *	   sd table (for direct address mode) invalid.
 *	2. Writes to the PMPDINV register to invalidate the backing page in
 *	   the FV cache.
 *	3. Decrements the ref count for the pd_entry.
 * assumptions:
 *	1. Caller can deallocate the memory used by backing storage after this
 *	   function returns.
 **/
enum i40e_status_code i40e_remove_pd_bp(struct i40e_hw *hw,
					struct i40e_hmc_info *hmc_info,
					u32 idx)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	struct i40e_hmc_pd_entry *pd_entry;
	struct i40e_hmc_pd_table *pd_table;
	struct i40e_hmc_sd_entry *sd_entry;
	u32 sd_idx, rel_pd_idx;
	u64 *pd_addr;

	/* calculate index */
	sd_idx = idx / I40E_HMC_PD_CNT_IN_SD;
	rel_pd_idx = idx % I40E_HMC_PD_CNT_IN_SD;
	if (sd_idx >= hmc_info->sd_table.sd_cnt) {
		ret_code = I40E_ERR_INVALID_PAGE_DESC_INDEX;
		DEBUGOUT("i40e_remove_pd_bp: bad idx\n");
		goto exit;
	}
	sd_entry = &hmc_info->sd_table.sd_entry[sd_idx];
	if (I40E_SD_TYPE_PAGED != sd_entry->entry_type) {
		ret_code = I40E_ERR_INVALID_SD_TYPE;
		DEBUGOUT("i40e_remove_pd_bp: wrong sd_entry type\n");
		goto exit;
	}

	/* get the entry and decrease its ref counter */
	pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
	pd_entry = &pd_table->pd_entry[rel_pd_idx];
	I40E_DEC_BP_REFCNT(&pd_entry->bp);
	if (pd_entry->bp.ref_cnt)
		goto exit;

	/* mark the entry invalid */
	pd_entry->valid = false;
	I40E_DEC_PD_REFCNT(pd_table);
	pd_addr = (u64 *)pd_table->pd_page_addr.va;
	pd_addr += rel_pd_idx;
	i40e_memset(pd_addr, 0, sizeof(u64), I40E_DMA_MEM);
	I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, idx);

	/* free memory here */
	if (!pd_entry->rsrc_pg)
		ret_code = i40e_free_dma_mem(hw, &(pd_entry->bp.addr));
	if (I40E_SUCCESS != ret_code)
		goto exit;
	if (!pd_table->ref_cnt)
		i40e_free_virt_mem(hw, &pd_table->pd_entry_virt_mem);
exit:
	return ret_code;
}
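
/*
 * Note on the teardown order above (an observation, not normative): the
 * 64-bit descriptor is zeroed and PMPDINV is written before the backing
 * page is freed, so hardware stops referencing the page before its memory
 * can be reused.
 */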

/**
 * i40e_prep_remove_sd_bp - Prepares to remove a backing page from a sd entry
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: the page index
 **/
enum i40e_status_code i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
					     u32 idx)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	struct i40e_hmc_sd_entry *sd_entry;

	/* get the entry and decrease its ref counter */
	sd_entry = &hmc_info->sd_table.sd_entry[idx];
	I40E_DEC_BP_REFCNT(&sd_entry->u.bp);
	if (sd_entry->u.bp.ref_cnt) {
		ret_code = I40E_ERR_NOT_READY;
		goto exit;
	}
	I40E_DEC_SD_REFCNT(&hmc_info->sd_table);

	/* mark the entry invalid */
	sd_entry->valid = false;
exit:
	return ret_code;
}

/**
 * i40e_remove_sd_bp_new - Removes a backing page from a segment descriptor
 * @hw: pointer to our hw struct
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: the page index
 * @is_pf: used to distinguish between VF and PF
 **/
enum i40e_status_code i40e_remove_sd_bp_new(struct i40e_hw *hw,
					    struct i40e_hmc_info *hmc_info,
					    u32 idx, bool is_pf)
{
	struct i40e_hmc_sd_entry *sd_entry;

	if (!is_pf)
		return I40E_NOT_SUPPORTED;

	/* get the entry and clear it */
	sd_entry = &hmc_info->sd_table.sd_entry[idx];
	I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_DIRECT);

	return i40e_free_dma_mem(hw, &(sd_entry->u.bp.addr));
}
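
/*
 * Illustrative pairing (a sketch, not driver code): the prep call only
 * drops the ref count and reports readiness; the actual clear and free
 * happen in the *_new call, which a PF-only caller would gate like this:
 *
 *	if (I40E_SUCCESS == i40e_prep_remove_sd_bp(hmc_info, sd_idx))
 *		ret_code = i40e_remove_sd_bp_new(hw, hmc_info, sd_idx, true);
 */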

/**
 * i40e_prep_remove_pd_page - Prepares to remove a PD page from sd entry.
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: segment descriptor index to find the relevant page descriptor
 **/
enum i40e_status_code i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info,
					       u32 idx)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	struct i40e_hmc_sd_entry *sd_entry;

	sd_entry = &hmc_info->sd_table.sd_entry[idx];

	if (sd_entry->u.pd_table.ref_cnt) {
		ret_code = I40E_ERR_NOT_READY;
		goto exit;
	}

	/* mark the entry invalid */
	sd_entry->valid = false;

	I40E_DEC_SD_REFCNT(&hmc_info->sd_table);
exit:
	return ret_code;
}

/**
 * i40e_remove_pd_page_new - Removes a PD page from sd entry.
 * @hw: pointer to our hw struct
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: segment descriptor index to find the relevant page descriptor
 * @is_pf: used to distinguish between VF and PF
 **/
enum i40e_status_code i40e_remove_pd_page_new(struct i40e_hw *hw,
					      struct i40e_hmc_info *hmc_info,
					      u32 idx, bool is_pf)
{
	struct i40e_hmc_sd_entry *sd_entry;

	if (!is_pf)
		return I40E_NOT_SUPPORTED;

	sd_entry = &hmc_info->sd_table.sd_entry[idx];
	I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_PAGED);

	return i40e_free_dma_mem(hw, &(sd_entry->u.pd_table.pd_page_addr));
}
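
/*
 * As with the direct-mode pair above, a sketch (not driver code) of how a
 * PF-only caller would combine the prep and remove steps for a paged SD:
 *
 *	if (I40E_SUCCESS == i40e_prep_remove_pd_page(hmc_info, sd_idx))
 *		ret_code = i40e_remove_pd_page_new(hw, hmc_info, sd_idx,
 *						   true);
 */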