/*******************************************************************************

Copyright (c) 2013 - 2015, Intel Corporation
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

 1. Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

 2. Redistributions in binary form must reproduce the above copyright
    notice, this list of conditions and the following disclaimer in the
    documentation and/or other materials provided with the distribution.

 3. Neither the name of the Intel Corporation nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

***************************************************************************/
#include "i40e_osdep.h"
#include "i40e_register.h"
#include "i40e_status.h"
#include "i40e_alloc.h"
#include "i40e_hmc.h"
#include "i40e_type.h"

/**
 * i40e_add_sd_table_entry - Adds a segment descriptor to the table
 * @hw: pointer to our hw struct
 * @hmc_info: pointer to the HMC configuration information struct
 * @sd_index: segment descriptor index to manipulate
 * @type: what type of segment descriptor we're manipulating
 * @direct_mode_sz: size to alloc in direct mode
 **/
enum i40e_status_code i40e_add_sd_table_entry(struct i40e_hw *hw,
					      struct i40e_hmc_info *hmc_info,
					      u32 sd_index,
					      enum i40e_sd_entry_type type,
					      u64 direct_mode_sz)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	struct i40e_hmc_sd_entry *sd_entry;
	enum i40e_memory_type mem_type;
	bool dma_mem_alloc_done = false;
	struct i40e_dma_mem mem;
	u64 alloc_len;

	if (NULL == hmc_info->sd_table.sd_entry) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_add_sd_table_entry: bad sd_entry\n");
		goto exit;
	}

	if (sd_index >= hmc_info->sd_table.sd_cnt) {
		ret_code = I40E_ERR_INVALID_SD_INDEX;
		DEBUGOUT("i40e_add_sd_table_entry: bad sd_index\n");
		goto exit;
	}

	sd_entry = &hmc_info->sd_table.sd_entry[sd_index];
	if (!sd_entry->valid) {
		if (I40E_SD_TYPE_PAGED == type) {
			mem_type = i40e_mem_pd;
			alloc_len = I40E_HMC_PAGED_BP_SIZE;
		} else {
			mem_type = i40e_mem_bp_jumbo;
			alloc_len = direct_mode_sz;
		}

		/* allocate a 4K pd page or 2M backing page */
		ret_code = i40e_allocate_dma_mem(hw, &mem, mem_type, alloc_len,
						 I40E_HMC_PD_BP_BUF_ALIGNMENT);
		if (ret_code)
			goto exit;
		dma_mem_alloc_done = true;
		if (I40E_SD_TYPE_PAGED == type) {
			ret_code = i40e_allocate_virt_mem(hw,
					&sd_entry->u.pd_table.pd_entry_virt_mem,
					sizeof(struct i40e_hmc_pd_entry) * 512);
			if (ret_code)
				goto exit;
			sd_entry->u.pd_table.pd_entry =
				(struct i40e_hmc_pd_entry *)
				sd_entry->u.pd_table.pd_entry_virt_mem.va;
			i40e_memcpy(&sd_entry->u.pd_table.pd_page_addr,
				    &mem, sizeof(struct i40e_dma_mem),
				    I40E_NONDMA_TO_NONDMA);
		} else {
			i40e_memcpy(&sd_entry->u.bp.addr,
				    &mem, sizeof(struct i40e_dma_mem),
				    I40E_NONDMA_TO_NONDMA);
			sd_entry->u.bp.sd_pd_index = sd_index;
		}
		/* initialize the sd entry */
		hmc_info->sd_table.sd_entry[sd_index].entry_type = type;

		/* increment the ref count */
		I40E_INC_SD_REFCNT(&hmc_info->sd_table);
	}
	/* Increment backing page reference count */
	if (I40E_SD_TYPE_DIRECT == sd_entry->entry_type)
		I40E_INC_BP_REFCNT(&sd_entry->u.bp);
exit:
	if (I40E_SUCCESS != ret_code)
		if (dma_mem_alloc_done)
			i40e_free_dma_mem(hw, &mem);

	return ret_code;
}
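
/*
 * Usage sketch, not part of the driver: callers such as
 * i40e_create_lan_hmc_object() invoke i40e_add_sd_table_entry() once per
 * segment descriptor before populating page descriptors beneath it.  The
 * wrapper below and the I40E_HMC_USAGE_EXAMPLES guard are hypothetical
 * and never built.
 */
#ifdef I40E_HMC_USAGE_EXAMPLES
static enum i40e_status_code
i40e_example_add_direct_sd(struct i40e_hw *hw,
			   struct i40e_hmc_info *hmc_info, u32 sd_index)
{
	/* a direct-mode SD is backed by a single 2M page */
	return i40e_add_sd_table_entry(hw, hmc_info, sd_index,
				       I40E_SD_TYPE_DIRECT,
				       I40E_HMC_DIRECT_BP_SIZE);
}
#endif /* I40E_HMC_USAGE_EXAMPLES */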

/**
 * i40e_add_pd_table_entry - Adds page descriptor to the specified table
 * @hw: pointer to our HW structure
 * @hmc_info: pointer to the HMC configuration information structure
 * @pd_index: which page descriptor index to manipulate
 * @rsrc_pg: if not NULL, use preallocated page instead of allocating new one.
 *
 * This function:
 *	1. Initializes the pd entry
 *	2. Adds pd_entry in the pd_table
 *	3. Marks the entry valid in the i40e_hmc_pd_entry structure
 *	4. Initializes the pd_entry's ref count to 1
 * assumptions:
 *	1. The memory for pd should be pinned down, physically contiguous and
 *	   aligned on 4K boundary and zeroed memory.
 *	2. It should be 4K in size.
 **/
enum i40e_status_code i40e_add_pd_table_entry(struct i40e_hw *hw,
					      struct i40e_hmc_info *hmc_info,
					      u32 pd_index,
					      struct i40e_dma_mem *rsrc_pg)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	struct i40e_hmc_pd_table *pd_table;
	struct i40e_hmc_pd_entry *pd_entry;
	struct i40e_dma_mem mem;
	struct i40e_dma_mem *page = &mem;
	u32 sd_idx, rel_pd_idx;
	u64 *pd_addr;
	u64 page_desc;

	if (pd_index / I40E_HMC_PD_CNT_IN_SD >= hmc_info->sd_table.sd_cnt) {
		ret_code = I40E_ERR_INVALID_PAGE_DESC_INDEX;
		DEBUGOUT("i40e_add_pd_table_entry: bad pd_index\n");
		goto exit;
	}

	/* find corresponding sd */
	sd_idx = (pd_index / I40E_HMC_PD_CNT_IN_SD);
	if (I40E_SD_TYPE_PAGED !=
	    hmc_info->sd_table.sd_entry[sd_idx].entry_type)
		goto exit;

	rel_pd_idx = (pd_index % I40E_HMC_PD_CNT_IN_SD);
	pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
	pd_entry = &pd_table->pd_entry[rel_pd_idx];
	if (!pd_entry->valid) {
		if (rsrc_pg) {
			pd_entry->rsrc_pg = true;
			page = rsrc_pg;
		} else {
			/* allocate a 4K backing page */
			ret_code = i40e_allocate_dma_mem(hw, page, i40e_mem_bp,
						I40E_HMC_PAGED_BP_SIZE,
						I40E_HMC_PD_BP_BUF_ALIGNMENT);
			if (ret_code)
				goto exit;
			pd_entry->rsrc_pg = false;
		}

		i40e_memcpy(&pd_entry->bp.addr, page,
			    sizeof(struct i40e_dma_mem), I40E_NONDMA_TO_NONDMA);
		pd_entry->bp.sd_pd_index = pd_index;
		pd_entry->bp.entry_type = I40E_SD_TYPE_PAGED;
		/* Set page address and valid bit */
		page_desc = page->pa | 0x1;

		pd_addr = (u64 *)pd_table->pd_page_addr.va;
		pd_addr += rel_pd_idx;

		/* Add the backing page physical address in the pd entry */
		i40e_memcpy(pd_addr, &page_desc, sizeof(u64),
			    I40E_NONDMA_TO_DMA);

		pd_entry->sd_index = sd_idx;
		pd_entry->valid = true;
		I40E_INC_PD_REFCNT(pd_table);
	}
	I40E_INC_BP_REFCNT(&pd_entry->bp);
exit:
	return ret_code;
}
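
/*
 * Usage sketch, not part of the driver: with @rsrc_pg == NULL the function
 * above allocates a fresh 4K backing page; passing a preallocated,
 * 4K-aligned, zeroed DMA page lets callers share one reserved page across
 * objects.  The wrapper name and guard macro are hypothetical.
 */
#ifdef I40E_HMC_USAGE_EXAMPLES
static enum i40e_status_code
i40e_example_add_pd(struct i40e_hw *hw, struct i40e_hmc_info *hmc_info,
		    u32 pd_index, struct i40e_dma_mem *prealloc_pg)
{
	/* prealloc_pg may be NULL; the PD entry then owns its own page */
	return i40e_add_pd_table_entry(hw, hmc_info, pd_index, prealloc_pg);
}
#endif /* I40E_HMC_USAGE_EXAMPLES */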

/**
 * i40e_remove_pd_bp - remove a backing page from a page descriptor
 * @hw: pointer to our HW structure
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: the page index
 *
 * This function:
 *	1. Marks the entry in the pd table (for paged address mode) or in the
 *	   sd table (for direct address mode) invalid.
 *	2. Writes to register PMPDINV to invalidate the backing page in FV cache
 *	3. Decrements the ref count for the pd_entry
 * assumptions:
 *	1. Caller can deallocate the memory used by backing storage after this
 *	   function returns.
 **/
enum i40e_status_code i40e_remove_pd_bp(struct i40e_hw *hw,
					struct i40e_hmc_info *hmc_info,
					u32 idx)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	struct i40e_hmc_pd_entry *pd_entry;
	struct i40e_hmc_pd_table *pd_table;
	struct i40e_hmc_sd_entry *sd_entry;
	u32 sd_idx, rel_pd_idx;
	u64 *pd_addr;

	/* calculate index */
	sd_idx = idx / I40E_HMC_PD_CNT_IN_SD;
	rel_pd_idx = idx % I40E_HMC_PD_CNT_IN_SD;
	if (sd_idx >= hmc_info->sd_table.sd_cnt) {
		ret_code = I40E_ERR_INVALID_PAGE_DESC_INDEX;
		DEBUGOUT("i40e_remove_pd_bp: bad idx\n");
		goto exit;
	}
	sd_entry = &hmc_info->sd_table.sd_entry[sd_idx];
	if (I40E_SD_TYPE_PAGED != sd_entry->entry_type) {
		ret_code = I40E_ERR_INVALID_SD_TYPE;
		DEBUGOUT("i40e_remove_pd_bp: wrong sd_entry type\n");
		goto exit;
	}
	/* get the entry and decrease its ref counter */
	pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
	pd_entry = &pd_table->pd_entry[rel_pd_idx];
	I40E_DEC_BP_REFCNT(&pd_entry->bp);
	if (pd_entry->bp.ref_cnt)
		goto exit;

	/* mark the entry invalid */
	pd_entry->valid = false;
	I40E_DEC_PD_REFCNT(pd_table);
	pd_addr = (u64 *)pd_table->pd_page_addr.va;
	pd_addr += rel_pd_idx;
	i40e_memset(pd_addr, 0, sizeof(u64), I40E_DMA_MEM);
	I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, idx);

	/* free memory here */
	if (!pd_entry->rsrc_pg)
		ret_code = i40e_free_dma_mem(hw, &(pd_entry->bp.addr));
	if (I40E_SUCCESS != ret_code)
		goto exit;
	if (!pd_table->ref_cnt)
		i40e_free_virt_mem(hw, &pd_table->pd_entry_virt_mem);
exit:
	return ret_code;
}
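
/*
 * Usage sketch, not part of the driver: teardown paths such as
 * i40e_delete_lan_hmc_object() walk the page descriptors backing an
 * object and remove each one; a PD whose ref count is still non-zero is
 * simply left in place for its remaining users.  The wrapper below and
 * the guard macro are hypothetical.
 */
#ifdef I40E_HMC_USAGE_EXAMPLES
static void i40e_example_remove_pd_range(struct i40e_hw *hw,
					 struct i40e_hmc_info *hmc_info,
					 u32 first_pd, u32 pd_cnt)
{
	u32 i;

	for (i = first_pd; i < first_pd + pd_cnt; i++)
		(void)i40e_remove_pd_bp(hw, hmc_info, i);
}
#endif /* I40E_HMC_USAGE_EXAMPLES */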

/**
 * i40e_prep_remove_sd_bp - Prepares to remove a backing page from a sd entry
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: the page index
 **/
enum i40e_status_code i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
					     u32 idx)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	struct i40e_hmc_sd_entry *sd_entry;

	/* get the entry and decrease its ref counter */
	sd_entry = &hmc_info->sd_table.sd_entry[idx];
	I40E_DEC_BP_REFCNT(&sd_entry->u.bp);
	if (sd_entry->u.bp.ref_cnt) {
		ret_code = I40E_ERR_NOT_READY;
		goto exit;
	}
	I40E_DEC_SD_REFCNT(&hmc_info->sd_table);

	/* mark the entry invalid */
	sd_entry->valid = false;
exit:
	return ret_code;
}

/**
 * i40e_remove_sd_bp_new - Removes a backing page from a segment descriptor
 * @hw: pointer to our hw struct
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: the page index
 * @is_pf: used to distinguish between VF and PF
 **/
enum i40e_status_code i40e_remove_sd_bp_new(struct i40e_hw *hw,
					    struct i40e_hmc_info *hmc_info,
					    u32 idx, bool is_pf)
{
	struct i40e_hmc_sd_entry *sd_entry;

	if (!is_pf)
		return I40E_NOT_SUPPORTED;

	/* get the entry and decrease its ref counter */
	sd_entry = &hmc_info->sd_table.sd_entry[idx];
	I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_DIRECT);

	return i40e_free_dma_mem(hw, &(sd_entry->u.bp.addr));
}
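
/*
 * Usage sketch, not part of the driver: removal of a direct-mode SD is
 * split into a "prep" step, which drops the ref count and bails out with
 * I40E_ERR_NOT_READY while the backing page is still referenced, and a
 * commit step that clears the hardware SD entry and frees the DMA memory.
 * The wrapper below and the guard macro are hypothetical.
 */
#ifdef I40E_HMC_USAGE_EXAMPLES
static enum i40e_status_code
i40e_example_remove_direct_sd(struct i40e_hw *hw,
			      struct i40e_hmc_info *hmc_info, u32 idx)
{
	enum i40e_status_code ret_code;

	ret_code = i40e_prep_remove_sd_bp(hmc_info, idx);
	if (I40E_SUCCESS != ret_code)
		return ret_code;	/* backing page still in use */

	/* PF-only: VFs cannot clear SD entries directly */
	return i40e_remove_sd_bp_new(hw, hmc_info, idx, true);
}
#endif /* I40E_HMC_USAGE_EXAMPLES */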

/**
 * i40e_prep_remove_pd_page - Prepares to remove a PD page from sd entry.
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: segment descriptor index to find the relevant page descriptor
 **/
enum i40e_status_code i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info,
					       u32 idx)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	struct i40e_hmc_sd_entry *sd_entry;

	sd_entry = &hmc_info->sd_table.sd_entry[idx];

	if (sd_entry->u.pd_table.ref_cnt) {
		ret_code = I40E_ERR_NOT_READY;
		goto exit;
	}

	/* mark the entry invalid */
	sd_entry->valid = false;

	I40E_DEC_SD_REFCNT(&hmc_info->sd_table);
exit:
	return ret_code;
}

/**
 * i40e_remove_pd_page_new - Removes a PD page from sd entry.
 * @hw: pointer to our hw struct
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: segment descriptor index to find the relevant page descriptor
 * @is_pf: used to distinguish between VF and PF
 **/
enum i40e_status_code i40e_remove_pd_page_new(struct i40e_hw *hw,
					      struct i40e_hmc_info *hmc_info,
					      u32 idx, bool is_pf)
{
	struct i40e_hmc_sd_entry *sd_entry;

	if (!is_pf)
		return I40E_NOT_SUPPORTED;

	sd_entry = &hmc_info->sd_table.sd_entry[idx];
	I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_PAGED);

	return i40e_free_dma_mem(hw, &(sd_entry->u.pd_table.pd_page_addr));
}
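
/*
 * Usage sketch, not part of the driver: the paged case follows the same
 * prep/commit split as the direct case above, except the final free
 * releases the PD page itself rather than a data backing page.  The
 * wrapper below and the guard macro are hypothetical.
 */
#ifdef I40E_HMC_USAGE_EXAMPLES
static enum i40e_status_code
i40e_example_remove_pd_page(struct i40e_hw *hw,
			    struct i40e_hmc_info *hmc_info, u32 idx)
{
	enum i40e_status_code ret_code;

	ret_code = i40e_prep_remove_pd_page(hmc_info, idx);
	if (I40E_SUCCESS != ret_code)
		return ret_code;	/* PD entries still reference this page */

	return i40e_remove_pd_page_new(hw, hmc_info, idx, true);
}
#endif /* I40E_HMC_USAGE_EXAMPLES */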