6ddf8b3affe419e66c06907d1b54c423d0439352
[dpdk.git] / lib / librte_pmd_i40e / i40e / i40e_hmc.c
1 /*******************************************************************************
2
3 Copyright (c) 2013 - 2015, Intel Corporation
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9  1. Redistributions of source code must retain the above copyright notice,
10     this list of conditions and the following disclaimer.
11
12  2. Redistributions in binary form must reproduce the above copyright
13     notice, this list of conditions and the following disclaimer in the
14     documentation and/or other materials provided with the distribution.
15
16  3. Neither the name of the Intel Corporation nor the names of its
17     contributors may be used to endorse or promote products derived from
18     this software without specific prior written permission.
19
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32 ***************************************************************************/
33
34 #include "i40e_osdep.h"
35 #include "i40e_register.h"
36 #include "i40e_status.h"
37 #include "i40e_alloc.h"
38 #include "i40e_hmc.h"
39 #include "i40e_type.h"
40
41 /**
42  * i40e_add_sd_table_entry - Adds a segment descriptor to the table
43  * @hw: pointer to our hw struct
44  * @hmc_info: pointer to the HMC configuration information struct
45  * @sd_index: segment descriptor index to manipulate
46  * @type: what type of segment descriptor we're manipulating
47  * @direct_mode_sz: size to alloc in direct mode
48  **/
49 enum i40e_status_code i40e_add_sd_table_entry(struct i40e_hw *hw,
50                                               struct i40e_hmc_info *hmc_info,
51                                               u32 sd_index,
52                                               enum i40e_sd_entry_type type,
53                                               u64 direct_mode_sz)
54 {
55         enum i40e_status_code ret_code = I40E_SUCCESS;
56         struct i40e_hmc_sd_entry *sd_entry;
57         enum   i40e_memory_type mem_type;
58         bool dma_mem_alloc_done = false;
59         struct i40e_dma_mem mem;
60         u64 alloc_len;
61
62         if (NULL == hmc_info->sd_table.sd_entry) {
63                 ret_code = I40E_ERR_BAD_PTR;
64                 DEBUGOUT("i40e_add_sd_table_entry: bad sd_entry\n");
65                 goto exit;
66         }
67
68         if (sd_index >= hmc_info->sd_table.sd_cnt) {
69                 ret_code = I40E_ERR_INVALID_SD_INDEX;
70                 DEBUGOUT("i40e_add_sd_table_entry: bad sd_index\n");
71                 goto exit;
72         }
73
74         sd_entry = &hmc_info->sd_table.sd_entry[sd_index];
75         if (!sd_entry->valid) {
76                 if (I40E_SD_TYPE_PAGED == type) {
77                         mem_type = i40e_mem_pd;
78                         alloc_len = I40E_HMC_PAGED_BP_SIZE;
79                 } else {
80                         mem_type = i40e_mem_bp_jumbo;
81                         alloc_len = direct_mode_sz;
82                 }
83
84                 /* allocate a 4K pd page or 2M backing page */
85                 ret_code = i40e_allocate_dma_mem(hw, &mem, mem_type, alloc_len,
86                                                  I40E_HMC_PD_BP_BUF_ALIGNMENT);
87                 if (ret_code)
88                         goto exit;
89                 dma_mem_alloc_done = true;
90                 if (I40E_SD_TYPE_PAGED == type) {
91                         ret_code = i40e_allocate_virt_mem(hw,
92                                         &sd_entry->u.pd_table.pd_entry_virt_mem,
93                                         sizeof(struct i40e_hmc_pd_entry) * 512);
94                         if (ret_code)
95                                 goto exit;
96                         sd_entry->u.pd_table.pd_entry =
97                                 (struct i40e_hmc_pd_entry *)
98                                 sd_entry->u.pd_table.pd_entry_virt_mem.va;
99                         i40e_memcpy(&sd_entry->u.pd_table.pd_page_addr,
100                                     &mem, sizeof(struct i40e_dma_mem),
101                                     I40E_NONDMA_TO_NONDMA);
102                 } else {
103                         i40e_memcpy(&sd_entry->u.bp.addr,
104                                     &mem, sizeof(struct i40e_dma_mem),
105                                     I40E_NONDMA_TO_NONDMA);
106                         sd_entry->u.bp.sd_pd_index = sd_index;
107                 }
108                 /* initialize the sd entry */
109                 hmc_info->sd_table.sd_entry[sd_index].entry_type = type;
110
111                 /* increment the ref count */
112                 I40E_INC_SD_REFCNT(&hmc_info->sd_table);
113         }
114         /* Increment backing page reference count */
115         if (I40E_SD_TYPE_DIRECT == sd_entry->entry_type)
116                 I40E_INC_BP_REFCNT(&sd_entry->u.bp);
117 exit:
118         if (I40E_SUCCESS != ret_code)
119                 if (dma_mem_alloc_done)
120                         i40e_free_dma_mem(hw, &mem);
121
122         return ret_code;
123 }
124
125 /**
126  * i40e_add_pd_table_entry - Adds page descriptor to the specified table
127  * @hw: pointer to our HW structure
128  * @hmc_info: pointer to the HMC configuration information structure
129  * @pd_index: which page descriptor index to manipulate
130  *
131  * This function:
132  *      1. Initializes the pd entry
133  *      2. Adds pd_entry in the pd_table
134  *      3. Mark the entry valid in i40e_hmc_pd_entry structure
135  *      4. Initializes the pd_entry's ref count to 1
136  * assumptions:
137  *      1. The memory for pd should be pinned down, physically contiguous and
138  *         aligned on 4K boundary and zeroed memory.
139  *      2. It should be 4K in size.
140  **/
141 enum i40e_status_code i40e_add_pd_table_entry(struct i40e_hw *hw,
142                                               struct i40e_hmc_info *hmc_info,
143                                               u32 pd_index)
144 {
145         enum i40e_status_code ret_code = I40E_SUCCESS;
146         struct i40e_hmc_pd_table *pd_table;
147         struct i40e_hmc_pd_entry *pd_entry;
148         struct i40e_dma_mem mem;
149         u32 sd_idx, rel_pd_idx;
150         u64 *pd_addr;
151         u64 page_desc;
152
153         if (pd_index / I40E_HMC_PD_CNT_IN_SD >= hmc_info->sd_table.sd_cnt) {
154                 ret_code = I40E_ERR_INVALID_PAGE_DESC_INDEX;
155                 DEBUGOUT("i40e_add_pd_table_entry: bad pd_index\n");
156                 goto exit;
157         }
158
159         /* find corresponding sd */
160         sd_idx = (pd_index / I40E_HMC_PD_CNT_IN_SD);
161         if (I40E_SD_TYPE_PAGED !=
162             hmc_info->sd_table.sd_entry[sd_idx].entry_type)
163                 goto exit;
164
165         rel_pd_idx = (pd_index % I40E_HMC_PD_CNT_IN_SD);
166         pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
167         pd_entry = &pd_table->pd_entry[rel_pd_idx];
168         if (!pd_entry->valid) {
169                 /* allocate a 4K backing page */
170                 ret_code = i40e_allocate_dma_mem(hw, &mem, i40e_mem_bp,
171                                                  I40E_HMC_PAGED_BP_SIZE,
172                                                  I40E_HMC_PD_BP_BUF_ALIGNMENT);
173                 if (ret_code)
174                         goto exit;
175
176                 i40e_memcpy(&pd_entry->bp.addr, &mem,
177                             sizeof(struct i40e_dma_mem), I40E_NONDMA_TO_NONDMA);
178                 pd_entry->bp.sd_pd_index = pd_index;
179                 pd_entry->bp.entry_type = I40E_SD_TYPE_PAGED;
180                 /* Set page address and valid bit */
181                 page_desc = mem.pa | 0x1;
182
183                 pd_addr = (u64 *)pd_table->pd_page_addr.va;
184                 pd_addr += rel_pd_idx;
185
186                 /* Add the backing page physical address in the pd entry */
187                 i40e_memcpy(pd_addr, &page_desc, sizeof(u64),
188                             I40E_NONDMA_TO_DMA);
189
190                 pd_entry->sd_index = sd_idx;
191                 pd_entry->valid = true;
192                 I40E_INC_PD_REFCNT(pd_table);
193         }
194         I40E_INC_BP_REFCNT(&pd_entry->bp);
195 exit:
196         return ret_code;
197 }
198
/**
 * i40e_remove_pd_bp - remove a backing page from a page descriptor
 * @hw: pointer to our HW structure
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: the page index
 *
 * This function:
 *      1. Marks the entry in pd table (for paged address mode) or in sd table
 *         (for direct address mode) invalid.
 *      2. Write to register PMPDINV to invalidate the backing page in FV cache
 *      3. Decrement the ref count for the pd _entry
 * assumptions:
 *      1. Caller can deallocate the memory used by backing storage after this
 *         function returns.
 **/
enum i40e_status_code i40e_remove_pd_bp(struct i40e_hw *hw,
                                        struct i40e_hmc_info *hmc_info,
                                        u32 idx)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	struct i40e_hmc_pd_entry *pd_entry;
	struct i40e_hmc_pd_table *pd_table;
	struct i40e_hmc_sd_entry *sd_entry;
	u32 sd_idx, rel_pd_idx;
	u64 *pd_addr;

	/* calculate index */
	sd_idx = idx / I40E_HMC_PD_CNT_IN_SD;
	rel_pd_idx = idx % I40E_HMC_PD_CNT_IN_SD;
	if (sd_idx >= hmc_info->sd_table.sd_cnt) {
		ret_code = I40E_ERR_INVALID_PAGE_DESC_INDEX;
		DEBUGOUT("i40e_remove_pd_bp: bad idx\n");
		goto exit;
	}
	/* only a paged SD owns a PD table to remove pages from */
	sd_entry = &hmc_info->sd_table.sd_entry[sd_idx];
	if (I40E_SD_TYPE_PAGED != sd_entry->entry_type) {
		ret_code = I40E_ERR_INVALID_SD_TYPE;
		DEBUGOUT("i40e_remove_pd_bp: wrong sd_entry type\n");
		goto exit;
	}
	/* get the entry and decrease its ref counter */
	pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
	pd_entry = &pd_table->pd_entry[rel_pd_idx];
	I40E_DEC_BP_REFCNT(&pd_entry->bp);
	/* other users still hold this backing page: leave it in place */
	if (pd_entry->bp.ref_cnt)
		goto exit;

	/* mark the entry invalid */
	pd_entry->valid = false;
	I40E_DEC_PD_REFCNT(pd_table);
	/* zero the descriptor slot in the DMA-visible PD page first... */
	pd_addr = (u64 *)pd_table->pd_page_addr.va;
	pd_addr += rel_pd_idx;
	i40e_memset(pd_addr, 0, sizeof(u64), I40E_DMA_MEM);
	/* ...then tell HW to drop any cached copy of this descriptor */
	I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, idx);

	/* free memory here */
	ret_code = i40e_free_dma_mem(hw, &(pd_entry->bp.addr));
	if (I40E_SUCCESS != ret_code)
		goto exit;
	/* last page gone: release the software tracking array too */
	if (!pd_table->ref_cnt)
		i40e_free_virt_mem(hw, &pd_table->pd_entry_virt_mem);
exit:
	return ret_code;
}
264
265 /**
266  * i40e_prep_remove_sd_bp - Prepares to remove a backing page from a sd entry
267  * @hmc_info: pointer to the HMC configuration information structure
268  * @idx: the page index
269  **/
270 enum i40e_status_code i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
271                                              u32 idx)
272 {
273         enum i40e_status_code ret_code = I40E_SUCCESS;
274         struct i40e_hmc_sd_entry *sd_entry;
275
276         /* get the entry and decrease its ref counter */
277         sd_entry = &hmc_info->sd_table.sd_entry[idx];
278         I40E_DEC_BP_REFCNT(&sd_entry->u.bp);
279         if (sd_entry->u.bp.ref_cnt) {
280                 ret_code = I40E_ERR_NOT_READY;
281                 goto exit;
282         }
283         I40E_DEC_SD_REFCNT(&hmc_info->sd_table);
284
285         /* mark the entry invalid */
286         sd_entry->valid = false;
287 exit:
288         return ret_code;
289 }
290
291 /**
292  * i40e_remove_sd_bp_new - Removes a backing page from a segment descriptor
293  * @hw: pointer to our hw struct
294  * @hmc_info: pointer to the HMC configuration information structure
295  * @idx: the page index
296  * @is_pf: used to distinguish between VF and PF
297  **/
298 enum i40e_status_code i40e_remove_sd_bp_new(struct i40e_hw *hw,
299                                             struct i40e_hmc_info *hmc_info,
300                                             u32 idx, bool is_pf)
301 {
302         struct i40e_hmc_sd_entry *sd_entry;
303         enum i40e_status_code ret_code = I40E_SUCCESS;
304
305         /* get the entry and decrease its ref counter */
306         sd_entry = &hmc_info->sd_table.sd_entry[idx];
307         if (is_pf) {
308                 I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_DIRECT);
309         } else {
310                 ret_code = I40E_NOT_SUPPORTED;
311                 goto exit;
312         }
313         ret_code = i40e_free_dma_mem(hw, &(sd_entry->u.bp.addr));
314         if (I40E_SUCCESS != ret_code)
315                 goto exit;
316 exit:
317         return ret_code;
318 }
319
320 /**
321  * i40e_prep_remove_pd_page - Prepares to remove a PD page from sd entry.
322  * @hmc_info: pointer to the HMC configuration information structure
323  * @idx: segment descriptor index to find the relevant page descriptor
324  **/
325 enum i40e_status_code i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info,
326                                                u32 idx)
327 {
328         enum i40e_status_code ret_code = I40E_SUCCESS;
329         struct i40e_hmc_sd_entry *sd_entry;
330
331         sd_entry = &hmc_info->sd_table.sd_entry[idx];
332
333         if (sd_entry->u.pd_table.ref_cnt) {
334                 ret_code = I40E_ERR_NOT_READY;
335                 goto exit;
336         }
337
338         /* mark the entry invalid */
339         sd_entry->valid = false;
340
341         I40E_DEC_SD_REFCNT(&hmc_info->sd_table);
342 exit:
343         return ret_code;
344 }
345
346 /**
347  * i40e_remove_pd_page_new - Removes a PD page from sd entry.
348  * @hw: pointer to our hw struct
349  * @hmc_info: pointer to the HMC configuration information structure
350  * @idx: segment descriptor index to find the relevant page descriptor
351  * @is_pf: used to distinguish between VF and PF
352  **/
353 enum i40e_status_code i40e_remove_pd_page_new(struct i40e_hw *hw,
354                                               struct i40e_hmc_info *hmc_info,
355                                               u32 idx, bool is_pf)
356 {
357         enum i40e_status_code ret_code = I40E_SUCCESS;
358         struct i40e_hmc_sd_entry *sd_entry;
359
360         sd_entry = &hmc_info->sd_table.sd_entry[idx];
361         if (is_pf) {
362                 I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_PAGED);
363         } else {
364                 ret_code = I40E_NOT_SUPPORTED;
365                 goto exit;
366         }
367         /* free memory here */
368         ret_code = i40e_free_dma_mem(hw, &(sd_entry->u.pd_table.pd_page_addr));
369         if (I40E_SUCCESS != ret_code)
370                 goto exit;
371 exit:
372         return ret_code;
373 }