/*******************************************************************************

Copyright (c) 2013 - 2015, Intel Corporation

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

 1. Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

 2. Redistributions in binary form must reproduce the above copyright
    notice, this list of conditions and the following disclaimer in the
    documentation and/or other materials provided with the distribution.

 3. Neither the name of the Intel Corporation nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

***************************************************************************/
#include "i40e_osdep.h"
#include "i40e_register.h"
#include "i40e_type.h"
#include "i40e_hmc.h"
#include "i40e_lan_hmc.h"
#include "i40e_prototype.h"
/* lan specific interface functions */
44 * i40e_align_l2obj_base - aligns base object pointer to 512 bytes
45 * @offset: base address offset needing alignment
47 * Aligns the layer 2 function private memory so it's 512-byte aligned.
49 STATIC u64 i40e_align_l2obj_base(u64 offset)
51 u64 aligned_offset = offset;
53 if ((offset % I40E_HMC_L2OBJ_BASE_ALIGNMENT) > 0)
54 aligned_offset += (I40E_HMC_L2OBJ_BASE_ALIGNMENT -
55 (offset % I40E_HMC_L2OBJ_BASE_ALIGNMENT));
57 return aligned_offset;
61 * i40e_calculate_l2fpm_size - calculates layer 2 FPM memory size
62 * @txq_num: number of Tx queues needing backing context
63 * @rxq_num: number of Rx queues needing backing context
64 * @fcoe_cntx_num: amount of FCoE statefull contexts needing backing context
65 * @fcoe_filt_num: number of FCoE filters needing backing context
67 * Calculates the maximum amount of memory for the function required, based
68 * on the number of resources it must provide context for.
70 u64 i40e_calculate_l2fpm_size(u32 txq_num, u32 rxq_num,
71 u32 fcoe_cntx_num, u32 fcoe_filt_num)
75 fpm_size = txq_num * I40E_HMC_OBJ_SIZE_TXQ;
76 fpm_size = i40e_align_l2obj_base(fpm_size);
78 fpm_size += (rxq_num * I40E_HMC_OBJ_SIZE_RXQ);
79 fpm_size = i40e_align_l2obj_base(fpm_size);
81 fpm_size += (fcoe_cntx_num * I40E_HMC_OBJ_SIZE_FCOE_CNTX);
82 fpm_size = i40e_align_l2obj_base(fpm_size);
84 fpm_size += (fcoe_filt_num * I40E_HMC_OBJ_SIZE_FCOE_FILT);
85 fpm_size = i40e_align_l2obj_base(fpm_size);
91 * i40e_init_lan_hmc - initialize i40e_hmc_info struct
92 * @hw: pointer to the HW structure
93 * @txq_num: number of Tx queues needing backing context
94 * @rxq_num: number of Rx queues needing backing context
95 * @fcoe_cntx_num: amount of FCoE statefull contexts needing backing context
96 * @fcoe_filt_num: number of FCoE filters needing backing context
98 * This function will be called once per physical function initialization.
99 * It will fill out the i40e_hmc_obj_info structure for LAN objects based on
100 * the driver's provided input, as well as information from the HMC itself
104 * - HMC Resource Profile has been selected before calling this function.
106 enum i40e_status_code i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
107 u32 rxq_num, u32 fcoe_cntx_num,
110 struct i40e_hmc_obj_info *obj, *full_obj;
111 enum i40e_status_code ret_code = I40E_SUCCESS;
115 hw->hmc.signature = I40E_HMC_INFO_SIGNATURE;
116 hw->hmc.hmc_fn_id = hw->pf_id;
118 /* allocate memory for hmc_obj */
119 ret_code = i40e_allocate_virt_mem(hw, &hw->hmc.hmc_obj_virt_mem,
120 sizeof(struct i40e_hmc_obj_info) * I40E_HMC_LAN_MAX);
122 goto init_lan_hmc_out;
123 hw->hmc.hmc_obj = (struct i40e_hmc_obj_info *)
124 hw->hmc.hmc_obj_virt_mem.va;
126 /* The full object will be used to create the LAN HMC SD */
127 full_obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_FULL];
128 full_obj->max_cnt = 0;
133 /* Tx queue context information */
134 obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_TX];
135 obj->max_cnt = rd32(hw, I40E_GLHMC_LANQMAX);
138 size_exp = rd32(hw, I40E_GLHMC_LANTXOBJSZ);
139 obj->size = (u64)1 << size_exp;
141 /* validate values requested by driver don't exceed HMC capacity */
142 if (txq_num > obj->max_cnt) {
143 ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
144 DEBUGOUT3("i40e_init_lan_hmc: Tx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
145 txq_num, obj->max_cnt, ret_code);
146 goto init_lan_hmc_out;
149 /* aggregate values into the full LAN object for later */
150 full_obj->max_cnt += obj->max_cnt;
151 full_obj->cnt += obj->cnt;
153 /* Rx queue context information */
154 obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_RX];
155 obj->max_cnt = rd32(hw, I40E_GLHMC_LANQMAX);
157 obj->base = hw->hmc.hmc_obj[I40E_HMC_LAN_TX].base +
158 (hw->hmc.hmc_obj[I40E_HMC_LAN_TX].cnt *
159 hw->hmc.hmc_obj[I40E_HMC_LAN_TX].size);
160 obj->base = i40e_align_l2obj_base(obj->base);
161 size_exp = rd32(hw, I40E_GLHMC_LANRXOBJSZ);
162 obj->size = (u64)1 << size_exp;
164 /* validate values requested by driver don't exceed HMC capacity */
165 if (rxq_num > obj->max_cnt) {
166 ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
167 DEBUGOUT3("i40e_init_lan_hmc: Rx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
168 rxq_num, obj->max_cnt, ret_code);
169 goto init_lan_hmc_out;
172 /* aggregate values into the full LAN object for later */
173 full_obj->max_cnt += obj->max_cnt;
174 full_obj->cnt += obj->cnt;
176 /* FCoE context information */
177 obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX];
178 obj->max_cnt = rd32(hw, I40E_GLHMC_FCOEMAX);
179 obj->cnt = fcoe_cntx_num;
180 obj->base = hw->hmc.hmc_obj[I40E_HMC_LAN_RX].base +
181 (hw->hmc.hmc_obj[I40E_HMC_LAN_RX].cnt *
182 hw->hmc.hmc_obj[I40E_HMC_LAN_RX].size);
183 obj->base = i40e_align_l2obj_base(obj->base);
184 size_exp = rd32(hw, I40E_GLHMC_FCOEDDPOBJSZ);
185 obj->size = (u64)1 << size_exp;
187 /* validate values requested by driver don't exceed HMC capacity */
188 if (fcoe_cntx_num > obj->max_cnt) {
189 ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
190 DEBUGOUT3("i40e_init_lan_hmc: FCoE context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
191 fcoe_cntx_num, obj->max_cnt, ret_code);
192 goto init_lan_hmc_out;
195 /* aggregate values into the full LAN object for later */
196 full_obj->max_cnt += obj->max_cnt;
197 full_obj->cnt += obj->cnt;
199 /* FCoE filter information */
200 obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_FILT];
201 obj->max_cnt = rd32(hw, I40E_GLHMC_FCOEFMAX);
202 obj->cnt = fcoe_filt_num;
203 obj->base = hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].base +
204 (hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].cnt *
205 hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].size);
206 obj->base = i40e_align_l2obj_base(obj->base);
207 size_exp = rd32(hw, I40E_GLHMC_FCOEFOBJSZ);
208 obj->size = (u64)1 << size_exp;
210 /* validate values requested by driver don't exceed HMC capacity */
211 if (fcoe_filt_num > obj->max_cnt) {
212 ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
213 DEBUGOUT3("i40e_init_lan_hmc: FCoE filter: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
214 fcoe_filt_num, obj->max_cnt, ret_code);
215 goto init_lan_hmc_out;
218 /* aggregate values into the full LAN object for later */
219 full_obj->max_cnt += obj->max_cnt;
220 full_obj->cnt += obj->cnt;
222 hw->hmc.first_sd_index = 0;
223 hw->hmc.sd_table.ref_cnt = 0;
224 l2fpm_size = i40e_calculate_l2fpm_size(txq_num, rxq_num, fcoe_cntx_num,
226 if (NULL == hw->hmc.sd_table.sd_entry) {
227 hw->hmc.sd_table.sd_cnt = (u32)
228 (l2fpm_size + I40E_HMC_DIRECT_BP_SIZE - 1) /
229 I40E_HMC_DIRECT_BP_SIZE;
231 /* allocate the sd_entry members in the sd_table */
232 ret_code = i40e_allocate_virt_mem(hw, &hw->hmc.sd_table.addr,
233 (sizeof(struct i40e_hmc_sd_entry) *
234 hw->hmc.sd_table.sd_cnt));
236 goto init_lan_hmc_out;
237 hw->hmc.sd_table.sd_entry =
238 (struct i40e_hmc_sd_entry *)hw->hmc.sd_table.addr.va;
240 /* store in the LAN full object for later */
241 full_obj->size = l2fpm_size;
248 * i40e_remove_pd_page - Remove a page from the page descriptor table
249 * @hw: pointer to the HW structure
250 * @hmc_info: pointer to the HMC configuration information structure
251 * @idx: segment descriptor index to find the relevant page descriptor
254 * 1. Marks the entry in pd table (for paged address mode) invalid
255 * 2. write to register PMPDINV to invalidate the backing page in FV cache
256 * 3. Decrement the ref count for pd_entry
258 * 1. caller can deallocate the memory used by pd after this function
261 STATIC enum i40e_status_code i40e_remove_pd_page(struct i40e_hw *hw,
262 struct i40e_hmc_info *hmc_info,
265 enum i40e_status_code ret_code = I40E_SUCCESS;
267 if (i40e_prep_remove_pd_page(hmc_info, idx) == I40E_SUCCESS)
268 ret_code = i40e_remove_pd_page_new(hw, hmc_info, idx, true);
274 * i40e_remove_sd_bp - remove a backing page from a segment descriptor
275 * @hw: pointer to our HW structure
276 * @hmc_info: pointer to the HMC configuration information structure
277 * @idx: the page index
280 * 1. Marks the entry in sd table (for direct address mode) invalid
281 * 2. write to register PMSDCMD, PMSDDATALOW(PMSDDATALOW.PMSDVALID set
282 * to 0) and PMSDDATAHIGH to invalidate the sd page
283 * 3. Decrement the ref count for the sd_entry
285 * 1. caller can deallocate the memory used by backing storage after this
288 STATIC enum i40e_status_code i40e_remove_sd_bp(struct i40e_hw *hw,
289 struct i40e_hmc_info *hmc_info,
292 enum i40e_status_code ret_code = I40E_SUCCESS;
294 if (i40e_prep_remove_sd_bp(hmc_info, idx) == I40E_SUCCESS)
295 ret_code = i40e_remove_sd_bp_new(hw, hmc_info, idx, true);
301 * i40e_create_lan_hmc_object - allocate backing store for hmc objects
302 * @hw: pointer to the HW structure
303 * @info: pointer to i40e_hmc_create_obj_info struct
305 * This will allocate memory for PDs and backing pages and populate
306 * the sd and pd entries.
308 enum i40e_status_code i40e_create_lan_hmc_object(struct i40e_hw *hw,
309 struct i40e_hmc_lan_create_obj_info *info)
311 enum i40e_status_code ret_code = I40E_SUCCESS;
312 struct i40e_hmc_sd_entry *sd_entry;
313 u32 pd_idx1 = 0, pd_lmt1 = 0;
314 u32 pd_idx = 0, pd_lmt = 0;
315 bool pd_error = false;
321 ret_code = I40E_ERR_BAD_PTR;
322 DEBUGOUT("i40e_create_lan_hmc_object: bad info ptr\n");
325 if (NULL == info->hmc_info) {
326 ret_code = I40E_ERR_BAD_PTR;
327 DEBUGOUT("i40e_create_lan_hmc_object: bad hmc_info ptr\n");
330 if (I40E_HMC_INFO_SIGNATURE != info->hmc_info->signature) {
331 ret_code = I40E_ERR_BAD_PTR;
332 DEBUGOUT("i40e_create_lan_hmc_object: bad signature\n");
336 if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
337 ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
338 DEBUGOUT1("i40e_create_lan_hmc_object: returns error %d\n",
342 if ((info->start_idx + info->count) >
343 info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
344 ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
345 DEBUGOUT1("i40e_create_lan_hmc_object: returns error %d\n",
350 /* find sd index and limit */
351 I40E_FIND_SD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
352 info->start_idx, info->count,
354 if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
355 sd_lmt > info->hmc_info->sd_table.sd_cnt) {
356 ret_code = I40E_ERR_INVALID_SD_INDEX;
360 I40E_FIND_PD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
361 info->start_idx, info->count, &pd_idx,
364 /* This is to cover for cases where you may not want to have an SD with
365 * the full 2M memory but something smaller. By not filling out any
366 * size, the function will default the SD size to be 2M.
368 if (info->direct_mode_sz == 0)
369 sd_size = I40E_HMC_DIRECT_BP_SIZE;
371 sd_size = info->direct_mode_sz;
373 /* check if all the sds are valid. If not, allocate a page and
376 for (j = sd_idx; j < sd_lmt; j++) {
377 /* update the sd table entry */
378 ret_code = i40e_add_sd_table_entry(hw, info->hmc_info, j,
381 if (I40E_SUCCESS != ret_code)
383 sd_entry = &info->hmc_info->sd_table.sd_entry[j];
384 if (I40E_SD_TYPE_PAGED == sd_entry->entry_type) {
385 /* check if all the pds in this sd are valid. If not,
386 * allocate a page and initialize it.
389 /* find pd_idx and pd_lmt in this sd */
390 pd_idx1 = max(pd_idx, (j * I40E_HMC_MAX_BP_COUNT));
391 pd_lmt1 = min(pd_lmt,
392 ((j + 1) * I40E_HMC_MAX_BP_COUNT));
393 for (i = pd_idx1; i < pd_lmt1; i++) {
394 /* update the pd table entry */
395 ret_code = i40e_add_pd_table_entry(hw,
398 if (I40E_SUCCESS != ret_code) {
404 /* remove the backing pages from pd_idx1 to i */
405 while (i && (i > pd_idx1)) {
406 i40e_remove_pd_bp(hw, info->hmc_info,
412 if (!sd_entry->valid) {
413 sd_entry->valid = true;
414 switch (sd_entry->entry_type) {
415 case I40E_SD_TYPE_PAGED:
416 I40E_SET_PF_SD_ENTRY(hw,
417 sd_entry->u.pd_table.pd_page_addr.pa,
418 j, sd_entry->entry_type);
420 case I40E_SD_TYPE_DIRECT:
421 I40E_SET_PF_SD_ENTRY(hw, sd_entry->u.bp.addr.pa,
422 j, sd_entry->entry_type);
425 ret_code = I40E_ERR_INVALID_SD_TYPE;
433 /* cleanup for sd entries from j to sd_idx */
434 while (j && (j > sd_idx)) {
435 sd_entry = &info->hmc_info->sd_table.sd_entry[j - 1];
436 switch (sd_entry->entry_type) {
437 case I40E_SD_TYPE_PAGED:
438 pd_idx1 = max(pd_idx,
439 ((j - 1) * I40E_HMC_MAX_BP_COUNT));
440 pd_lmt1 = min(pd_lmt, (j * I40E_HMC_MAX_BP_COUNT));
441 for (i = pd_idx1; i < pd_lmt1; i++) {
442 i40e_remove_pd_bp(hw, info->hmc_info, i);
444 i40e_remove_pd_page(hw, info->hmc_info, (j - 1));
446 case I40E_SD_TYPE_DIRECT:
447 i40e_remove_sd_bp(hw, info->hmc_info, (j - 1));
450 ret_code = I40E_ERR_INVALID_SD_TYPE;
460 * i40e_configure_lan_hmc - prepare the HMC backing store
461 * @hw: pointer to the hw structure
462 * @model: the model for the layout of the SD/PD tables
464 * - This function will be called once per physical function initialization.
465 * - This function will be called after i40e_init_lan_hmc() and before
466 * any LAN/FCoE HMC objects can be created.
468 enum i40e_status_code i40e_configure_lan_hmc(struct i40e_hw *hw,
469 enum i40e_hmc_model model)
471 struct i40e_hmc_lan_create_obj_info info;
472 u8 hmc_fn_id = hw->hmc.hmc_fn_id;
473 struct i40e_hmc_obj_info *obj;
474 enum i40e_status_code ret_code = I40E_SUCCESS;
476 /* Initialize part of the create object info struct */
477 info.hmc_info = &hw->hmc;
478 info.rsrc_type = I40E_HMC_LAN_FULL;
480 info.direct_mode_sz = hw->hmc.hmc_obj[I40E_HMC_LAN_FULL].size;
482 /* Build the SD entry for the LAN objects */
484 case I40E_HMC_MODEL_DIRECT_PREFERRED:
485 case I40E_HMC_MODEL_DIRECT_ONLY:
486 info.entry_type = I40E_SD_TYPE_DIRECT;
487 /* Make one big object, a single SD */
489 ret_code = i40e_create_lan_hmc_object(hw, &info);
490 if ((ret_code != I40E_SUCCESS) && (model == I40E_HMC_MODEL_DIRECT_PREFERRED))
492 else if (ret_code != I40E_SUCCESS)
493 goto configure_lan_hmc_out;
494 /* else clause falls through the break */
496 case I40E_HMC_MODEL_PAGED_ONLY:
498 info.entry_type = I40E_SD_TYPE_PAGED;
499 /* Make one big object in the PD table */
501 ret_code = i40e_create_lan_hmc_object(hw, &info);
502 if (ret_code != I40E_SUCCESS)
503 goto configure_lan_hmc_out;
506 /* unsupported type */
507 ret_code = I40E_ERR_INVALID_SD_TYPE;
508 DEBUGOUT1("i40e_configure_lan_hmc: Unknown SD type: %d\n",
510 goto configure_lan_hmc_out;
513 /* Configure and program the FPM registers so objects can be created */
516 obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_TX];
517 wr32(hw, I40E_GLHMC_LANTXBASE(hmc_fn_id),
518 (u32)((obj->base & I40E_GLHMC_LANTXBASE_FPMLANTXBASE_MASK) / 512));
519 wr32(hw, I40E_GLHMC_LANTXCNT(hmc_fn_id), obj->cnt);
522 obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_RX];
523 wr32(hw, I40E_GLHMC_LANRXBASE(hmc_fn_id),
524 (u32)((obj->base & I40E_GLHMC_LANRXBASE_FPMLANRXBASE_MASK) / 512));
525 wr32(hw, I40E_GLHMC_LANRXCNT(hmc_fn_id), obj->cnt);
528 obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX];
529 wr32(hw, I40E_GLHMC_FCOEDDPBASE(hmc_fn_id),
530 (u32)((obj->base & I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_MASK) / 512));
531 wr32(hw, I40E_GLHMC_FCOEDDPCNT(hmc_fn_id), obj->cnt);
534 obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_FILT];
535 wr32(hw, I40E_GLHMC_FCOEFBASE(hmc_fn_id),
536 (u32)((obj->base & I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_MASK) / 512));
537 wr32(hw, I40E_GLHMC_FCOEFCNT(hmc_fn_id), obj->cnt);
539 configure_lan_hmc_out:
544 * i40e_delete_hmc_object - remove hmc objects
545 * @hw: pointer to the HW structure
546 * @info: pointer to i40e_hmc_delete_obj_info struct
548 * This will de-populate the SDs and PDs. It frees
549 * the memory for PDS and backing storage. After this function is returned,
550 * caller should deallocate memory allocated previously for
551 * book-keeping information about PDs and backing storage.
553 enum i40e_status_code i40e_delete_lan_hmc_object(struct i40e_hw *hw,
554 struct i40e_hmc_lan_delete_obj_info *info)
556 enum i40e_status_code ret_code = I40E_SUCCESS;
557 struct i40e_hmc_pd_table *pd_table;
558 u32 pd_idx, pd_lmt, rel_pd_idx;
563 ret_code = I40E_ERR_BAD_PTR;
564 DEBUGOUT("i40e_delete_hmc_object: bad info ptr\n");
567 if (NULL == info->hmc_info) {
568 ret_code = I40E_ERR_BAD_PTR;
569 DEBUGOUT("i40e_delete_hmc_object: bad info->hmc_info ptr\n");
572 if (I40E_HMC_INFO_SIGNATURE != info->hmc_info->signature) {
573 ret_code = I40E_ERR_BAD_PTR;
574 DEBUGOUT("i40e_delete_hmc_object: bad hmc_info->signature\n");
578 if (NULL == info->hmc_info->sd_table.sd_entry) {
579 ret_code = I40E_ERR_BAD_PTR;
580 DEBUGOUT("i40e_delete_hmc_object: bad sd_entry\n");
584 if (NULL == info->hmc_info->hmc_obj) {
585 ret_code = I40E_ERR_BAD_PTR;
586 DEBUGOUT("i40e_delete_hmc_object: bad hmc_info->hmc_obj\n");
589 if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
590 ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
591 DEBUGOUT1("i40e_delete_hmc_object: returns error %d\n",
596 if ((info->start_idx + info->count) >
597 info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
598 ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
599 DEBUGOUT1("i40e_delete_hmc_object: returns error %d\n",
604 I40E_FIND_PD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
605 info->start_idx, info->count, &pd_idx,
608 for (j = pd_idx; j < pd_lmt; j++) {
609 sd_idx = j / I40E_HMC_PD_CNT_IN_SD;
611 if (I40E_SD_TYPE_PAGED !=
612 info->hmc_info->sd_table.sd_entry[sd_idx].entry_type)
615 rel_pd_idx = j % I40E_HMC_PD_CNT_IN_SD;
618 &info->hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
619 if (pd_table->pd_entry[rel_pd_idx].valid) {
620 ret_code = i40e_remove_pd_bp(hw, info->hmc_info, j);
621 if (I40E_SUCCESS != ret_code)
626 /* find sd index and limit */
627 I40E_FIND_SD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
628 info->start_idx, info->count,
630 if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
631 sd_lmt > info->hmc_info->sd_table.sd_cnt) {
632 ret_code = I40E_ERR_INVALID_SD_INDEX;
636 for (i = sd_idx; i < sd_lmt; i++) {
637 if (!info->hmc_info->sd_table.sd_entry[i].valid)
639 switch (info->hmc_info->sd_table.sd_entry[i].entry_type) {
640 case I40E_SD_TYPE_DIRECT:
641 ret_code = i40e_remove_sd_bp(hw, info->hmc_info, i);
642 if (I40E_SUCCESS != ret_code)
645 case I40E_SD_TYPE_PAGED:
646 ret_code = i40e_remove_pd_page(hw, info->hmc_info, i);
647 if (I40E_SUCCESS != ret_code)
659 * i40e_shutdown_lan_hmc - Remove HMC backing store, free allocated memory
660 * @hw: pointer to the hw structure
662 * This must be called by drivers as they are shutting down and being
663 * removed from the OS.
665 enum i40e_status_code i40e_shutdown_lan_hmc(struct i40e_hw *hw)
667 struct i40e_hmc_lan_delete_obj_info info;
668 enum i40e_status_code ret_code;
670 info.hmc_info = &hw->hmc;
671 info.rsrc_type = I40E_HMC_LAN_FULL;
675 /* delete the object */
676 ret_code = i40e_delete_lan_hmc_object(hw, &info);
678 /* free the SD table entry for LAN */
679 i40e_free_virt_mem(hw, &hw->hmc.sd_table.addr);
680 hw->hmc.sd_table.sd_cnt = 0;
681 hw->hmc.sd_table.sd_entry = NULL;
683 /* free memory used for hmc_obj */
684 i40e_free_virt_mem(hw, &hw->hmc.hmc_obj_virt_mem);
685 hw->hmc.hmc_obj = NULL;
690 #define I40E_HMC_STORE(_struct, _ele) \
691 offsetof(struct _struct, _ele), \
692 FIELD_SIZEOF(struct _struct, _ele)
694 struct i40e_context_ele {
701 /* LAN Tx Queue Context */
702 static struct i40e_context_ele i40e_hmc_txq_ce_info[] = {
703 /* Field Width LSB */
704 {I40E_HMC_STORE(i40e_hmc_obj_txq, head), 13, 0 },
705 {I40E_HMC_STORE(i40e_hmc_obj_txq, new_context), 1, 30 },
706 {I40E_HMC_STORE(i40e_hmc_obj_txq, base), 57, 32 },
707 {I40E_HMC_STORE(i40e_hmc_obj_txq, fc_ena), 1, 89 },
708 {I40E_HMC_STORE(i40e_hmc_obj_txq, timesync_ena), 1, 90 },
709 {I40E_HMC_STORE(i40e_hmc_obj_txq, fd_ena), 1, 91 },
710 {I40E_HMC_STORE(i40e_hmc_obj_txq, alt_vlan_ena), 1, 92 },
711 {I40E_HMC_STORE(i40e_hmc_obj_txq, cpuid), 8, 96 },
713 {I40E_HMC_STORE(i40e_hmc_obj_txq, thead_wb), 13, 0 + 128 },
714 {I40E_HMC_STORE(i40e_hmc_obj_txq, head_wb_ena), 1, 32 + 128 },
715 {I40E_HMC_STORE(i40e_hmc_obj_txq, qlen), 13, 33 + 128 },
716 {I40E_HMC_STORE(i40e_hmc_obj_txq, tphrdesc_ena), 1, 46 + 128 },
717 {I40E_HMC_STORE(i40e_hmc_obj_txq, tphrpacket_ena), 1, 47 + 128 },
718 {I40E_HMC_STORE(i40e_hmc_obj_txq, tphwdesc_ena), 1, 48 + 128 },
719 {I40E_HMC_STORE(i40e_hmc_obj_txq, head_wb_addr), 64, 64 + 128 },
721 {I40E_HMC_STORE(i40e_hmc_obj_txq, crc), 32, 0 + (7 * 128) },
722 {I40E_HMC_STORE(i40e_hmc_obj_txq, rdylist), 10, 84 + (7 * 128) },
723 {I40E_HMC_STORE(i40e_hmc_obj_txq, rdylist_act), 1, 94 + (7 * 128) },
727 /* LAN Rx Queue Context */
728 static struct i40e_context_ele i40e_hmc_rxq_ce_info[] = {
729 /* Field Width LSB */
730 { I40E_HMC_STORE(i40e_hmc_obj_rxq, head), 13, 0 },
731 { I40E_HMC_STORE(i40e_hmc_obj_rxq, cpuid), 8, 13 },
732 { I40E_HMC_STORE(i40e_hmc_obj_rxq, base), 57, 32 },
733 { I40E_HMC_STORE(i40e_hmc_obj_rxq, qlen), 13, 89 },
734 { I40E_HMC_STORE(i40e_hmc_obj_rxq, dbuff), 7, 102 },
735 { I40E_HMC_STORE(i40e_hmc_obj_rxq, hbuff), 5, 109 },
736 { I40E_HMC_STORE(i40e_hmc_obj_rxq, dtype), 2, 114 },
737 { I40E_HMC_STORE(i40e_hmc_obj_rxq, dsize), 1, 116 },
738 { I40E_HMC_STORE(i40e_hmc_obj_rxq, crcstrip), 1, 117 },
739 { I40E_HMC_STORE(i40e_hmc_obj_rxq, fc_ena), 1, 118 },
740 { I40E_HMC_STORE(i40e_hmc_obj_rxq, l2tsel), 1, 119 },
741 { I40E_HMC_STORE(i40e_hmc_obj_rxq, hsplit_0), 4, 120 },
742 { I40E_HMC_STORE(i40e_hmc_obj_rxq, hsplit_1), 2, 124 },
743 { I40E_HMC_STORE(i40e_hmc_obj_rxq, showiv), 1, 127 },
744 { I40E_HMC_STORE(i40e_hmc_obj_rxq, rxmax), 14, 174 },
745 { I40E_HMC_STORE(i40e_hmc_obj_rxq, tphrdesc_ena), 1, 193 },
746 { I40E_HMC_STORE(i40e_hmc_obj_rxq, tphwdesc_ena), 1, 194 },
747 { I40E_HMC_STORE(i40e_hmc_obj_rxq, tphdata_ena), 1, 195 },
748 { I40E_HMC_STORE(i40e_hmc_obj_rxq, tphhead_ena), 1, 196 },
749 { I40E_HMC_STORE(i40e_hmc_obj_rxq, lrxqthresh), 3, 198 },
750 { I40E_HMC_STORE(i40e_hmc_obj_rxq, prefena), 1, 201 },
755 * i40e_write_byte - replace HMC context byte
756 * @hmc_bits: pointer to the HMC memory
757 * @ce_info: a description of the struct to be read from
758 * @src: the struct to be read from
760 static void i40e_write_byte(u8 *hmc_bits,
761 struct i40e_context_ele *ce_info,
764 u8 src_byte, dest_byte, mask;
768 /* copy from the next struct field */
769 from = src + ce_info->offset;
771 /* prepare the bits and mask */
772 shift_width = ce_info->lsb % 8;
773 mask = ((u8)1 << ce_info->width) - 1;
778 /* shift to correct alignment */
779 mask <<= shift_width;
780 src_byte <<= shift_width;
782 /* get the current bits from the target bit string */
783 dest = hmc_bits + (ce_info->lsb / 8);
785 i40e_memcpy(&dest_byte, dest, sizeof(dest_byte), I40E_DMA_TO_NONDMA);
787 dest_byte &= ~mask; /* get the bits not changing */
788 dest_byte |= src_byte; /* add in the new bits */
790 /* put it all back */
791 i40e_memcpy(dest, &dest_byte, sizeof(dest_byte), I40E_NONDMA_TO_DMA);
795 * i40e_write_word - replace HMC context word
796 * @hmc_bits: pointer to the HMC memory
797 * @ce_info: a description of the struct to be read from
798 * @src: the struct to be read from
800 static void i40e_write_word(u8 *hmc_bits,
801 struct i40e_context_ele *ce_info,
809 /* copy from the next struct field */
810 from = src + ce_info->offset;
812 /* prepare the bits and mask */
813 shift_width = ce_info->lsb % 8;
814 mask = ((u16)1 << ce_info->width) - 1;
816 /* don't swizzle the bits until after the mask because the mask bits
817 * will be in a different bit position on big endian machines
819 src_word = *(u16 *)from;
822 /* shift to correct alignment */
823 mask <<= shift_width;
824 src_word <<= shift_width;
826 /* get the current bits from the target bit string */
827 dest = hmc_bits + (ce_info->lsb / 8);
829 i40e_memcpy(&dest_word, dest, sizeof(dest_word), I40E_DMA_TO_NONDMA);
831 dest_word &= ~(CPU_TO_LE16(mask)); /* get the bits not changing */
832 dest_word |= CPU_TO_LE16(src_word); /* add in the new bits */
834 /* put it all back */
835 i40e_memcpy(dest, &dest_word, sizeof(dest_word), I40E_NONDMA_TO_DMA);
839 * i40e_write_dword - replace HMC context dword
840 * @hmc_bits: pointer to the HMC memory
841 * @ce_info: a description of the struct to be read from
842 * @src: the struct to be read from
844 static void i40e_write_dword(u8 *hmc_bits,
845 struct i40e_context_ele *ce_info,
853 /* copy from the next struct field */
854 from = src + ce_info->offset;
856 /* prepare the bits and mask */
857 shift_width = ce_info->lsb % 8;
859 /* if the field width is exactly 32 on an x86 machine, then the shift
860 * operation will not work because the SHL instructions count is masked
861 * to 5 bits so the shift will do nothing
863 if (ce_info->width < 32)
864 mask = ((u32)1 << ce_info->width) - 1;
868 /* don't swizzle the bits until after the mask because the mask bits
869 * will be in a different bit position on big endian machines
871 src_dword = *(u32 *)from;
874 /* shift to correct alignment */
875 mask <<= shift_width;
876 src_dword <<= shift_width;
878 /* get the current bits from the target bit string */
879 dest = hmc_bits + (ce_info->lsb / 8);
881 i40e_memcpy(&dest_dword, dest, sizeof(dest_dword), I40E_DMA_TO_NONDMA);
883 dest_dword &= ~(CPU_TO_LE32(mask)); /* get the bits not changing */
884 dest_dword |= CPU_TO_LE32(src_dword); /* add in the new bits */
886 /* put it all back */
887 i40e_memcpy(dest, &dest_dword, sizeof(dest_dword), I40E_NONDMA_TO_DMA);
891 * i40e_write_qword - replace HMC context qword
892 * @hmc_bits: pointer to the HMC memory
893 * @ce_info: a description of the struct to be read from
894 * @src: the struct to be read from
896 static void i40e_write_qword(u8 *hmc_bits,
897 struct i40e_context_ele *ce_info,
905 /* copy from the next struct field */
906 from = src + ce_info->offset;
908 /* prepare the bits and mask */
909 shift_width = ce_info->lsb % 8;
911 /* if the field width is exactly 64 on an x86 machine, then the shift
912 * operation will not work because the SHL instructions count is masked
913 * to 6 bits so the shift will do nothing
915 if (ce_info->width < 64)
916 mask = ((u64)1 << ce_info->width) - 1;
920 /* don't swizzle the bits until after the mask because the mask bits
921 * will be in a different bit position on big endian machines
923 src_qword = *(u64 *)from;
926 /* shift to correct alignment */
927 mask <<= shift_width;
928 src_qword <<= shift_width;
930 /* get the current bits from the target bit string */
931 dest = hmc_bits + (ce_info->lsb / 8);
933 i40e_memcpy(&dest_qword, dest, sizeof(dest_qword), I40E_DMA_TO_NONDMA);
935 dest_qword &= ~(CPU_TO_LE64(mask)); /* get the bits not changing */
936 dest_qword |= CPU_TO_LE64(src_qword); /* add in the new bits */
938 /* put it all back */
939 i40e_memcpy(dest, &dest_qword, sizeof(dest_qword), I40E_NONDMA_TO_DMA);
943 * i40e_read_byte - read HMC context byte into struct
944 * @hmc_bits: pointer to the HMC memory
945 * @ce_info: a description of the struct to be filled
946 * @dest: the struct to be filled
948 static void i40e_read_byte(u8 *hmc_bits,
949 struct i40e_context_ele *ce_info,
956 /* prepare the bits and mask */
957 shift_width = ce_info->lsb % 8;
958 mask = ((u8)1 << ce_info->width) - 1;
960 /* shift to correct alignment */
961 mask <<= shift_width;
963 /* get the current bits from the src bit string */
964 src = hmc_bits + (ce_info->lsb / 8);
966 i40e_memcpy(&dest_byte, src, sizeof(dest_byte), I40E_DMA_TO_NONDMA);
968 dest_byte &= ~(mask);
970 dest_byte >>= shift_width;
972 /* get the address from the struct field */
973 target = dest + ce_info->offset;
975 /* put it back in the struct */
976 i40e_memcpy(target, &dest_byte, sizeof(dest_byte), I40E_NONDMA_TO_DMA);
980 * i40e_read_word - read HMC context word into struct
981 * @hmc_bits: pointer to the HMC memory
982 * @ce_info: a description of the struct to be filled
983 * @dest: the struct to be filled
985 static void i40e_read_word(u8 *hmc_bits,
986 struct i40e_context_ele *ce_info,
994 /* prepare the bits and mask */
995 shift_width = ce_info->lsb % 8;
996 mask = ((u16)1 << ce_info->width) - 1;
998 /* shift to correct alignment */
999 mask <<= shift_width;
1001 /* get the current bits from the src bit string */
1002 src = hmc_bits + (ce_info->lsb / 8);
1004 i40e_memcpy(&src_word, src, sizeof(src_word), I40E_DMA_TO_NONDMA);
1006 /* the data in the memory is stored as little endian so mask it
1009 src_word &= ~(CPU_TO_LE16(mask));
1011 /* get the data back into host order before shifting */
1012 dest_word = LE16_TO_CPU(src_word);
1014 dest_word >>= shift_width;
1016 /* get the address from the struct field */
1017 target = dest + ce_info->offset;
1019 /* put it back in the struct */
1020 i40e_memcpy(target, &dest_word, sizeof(dest_word), I40E_NONDMA_TO_DMA);
1024 * i40e_read_dword - read HMC context dword into struct
1025 * @hmc_bits: pointer to the HMC memory
1026 * @ce_info: a description of the struct to be filled
1027 * @dest: the struct to be filled
1029 static void i40e_read_dword(u8 *hmc_bits,
1030 struct i40e_context_ele *ce_info,
1033 u32 dest_dword, mask;
1038 /* prepare the bits and mask */
1039 shift_width = ce_info->lsb % 8;
1041 /* if the field width is exactly 32 on an x86 machine, then the shift
1042 * operation will not work because the SHL instructions count is masked
1043 * to 5 bits so the shift will do nothing
1045 if (ce_info->width < 32)
1046 mask = ((u32)1 << ce_info->width) - 1;
1050 /* shift to correct alignment */
1051 mask <<= shift_width;
1053 /* get the current bits from the src bit string */
1054 src = hmc_bits + (ce_info->lsb / 8);
1056 i40e_memcpy(&src_dword, src, sizeof(src_dword), I40E_DMA_TO_NONDMA);
1058 /* the data in the memory is stored as little endian so mask it
1061 src_dword &= ~(CPU_TO_LE32(mask));
1063 /* get the data back into host order before shifting */
1064 dest_dword = LE32_TO_CPU(src_dword);
1066 dest_dword >>= shift_width;
1068 /* get the address from the struct field */
1069 target = dest + ce_info->offset;
1071 /* put it back in the struct */
1072 i40e_memcpy(target, &dest_dword, sizeof(dest_dword),
1073 I40E_NONDMA_TO_DMA);
1077 * i40e_read_qword - read HMC context qword into struct
1078 * @hmc_bits: pointer to the HMC memory
1079 * @ce_info: a description of the struct to be filled
1080 * @dest: the struct to be filled
1082 static void i40e_read_qword(u8 *hmc_bits,
1083 struct i40e_context_ele *ce_info,
/* NOTE(review): 64-bit twin of i40e_read_dword above; this excerpt is
 * likewise missing the final parameter, braces, and local declarations.
 */
1086 u64 dest_qword, mask;
1091 /* prepare the bits and mask */
/* lsb % 8 gives the bit offset of the field within its starting byte */
1092 shift_width = ce_info->lsb % 8;
1094 /* if the field width is exactly 64 on an x86 machine, then the shift
1095 * operation will not work because the SHL instructions count is masked
1096 * to 6 bits so the shift will do nothing
1098 if (ce_info->width < 64)
1099 mask = ((u64)1 << ce_info->width) - 1;
/* NOTE(review): the else branch (presumably mask = ~(u64)0 for a
 * full-width field) is not visible in this excerpt — confirm upstream.
 */
1103 /* shift to correct alignment */
1104 mask <<= shift_width;
1106 /* get the current bits from the src bit string */
1107 src = hmc_bits + (ce_info->lsb / 8);
/* copy via i40e_memcpy rather than a pointer cast: src may be
 * unaligned DMA memory
 */
1109 i40e_memcpy(&src_qword, src, sizeof(src_qword), I40E_DMA_TO_NONDMA);
1111 /* the data in the memory is stored as little endian so mask it
1114 src_qword &= ~(CPU_TO_LE64(mask));
1116 /* get the data back into host order before shifting */
1117 dest_qword = LE64_TO_CPU(src_qword);
1119 dest_qword >>= shift_width;
1121 /* get the address from the struct field */
1122 target = dest + ce_info->offset;
1124 /* put it back in the struct */
1125 i40e_memcpy(target, &dest_qword, sizeof(dest_qword),
1126 I40E_NONDMA_TO_DMA);
1130 * i40e_get_hmc_context - extract HMC context bits
1131 * @context_bytes: pointer to the context bit array
1132 * @ce_info: a description of the struct to be filled
1133 * @dest: the struct to be filled
1135 static enum i40e_status_code i40e_get_hmc_context(u8 *context_bytes,
1136 struct i40e_context_ele *ce_info,
/* Walks the ce_info descriptor table (terminated by a width == 0 entry)
 * and dispatches on each element's size_of so that each field is copied
 * out of the little-endian HMC image with the correct width.
 * NOTE(review): the switch's case labels/break statements and the final
 * parameter are missing from this excerpt — confirm against upstream.
 */
1141 for (f = 0; ce_info[f].width != 0; f++) {
1142 switch (ce_info[f].size_of) {
1144 i40e_read_byte(context_bytes, &ce_info[f], dest);
1147 i40e_read_word(context_bytes, &ce_info[f], dest);
1150 i40e_read_dword(context_bytes, &ce_info[f], dest);
1153 i40e_read_qword(context_bytes, &ce_info[f], dest);
1156 /* nothing to do, just keep going */
/* always succeeds; the return type exists for interface symmetry */
1161 return I40E_SUCCESS;
1165 * i40e_clear_hmc_context - zero out the HMC context bits
1166 * @hw: the hardware struct
1167 * @context_bytes: pointer to the context bit array (DMA memory)
1168 * @hmc_type: the type of HMC resource
1170 static enum i40e_status_code i40e_clear_hmc_context(struct i40e_hw *hw,
1172 enum i40e_hmc_lan_rsrc_type hmc_type)
1174 /* clean the bit array */
/* zeroes the entire per-object context region; the size comes from the
 * HMC object table indexed by resource type
 */
1175 i40e_memset(context_bytes, 0, (u32)hw->hmc.hmc_obj[hmc_type].size,
/* NOTE(review): the i40e_memset direction argument (presumably
 * I40E_DMA_MEM) is on a line missing from this excerpt.
 */
1178 return I40E_SUCCESS;
1182 * i40e_set_hmc_context - replace HMC context bits
1183 * @context_bytes: pointer to the context bit array
1184 * @ce_info: a description of the struct to be filled
1185 * @dest: the struct to be filled
1187 static enum i40e_status_code i40e_set_hmc_context(u8 *context_bytes,
1188 struct i40e_context_ele *ce_info,
/* Mirror of i40e_get_hmc_context: iterates the descriptor table and
 * writes each struct field into the HMC image at the proper bit offset.
 * NOTE(review): case labels and the final parameter are missing from
 * this excerpt — confirm against upstream.
 */
1193 for (f = 0; ce_info[f].width != 0; f++) {
1195 /* we have to deal with each element of the HMC using the
1196 * correct size so that we are correct regardless of the
1197 * endianness of the machine
1199 switch (ce_info[f].size_of) {
1201 i40e_write_byte(context_bytes, &ce_info[f], dest);
1204 i40e_write_word(context_bytes, &ce_info[f], dest);
1207 i40e_write_dword(context_bytes, &ce_info[f], dest);
1210 i40e_write_qword(context_bytes, &ce_info[f], dest);
/* always succeeds; the return type exists for interface symmetry */
1215 return I40E_SUCCESS;
1219 * i40e_hmc_get_object_va - retrieves an object's virtual address
1220 * @hw: pointer to the hw structure
1221 * @object_base: pointer to u64 to get the va
1222 * @rsrc_type: the hmc resource type
1223 * @obj_idx: hmc object index
1225 * This function retrieves the object's virtual address from the object
1226 * base pointer. This function is used for LAN Queue contexts.
1229 enum i40e_status_code i40e_hmc_get_object_va(struct i40e_hw *hw,
1231 enum i40e_hmc_lan_rsrc_type rsrc_type,
/* NOTE(review): the object_base and obj_idx parameter lines, sd_idx
 * declarations, braces, and the goto-exit error paths are missing from
 * this excerpt — compare against the full i40e_lan_hmc.c before editing.
 */
1234 u32 obj_offset_in_sd, obj_offset_in_pd;
1235 struct i40e_hmc_info *hmc_info = &hw->hmc;
1236 struct i40e_hmc_sd_entry *sd_entry;
1237 struct i40e_hmc_pd_entry *pd_entry;
1238 u32 pd_idx, pd_lmt, rel_pd_idx;
1239 enum i40e_status_code ret_code = I40E_SUCCESS;
1240 u64 obj_offset_in_fpm;
/* --- argument validation: reject NULL pointers, a bad signature, and
 * an out-of-range object index before touching any tables ---
 */
1243 if (NULL == hmc_info) {
1244 ret_code = I40E_ERR_BAD_PTR;
1245 DEBUGOUT("i40e_hmc_get_object_va: bad hmc_info ptr\n");
1248 if (NULL == hmc_info->hmc_obj) {
1249 ret_code = I40E_ERR_BAD_PTR;
1250 DEBUGOUT("i40e_hmc_get_object_va: bad hmc_info->hmc_obj ptr\n");
1253 if (NULL == object_base) {
1254 ret_code = I40E_ERR_BAD_PTR;
1255 DEBUGOUT("i40e_hmc_get_object_va: bad object_base ptr\n");
1258 if (I40E_HMC_INFO_SIGNATURE != hmc_info->signature) {
1259 ret_code = I40E_ERR_BAD_PTR;
1260 DEBUGOUT("i40e_hmc_get_object_va: bad hmc_info->signature\n");
1263 if (obj_idx >= hmc_info->hmc_obj[rsrc_type].cnt) {
1264 DEBUGOUT1("i40e_hmc_get_object_va: returns error %d\n",
1266 ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
1269 /* find sd index and limit */
1270 I40E_FIND_SD_INDEX_LIMIT(hmc_info, rsrc_type, obj_idx, 1,
1273 sd_entry = &hmc_info->sd_table.sd_entry[sd_idx];
/* absolute offset of this object inside the function-private memory
 * space: per-type base plus index times per-object size
 */
1274 obj_offset_in_fpm = hmc_info->hmc_obj[rsrc_type].base +
1275 hmc_info->hmc_obj[rsrc_type].size * obj_idx;
/* paged SDs resolve through a page-descriptor entry; otherwise the SD
 * maps a direct backing page (see the second branch below)
 */
1277 if (I40E_SD_TYPE_PAGED == sd_entry->entry_type) {
1278 I40E_FIND_PD_INDEX_LIMIT(hmc_info, rsrc_type, obj_idx, 1,
/* index of the PD within its owning SD */
1280 rel_pd_idx = pd_idx % I40E_HMC_PD_CNT_IN_SD;
1281 pd_entry = &sd_entry->u.pd_table.pd_entry[rel_pd_idx];
1282 obj_offset_in_pd = (u32)(obj_offset_in_fpm %
1283 I40E_HMC_PAGED_BP_SIZE);
1284 *object_base = (u8 *)pd_entry->bp.addr.va + obj_offset_in_pd;
/* direct-mapped SD: offset within the large backing page */
1286 obj_offset_in_sd = (u32)(obj_offset_in_fpm %
1287 I40E_HMC_DIRECT_BP_SIZE);
1288 *object_base = (u8 *)sd_entry->u.bp.addr.va + obj_offset_in_sd;
1295 * i40e_get_lan_tx_queue_context - return the HMC context for the queue
1296 * @hw: the hardware struct
1297 * @queue: the queue we care about
1298 * @s: the struct to be filled
1300 enum i40e_status_code i40e_get_lan_tx_queue_context(struct i40e_hw *hw,
1302 struct i40e_hmc_obj_txq *s)
1304 enum i40e_status_code err;
/* resolve the queue's context VA, then unpack it via the Tx descriptor
 * table; the err-check between the two calls is missing in this excerpt
 */
1307 err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_TX, queue);
1311 return i40e_get_hmc_context(context_bytes,
1312 i40e_hmc_txq_ce_info, (u8 *)s);
1316 * i40e_clear_lan_tx_queue_context - clear the HMC context for the queue
1317 * @hw: the hardware struct
1318 * @queue: the queue we care about
1320 enum i40e_status_code i40e_clear_lan_tx_queue_context(struct i40e_hw *hw,
/* zeroes the whole Tx queue context region for @queue; the err-check
 * between the two calls is missing in this excerpt
 */
1323 enum i40e_status_code err;
1326 err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_TX, queue);
1330 return i40e_clear_hmc_context(hw, context_bytes, I40E_HMC_LAN_TX);
1334 * i40e_set_lan_tx_queue_context - set the HMC context for the queue
1335 * @hw: the hardware struct
1336 * @queue: the queue we care about
1337 * @s: the struct to be filled
1339 enum i40e_status_code i40e_set_lan_tx_queue_context(struct i40e_hw *hw,
1341 struct i40e_hmc_obj_txq *s)
1343 enum i40e_status_code err;
/* pack the caller's Tx queue struct back into the device's context
 * image; the err-check between the two calls is missing in this excerpt
 */
1346 err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_TX, queue);
1350 return i40e_set_hmc_context(context_bytes,
1351 i40e_hmc_txq_ce_info, (u8 *)s);
1355 * i40e_get_lan_rx_queue_context - return the HMC context for the queue
1356 * @hw: the hardware struct
1357 * @queue: the queue we care about
1358 * @s: the struct to be filled
1360 enum i40e_status_code i40e_get_lan_rx_queue_context(struct i40e_hw *hw,
1362 struct i40e_hmc_obj_rxq *s)
1364 enum i40e_status_code err;
/* Rx twin of i40e_get_lan_tx_queue_context, using the Rx descriptor
 * table; the err-check between the two calls is missing in this excerpt
 */
1367 err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_RX, queue);
1371 return i40e_get_hmc_context(context_bytes,
1372 i40e_hmc_rxq_ce_info, (u8 *)s);
1376 * i40e_clear_lan_rx_queue_context - clear the HMC context for the queue
1377 * @hw: the hardware struct
1378 * @queue: the queue we care about
1380 enum i40e_status_code i40e_clear_lan_rx_queue_context(struct i40e_hw *hw,
/* zeroes the whole Rx queue context region for @queue; the err-check
 * between the two calls is missing in this excerpt
 */
1383 enum i40e_status_code err;
1386 err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_RX, queue);
1390 return i40e_clear_hmc_context(hw, context_bytes, I40E_HMC_LAN_RX);
1394 * i40e_set_lan_rx_queue_context - set the HMC context for the queue
1395 * @hw: the hardware struct
1396 * @queue: the queue we care about
1397 * @s: the struct to be filled
1399 enum i40e_status_code i40e_set_lan_rx_queue_context(struct i40e_hw *hw,
1401 struct i40e_hmc_obj_rxq *s)
1403 enum i40e_status_code err;
1406 err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_RX, queue);
1410 return i40e_set_hmc_context(context_bytes,
1411 i40e_hmc_rxq_ce_info, (u8 *)s);