1 /*******************************************************************************
3 Copyright (c) 2013 - 2014, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ***************************************************************************/
34 #include "i40e_osdep.h"
35 #include "i40e_register.h"
36 #include "i40e_type.h"
38 #include "i40e_lan_hmc.h"
39 #include "i40e_prototype.h"
41 /* lan specific interface functions */
44 * i40e_align_l2obj_base - aligns base object pointer to 512 bytes
45 * @offset: base address offset needing alignment
47 * Aligns the layer 2 function private memory so it's 512-byte aligned.
49 STATIC u64 i40e_align_l2obj_base(u64 offset)
51 u64 aligned_offset = offset;
53 if ((offset % I40E_HMC_L2OBJ_BASE_ALIGNMENT) > 0)
54 aligned_offset += (I40E_HMC_L2OBJ_BASE_ALIGNMENT -
55 (offset % I40E_HMC_L2OBJ_BASE_ALIGNMENT));
57 return aligned_offset;
61 * i40e_calculate_l2fpm_size - calculates layer 2 FPM memory size
62 * @txq_num: number of Tx queues needing backing context
63 * @rxq_num: number of Rx queues needing backing context
64 * @fcoe_cntx_num: amount of FCoE statefull contexts needing backing context
65 * @fcoe_filt_num: number of FCoE filters needing backing context
67 * Calculates the maximum amount of memory for the function required, based
68 * on the number of resources it must provide context for.
70 u64 i40e_calculate_l2fpm_size(u32 txq_num, u32 rxq_num,
71 u32 fcoe_cntx_num, u32 fcoe_filt_num)
75 fpm_size = txq_num * I40E_HMC_OBJ_SIZE_TXQ;
76 fpm_size = i40e_align_l2obj_base(fpm_size);
78 fpm_size += (rxq_num * I40E_HMC_OBJ_SIZE_RXQ);
79 fpm_size = i40e_align_l2obj_base(fpm_size);
81 fpm_size += (fcoe_cntx_num * I40E_HMC_OBJ_SIZE_FCOE_CNTX);
82 fpm_size = i40e_align_l2obj_base(fpm_size);
84 fpm_size += (fcoe_filt_num * I40E_HMC_OBJ_SIZE_FCOE_FILT);
85 fpm_size = i40e_align_l2obj_base(fpm_size);
91 * i40e_init_lan_hmc - initialize i40e_hmc_info struct
92 * @hw: pointer to the HW structure
93 * @txq_num: number of Tx queues needing backing context
94 * @rxq_num: number of Rx queues needing backing context
95 * @fcoe_cntx_num: amount of FCoE statefull contexts needing backing context
96 * @fcoe_filt_num: number of FCoE filters needing backing context
98 * This function will be called once per physical function initialization.
99 * It will fill out the i40e_hmc_obj_info structure for LAN objects based on
100 * the driver's provided input, as well as information from the HMC itself
104 * - HMC Resource Profile has been selected before calling this function.
106 enum i40e_status_code i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
107 u32 rxq_num, u32 fcoe_cntx_num,
110 struct i40e_hmc_obj_info *obj, *full_obj;
111 enum i40e_status_code ret_code = I40E_SUCCESS;
115 hw->hmc.signature = I40E_HMC_INFO_SIGNATURE;
116 hw->hmc.hmc_fn_id = hw->pf_id;
118 /* allocate memory for hmc_obj */
119 ret_code = i40e_allocate_virt_mem(hw, &hw->hmc.hmc_obj_virt_mem,
120 sizeof(struct i40e_hmc_obj_info) * I40E_HMC_LAN_MAX);
122 goto init_lan_hmc_out;
123 hw->hmc.hmc_obj = (struct i40e_hmc_obj_info *)
124 hw->hmc.hmc_obj_virt_mem.va;
126 /* The full object will be used to create the LAN HMC SD */
127 full_obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_FULL];
128 full_obj->max_cnt = 0;
133 /* Tx queue context information */
134 obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_TX];
135 obj->max_cnt = rd32(hw, I40E_GLHMC_LANQMAX);
138 size_exp = rd32(hw, I40E_GLHMC_LANTXOBJSZ);
139 obj->size = (u64)1 << size_exp;
141 /* validate values requested by driver don't exceed HMC capacity */
142 if (txq_num > obj->max_cnt) {
143 ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
144 DEBUGOUT3("i40e_init_lan_hmc: Tx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
145 txq_num, obj->max_cnt, ret_code);
146 goto init_lan_hmc_out;
149 /* aggregate values into the full LAN object for later */
150 full_obj->max_cnt += obj->max_cnt;
151 full_obj->cnt += obj->cnt;
153 /* Rx queue context information */
154 obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_RX];
155 obj->max_cnt = rd32(hw, I40E_GLHMC_LANQMAX);
157 obj->base = hw->hmc.hmc_obj[I40E_HMC_LAN_TX].base +
158 (hw->hmc.hmc_obj[I40E_HMC_LAN_TX].cnt *
159 hw->hmc.hmc_obj[I40E_HMC_LAN_TX].size);
160 obj->base = i40e_align_l2obj_base(obj->base);
161 size_exp = rd32(hw, I40E_GLHMC_LANRXOBJSZ);
162 obj->size = (u64)1 << size_exp;
164 /* validate values requested by driver don't exceed HMC capacity */
165 if (rxq_num > obj->max_cnt) {
166 ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
167 DEBUGOUT3("i40e_init_lan_hmc: Rx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
168 rxq_num, obj->max_cnt, ret_code);
169 goto init_lan_hmc_out;
172 /* aggregate values into the full LAN object for later */
173 full_obj->max_cnt += obj->max_cnt;
174 full_obj->cnt += obj->cnt;
176 /* FCoE context information */
177 obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX];
178 obj->max_cnt = rd32(hw, I40E_GLHMC_FCOEMAX);
179 obj->cnt = fcoe_cntx_num;
180 obj->base = hw->hmc.hmc_obj[I40E_HMC_LAN_RX].base +
181 (hw->hmc.hmc_obj[I40E_HMC_LAN_RX].cnt *
182 hw->hmc.hmc_obj[I40E_HMC_LAN_RX].size);
183 obj->base = i40e_align_l2obj_base(obj->base);
184 size_exp = rd32(hw, I40E_GLHMC_FCOEDDPOBJSZ);
185 obj->size = (u64)1 << size_exp;
187 /* validate values requested by driver don't exceed HMC capacity */
188 if (fcoe_cntx_num > obj->max_cnt) {
189 ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
190 DEBUGOUT3("i40e_init_lan_hmc: FCoE context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
191 fcoe_cntx_num, obj->max_cnt, ret_code);
192 goto init_lan_hmc_out;
195 /* aggregate values into the full LAN object for later */
196 full_obj->max_cnt += obj->max_cnt;
197 full_obj->cnt += obj->cnt;
199 /* FCoE filter information */
200 obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_FILT];
201 obj->max_cnt = rd32(hw, I40E_GLHMC_FCOEFMAX);
202 obj->cnt = fcoe_filt_num;
203 obj->base = hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].base +
204 (hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].cnt *
205 hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].size);
206 obj->base = i40e_align_l2obj_base(obj->base);
207 size_exp = rd32(hw, I40E_GLHMC_FCOEFOBJSZ);
208 obj->size = (u64)1 << size_exp;
210 /* validate values requested by driver don't exceed HMC capacity */
211 if (fcoe_filt_num > obj->max_cnt) {
212 ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
213 DEBUGOUT3("i40e_init_lan_hmc: FCoE filter: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
214 fcoe_filt_num, obj->max_cnt, ret_code);
215 goto init_lan_hmc_out;
218 /* aggregate values into the full LAN object for later */
219 full_obj->max_cnt += obj->max_cnt;
220 full_obj->cnt += obj->cnt;
222 hw->hmc.first_sd_index = 0;
223 hw->hmc.sd_table.ref_cnt = 0;
224 l2fpm_size = i40e_calculate_l2fpm_size(txq_num, rxq_num, fcoe_cntx_num,
226 if (NULL == hw->hmc.sd_table.sd_entry) {
227 hw->hmc.sd_table.sd_cnt = (u32)
228 (l2fpm_size + I40E_HMC_DIRECT_BP_SIZE - 1) /
229 I40E_HMC_DIRECT_BP_SIZE;
231 /* allocate the sd_entry members in the sd_table */
232 ret_code = i40e_allocate_virt_mem(hw, &hw->hmc.sd_table.addr,
233 (sizeof(struct i40e_hmc_sd_entry) *
234 hw->hmc.sd_table.sd_cnt));
236 goto init_lan_hmc_out;
237 hw->hmc.sd_table.sd_entry =
238 (struct i40e_hmc_sd_entry *)hw->hmc.sd_table.addr.va;
240 /* store in the LAN full object for later */
241 full_obj->size = l2fpm_size;
248 * i40e_remove_pd_page - Remove a page from the page descriptor table
249 * @hw: pointer to the HW structure
250 * @hmc_info: pointer to the HMC configuration information structure
251 * @idx: segment descriptor index to find the relevant page descriptor
254 * 1. Marks the entry in pd table (for paged address mode) invalid
255 * 2. write to register PMPDINV to invalidate the backing page in FV cache
256 * 3. Decrement the ref count for pd_entry
258 * 1. caller can deallocate the memory used by pd after this function
261 STATIC enum i40e_status_code i40e_remove_pd_page(struct i40e_hw *hw,
262 struct i40e_hmc_info *hmc_info,
265 enum i40e_status_code ret_code = I40E_SUCCESS;
267 if (i40e_prep_remove_pd_page(hmc_info, idx) == I40E_SUCCESS)
268 ret_code = i40e_remove_pd_page_new(hw, hmc_info, idx, true);
274 * i40e_remove_sd_bp - remove a backing page from a segment descriptor
275 * @hw: pointer to our HW structure
276 * @hmc_info: pointer to the HMC configuration information structure
277 * @idx: the page index
280 * 1. Marks the entry in sd table (for direct address mode) invalid
281 * 2. write to register PMSDCMD, PMSDDATALOW(PMSDDATALOW.PMSDVALID set
282 * to 0) and PMSDDATAHIGH to invalidate the sd page
283 * 3. Decrement the ref count for the sd_entry
285 * 1. caller can deallocate the memory used by backing storage after this
288 STATIC enum i40e_status_code i40e_remove_sd_bp(struct i40e_hw *hw,
289 struct i40e_hmc_info *hmc_info,
292 enum i40e_status_code ret_code = I40E_SUCCESS;
294 if (i40e_prep_remove_sd_bp(hmc_info, idx) == I40E_SUCCESS)
295 ret_code = i40e_remove_sd_bp_new(hw, hmc_info, idx, true);
301 * i40e_create_lan_hmc_object - allocate backing store for hmc objects
302 * @hw: pointer to the HW structure
303 * @info: pointer to i40e_hmc_create_obj_info struct
305 * This will allocate memory for PDs and backing pages and populate
306 * the sd and pd entries.
308 enum i40e_status_code i40e_create_lan_hmc_object(struct i40e_hw *hw,
309 struct i40e_hmc_lan_create_obj_info *info)
311 enum i40e_status_code ret_code = I40E_SUCCESS;
312 struct i40e_hmc_sd_entry *sd_entry;
313 u32 pd_idx1 = 0, pd_lmt1 = 0;
314 u32 pd_idx = 0, pd_lmt = 0;
315 bool pd_error = false;
321 ret_code = I40E_ERR_BAD_PTR;
322 DEBUGOUT("i40e_create_lan_hmc_object: bad info ptr\n");
325 if (NULL == info->hmc_info) {
326 ret_code = I40E_ERR_BAD_PTR;
327 DEBUGOUT("i40e_create_lan_hmc_object: bad hmc_info ptr\n");
330 if (I40E_HMC_INFO_SIGNATURE != info->hmc_info->signature) {
331 ret_code = I40E_ERR_BAD_PTR;
332 DEBUGOUT("i40e_create_lan_hmc_object: bad signature\n");
336 if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
337 ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
338 DEBUGOUT1("i40e_create_lan_hmc_object: returns error %d\n",
342 if ((info->start_idx + info->count) >
343 info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
344 ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
345 DEBUGOUT1("i40e_create_lan_hmc_object: returns error %d\n",
350 /* find sd index and limit */
351 I40E_FIND_SD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
352 info->start_idx, info->count,
354 if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
355 sd_lmt > info->hmc_info->sd_table.sd_cnt) {
356 ret_code = I40E_ERR_INVALID_SD_INDEX;
360 I40E_FIND_PD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
361 info->start_idx, info->count, &pd_idx,
364 /* This is to cover for cases where you may not want to have an SD with
365 * the full 2M memory but something smaller. By not filling out any
366 * size, the function will default the SD size to be 2M.
368 if (info->direct_mode_sz == 0)
369 sd_size = I40E_HMC_DIRECT_BP_SIZE;
371 sd_size = info->direct_mode_sz;
373 /* check if all the sds are valid. If not, allocate a page and
376 for (j = sd_idx; j < sd_lmt; j++) {
377 /* update the sd table entry */
378 ret_code = i40e_add_sd_table_entry(hw, info->hmc_info, j,
381 if (I40E_SUCCESS != ret_code)
383 sd_entry = &info->hmc_info->sd_table.sd_entry[j];
384 if (I40E_SD_TYPE_PAGED == sd_entry->entry_type) {
385 /* check if all the pds in this sd are valid. If not,
386 * allocate a page and initialize it.
389 /* find pd_idx and pd_lmt in this sd */
390 pd_idx1 = max(pd_idx, (j * I40E_HMC_MAX_BP_COUNT));
391 pd_lmt1 = min(pd_lmt,
392 ((j + 1) * I40E_HMC_MAX_BP_COUNT));
393 for (i = pd_idx1; i < pd_lmt1; i++) {
394 /* update the pd table entry */
395 ret_code = i40e_add_pd_table_entry(hw,
398 if (I40E_SUCCESS != ret_code) {
404 /* remove the backing pages from pd_idx1 to i */
405 while (i && (i > pd_idx1)) {
406 i40e_remove_pd_bp(hw, info->hmc_info,
412 if (!sd_entry->valid) {
413 sd_entry->valid = true;
414 switch (sd_entry->entry_type) {
415 case I40E_SD_TYPE_PAGED:
416 I40E_SET_PF_SD_ENTRY(hw,
417 sd_entry->u.pd_table.pd_page_addr.pa,
418 j, sd_entry->entry_type);
420 case I40E_SD_TYPE_DIRECT:
421 I40E_SET_PF_SD_ENTRY(hw, sd_entry->u.bp.addr.pa,
422 j, sd_entry->entry_type);
425 ret_code = I40E_ERR_INVALID_SD_TYPE;
434 /* cleanup for sd entries from j to sd_idx */
435 while (j && (j > sd_idx)) {
436 sd_entry = &info->hmc_info->sd_table.sd_entry[j - 1];
437 switch (sd_entry->entry_type) {
438 case I40E_SD_TYPE_PAGED:
439 pd_idx1 = max(pd_idx,
440 ((j - 1) * I40E_HMC_MAX_BP_COUNT));
441 pd_lmt1 = min(pd_lmt, (j * I40E_HMC_MAX_BP_COUNT));
442 for (i = pd_idx1; i < pd_lmt1; i++) {
443 i40e_remove_pd_bp(hw, info->hmc_info, i);
445 i40e_remove_pd_page(hw, info->hmc_info, (j - 1));
447 case I40E_SD_TYPE_DIRECT:
448 i40e_remove_sd_bp(hw, info->hmc_info, (j - 1));
451 ret_code = I40E_ERR_INVALID_SD_TYPE;
461 * i40e_configure_lan_hmc - prepare the HMC backing store
462 * @hw: pointer to the hw structure
463 * @model: the model for the layout of the SD/PD tables
465 * - This function will be called once per physical function initialization.
466 * - This function will be called after i40e_init_lan_hmc() and before
467 * any LAN/FCoE HMC objects can be created.
469 enum i40e_status_code i40e_configure_lan_hmc(struct i40e_hw *hw,
470 enum i40e_hmc_model model)
472 struct i40e_hmc_lan_create_obj_info info;
473 u8 hmc_fn_id = hw->hmc.hmc_fn_id;
474 struct i40e_hmc_obj_info *obj;
475 enum i40e_status_code ret_code = I40E_SUCCESS;
477 /* Initialize part of the create object info struct */
478 info.hmc_info = &hw->hmc;
479 info.rsrc_type = I40E_HMC_LAN_FULL;
481 info.direct_mode_sz = hw->hmc.hmc_obj[I40E_HMC_LAN_FULL].size;
483 /* Build the SD entry for the LAN objects */
485 case I40E_HMC_MODEL_DIRECT_PREFERRED:
486 case I40E_HMC_MODEL_DIRECT_ONLY:
487 info.entry_type = I40E_SD_TYPE_DIRECT;
488 /* Make one big object, a single SD */
490 ret_code = i40e_create_lan_hmc_object(hw, &info);
491 if ((ret_code != I40E_SUCCESS) && (model == I40E_HMC_MODEL_DIRECT_PREFERRED))
493 else if (ret_code != I40E_SUCCESS)
494 goto configure_lan_hmc_out;
495 /* else clause falls through the break */
497 case I40E_HMC_MODEL_PAGED_ONLY:
499 info.entry_type = I40E_SD_TYPE_PAGED;
500 /* Make one big object in the PD table */
502 ret_code = i40e_create_lan_hmc_object(hw, &info);
503 if (ret_code != I40E_SUCCESS)
504 goto configure_lan_hmc_out;
507 /* unsupported type */
508 ret_code = I40E_ERR_INVALID_SD_TYPE;
509 DEBUGOUT1("i40e_configure_lan_hmc: Unknown SD type: %d\n",
511 goto configure_lan_hmc_out;
515 /* Configure and program the FPM registers so objects can be created */
518 obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_TX];
519 wr32(hw, I40E_GLHMC_LANTXBASE(hmc_fn_id),
520 (u32)((obj->base & I40E_GLHMC_LANTXBASE_FPMLANTXBASE_MASK) / 512));
521 wr32(hw, I40E_GLHMC_LANTXCNT(hmc_fn_id), obj->cnt);
524 obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_RX];
525 wr32(hw, I40E_GLHMC_LANRXBASE(hmc_fn_id),
526 (u32)((obj->base & I40E_GLHMC_LANRXBASE_FPMLANRXBASE_MASK) / 512));
527 wr32(hw, I40E_GLHMC_LANRXCNT(hmc_fn_id), obj->cnt);
530 obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX];
531 wr32(hw, I40E_GLHMC_FCOEDDPBASE(hmc_fn_id),
532 (u32)((obj->base & I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_MASK) / 512));
533 wr32(hw, I40E_GLHMC_FCOEDDPCNT(hmc_fn_id), obj->cnt);
536 obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_FILT];
537 wr32(hw, I40E_GLHMC_FCOEFBASE(hmc_fn_id),
538 (u32)((obj->base & I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_MASK) / 512));
539 wr32(hw, I40E_GLHMC_FCOEFCNT(hmc_fn_id), obj->cnt);
541 configure_lan_hmc_out:
546 * i40e_delete_hmc_object - remove hmc objects
547 * @hw: pointer to the HW structure
548 * @info: pointer to i40e_hmc_delete_obj_info struct
550 * This will de-populate the SDs and PDs. It frees
551 * the memory for PDS and backing storage. After this function is returned,
552 * caller should deallocate memory allocated previously for
553 * book-keeping information about PDs and backing storage.
555 enum i40e_status_code i40e_delete_lan_hmc_object(struct i40e_hw *hw,
556 struct i40e_hmc_lan_delete_obj_info *info)
558 enum i40e_status_code ret_code = I40E_SUCCESS;
559 struct i40e_hmc_pd_table *pd_table;
560 u32 pd_idx, pd_lmt, rel_pd_idx;
565 ret_code = I40E_ERR_BAD_PTR;
566 DEBUGOUT("i40e_delete_hmc_object: bad info ptr\n");
569 if (NULL == info->hmc_info) {
570 ret_code = I40E_ERR_BAD_PTR;
571 DEBUGOUT("i40e_delete_hmc_object: bad info->hmc_info ptr\n");
574 if (I40E_HMC_INFO_SIGNATURE != info->hmc_info->signature) {
575 ret_code = I40E_ERR_BAD_PTR;
576 DEBUGOUT("i40e_delete_hmc_object: bad hmc_info->signature\n");
580 if (NULL == info->hmc_info->sd_table.sd_entry) {
581 ret_code = I40E_ERR_BAD_PTR;
582 DEBUGOUT("i40e_delete_hmc_object: bad sd_entry\n");
586 if (NULL == info->hmc_info->hmc_obj) {
587 ret_code = I40E_ERR_BAD_PTR;
588 DEBUGOUT("i40e_delete_hmc_object: bad hmc_info->hmc_obj\n");
591 if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
592 ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
593 DEBUGOUT1("i40e_delete_hmc_object: returns error %d\n",
598 if ((info->start_idx + info->count) >
599 info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
600 ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
601 DEBUGOUT1("i40e_delete_hmc_object: returns error %d\n",
606 I40E_FIND_PD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
607 info->start_idx, info->count, &pd_idx,
610 for (j = pd_idx; j < pd_lmt; j++) {
611 sd_idx = j / I40E_HMC_PD_CNT_IN_SD;
613 if (I40E_SD_TYPE_PAGED !=
614 info->hmc_info->sd_table.sd_entry[sd_idx].entry_type)
617 rel_pd_idx = j % I40E_HMC_PD_CNT_IN_SD;
620 &info->hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
621 if (pd_table->pd_entry[rel_pd_idx].valid) {
622 ret_code = i40e_remove_pd_bp(hw, info->hmc_info, j);
623 if (I40E_SUCCESS != ret_code)
628 /* find sd index and limit */
629 I40E_FIND_SD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
630 info->start_idx, info->count,
632 if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
633 sd_lmt > info->hmc_info->sd_table.sd_cnt) {
634 ret_code = I40E_ERR_INVALID_SD_INDEX;
638 for (i = sd_idx; i < sd_lmt; i++) {
639 if (!info->hmc_info->sd_table.sd_entry[i].valid)
641 switch (info->hmc_info->sd_table.sd_entry[i].entry_type) {
642 case I40E_SD_TYPE_DIRECT:
643 ret_code = i40e_remove_sd_bp(hw, info->hmc_info, i);
644 if (I40E_SUCCESS != ret_code)
647 case I40E_SD_TYPE_PAGED:
648 ret_code = i40e_remove_pd_page(hw, info->hmc_info, i);
649 if (I40E_SUCCESS != ret_code)
661 * i40e_shutdown_lan_hmc - Remove HMC backing store, free allocated memory
662 * @hw: pointer to the hw structure
664 * This must be called by drivers as they are shutting down and being
665 * removed from the OS.
667 enum i40e_status_code i40e_shutdown_lan_hmc(struct i40e_hw *hw)
669 struct i40e_hmc_lan_delete_obj_info info;
670 enum i40e_status_code ret_code;
672 info.hmc_info = &hw->hmc;
673 info.rsrc_type = I40E_HMC_LAN_FULL;
677 /* delete the object */
678 ret_code = i40e_delete_lan_hmc_object(hw, &info);
680 /* free the SD table entry for LAN */
681 i40e_free_virt_mem(hw, &hw->hmc.sd_table.addr);
682 hw->hmc.sd_table.sd_cnt = 0;
683 hw->hmc.sd_table.sd_entry = NULL;
685 /* free memory used for hmc_obj */
686 i40e_free_virt_mem(hw, &hw->hmc.hmc_obj_virt_mem);
687 hw->hmc.hmc_obj = NULL;
692 #define I40E_HMC_STORE(_struct, _ele) \
693 offsetof(struct _struct, _ele), \
694 FIELD_SIZEOF(struct _struct, _ele)
696 struct i40e_context_ele {
703 /* LAN Tx Queue Context */
704 static struct i40e_context_ele i40e_hmc_txq_ce_info[] = {
705 /* Field Width LSB */
706 {I40E_HMC_STORE(i40e_hmc_obj_txq, head), 13, 0 },
707 {I40E_HMC_STORE(i40e_hmc_obj_txq, new_context), 1, 30 },
708 {I40E_HMC_STORE(i40e_hmc_obj_txq, base), 57, 32 },
709 {I40E_HMC_STORE(i40e_hmc_obj_txq, fc_ena), 1, 89 },
710 {I40E_HMC_STORE(i40e_hmc_obj_txq, timesync_ena), 1, 90 },
711 {I40E_HMC_STORE(i40e_hmc_obj_txq, fd_ena), 1, 91 },
712 {I40E_HMC_STORE(i40e_hmc_obj_txq, alt_vlan_ena), 1, 92 },
713 {I40E_HMC_STORE(i40e_hmc_obj_txq, cpuid), 8, 96 },
715 {I40E_HMC_STORE(i40e_hmc_obj_txq, thead_wb), 13, 0 + 128 },
716 {I40E_HMC_STORE(i40e_hmc_obj_txq, head_wb_ena), 1, 32 + 128 },
717 {I40E_HMC_STORE(i40e_hmc_obj_txq, qlen), 13, 33 + 128 },
718 {I40E_HMC_STORE(i40e_hmc_obj_txq, tphrdesc_ena), 1, 46 + 128 },
719 {I40E_HMC_STORE(i40e_hmc_obj_txq, tphrpacket_ena), 1, 47 + 128 },
720 {I40E_HMC_STORE(i40e_hmc_obj_txq, tphwdesc_ena), 1, 48 + 128 },
721 {I40E_HMC_STORE(i40e_hmc_obj_txq, head_wb_addr), 64, 64 + 128 },
723 {I40E_HMC_STORE(i40e_hmc_obj_txq, crc), 32, 0 + (7 * 128) },
724 {I40E_HMC_STORE(i40e_hmc_obj_txq, rdylist), 10, 84 + (7 * 128) },
725 {I40E_HMC_STORE(i40e_hmc_obj_txq, rdylist_act), 1, 94 + (7 * 128) },
729 /* LAN Rx Queue Context */
730 static struct i40e_context_ele i40e_hmc_rxq_ce_info[] = {
731 /* Field Width LSB */
732 { I40E_HMC_STORE(i40e_hmc_obj_rxq, head), 13, 0 },
733 { I40E_HMC_STORE(i40e_hmc_obj_rxq, cpuid), 8, 13 },
734 { I40E_HMC_STORE(i40e_hmc_obj_rxq, base), 57, 32 },
735 { I40E_HMC_STORE(i40e_hmc_obj_rxq, qlen), 13, 89 },
736 { I40E_HMC_STORE(i40e_hmc_obj_rxq, dbuff), 7, 102 },
737 { I40E_HMC_STORE(i40e_hmc_obj_rxq, hbuff), 5, 109 },
738 { I40E_HMC_STORE(i40e_hmc_obj_rxq, dtype), 2, 114 },
739 { I40E_HMC_STORE(i40e_hmc_obj_rxq, dsize), 1, 116 },
740 { I40E_HMC_STORE(i40e_hmc_obj_rxq, crcstrip), 1, 117 },
741 { I40E_HMC_STORE(i40e_hmc_obj_rxq, fc_ena), 1, 118 },
742 { I40E_HMC_STORE(i40e_hmc_obj_rxq, l2tsel), 1, 119 },
743 { I40E_HMC_STORE(i40e_hmc_obj_rxq, hsplit_0), 4, 120 },
744 { I40E_HMC_STORE(i40e_hmc_obj_rxq, hsplit_1), 2, 124 },
745 { I40E_HMC_STORE(i40e_hmc_obj_rxq, showiv), 1, 127 },
746 { I40E_HMC_STORE(i40e_hmc_obj_rxq, rxmax), 14, 174 },
747 { I40E_HMC_STORE(i40e_hmc_obj_rxq, tphrdesc_ena), 1, 193 },
748 { I40E_HMC_STORE(i40e_hmc_obj_rxq, tphwdesc_ena), 1, 194 },
749 { I40E_HMC_STORE(i40e_hmc_obj_rxq, tphdata_ena), 1, 195 },
750 { I40E_HMC_STORE(i40e_hmc_obj_rxq, tphhead_ena), 1, 196 },
751 { I40E_HMC_STORE(i40e_hmc_obj_rxq, lrxqthresh), 3, 198 },
752 { I40E_HMC_STORE(i40e_hmc_obj_rxq, prefena), 1, 201 },
757 * i40e_write_byte - replace HMC context byte
758 * @hmc_bits: pointer to the HMC memory
759 * @ce_info: a description of the struct to be read from
760 * @src: the struct to be read from
762 static void i40e_write_byte(u8 *hmc_bits,
763 struct i40e_context_ele *ce_info,
766 u8 src_byte, dest_byte, mask;
770 /* copy from the next struct field */
771 from = src + ce_info->offset;
773 /* prepare the bits and mask */
774 shift_width = ce_info->lsb % 8;
775 mask = ((u8)1 << ce_info->width) - 1;
780 /* shift to correct alignment */
781 mask <<= shift_width;
782 src_byte <<= shift_width;
784 /* get the current bits from the target bit string */
785 dest = hmc_bits + (ce_info->lsb / 8);
787 i40e_memcpy(&dest_byte, dest, sizeof(dest_byte), I40E_DMA_TO_NONDMA);
789 dest_byte &= ~mask; /* get the bits not changing */
790 dest_byte |= src_byte; /* add in the new bits */
792 /* put it all back */
793 i40e_memcpy(dest, &dest_byte, sizeof(dest_byte), I40E_NONDMA_TO_DMA);
797 * i40e_write_word - replace HMC context word
798 * @hmc_bits: pointer to the HMC memory
799 * @ce_info: a description of the struct to be read from
800 * @src: the struct to be read from
802 static void i40e_write_word(u8 *hmc_bits,
803 struct i40e_context_ele *ce_info,
806 u16 src_word, dest_word, mask;
810 /* copy from the next struct field */
811 from = src + ce_info->offset;
813 /* prepare the bits and mask */
814 shift_width = ce_info->lsb % 8;
815 mask = ((u16)1 << ce_info->width) - 1;
817 /* don't swizzle the bits until after the mask because the mask bits
818 * will be in a different bit position on big endian machines
820 src_word = *(u16 *)from;
823 /* shift to correct alignment */
824 mask <<= shift_width;
825 src_word <<= shift_width;
827 /* get the current bits from the target bit string */
828 dest = hmc_bits + (ce_info->lsb / 8);
830 i40e_memcpy(&dest_word, dest, sizeof(dest_word), I40E_DMA_TO_NONDMA);
832 dest_word &= ~(CPU_TO_LE16(mask)); /* get the bits not changing */
833 dest_word |= CPU_TO_LE16(src_word); /* add in the new bits */
835 /* put it all back */
836 i40e_memcpy(dest, &dest_word, sizeof(dest_word), I40E_NONDMA_TO_DMA);
840 * i40e_write_dword - replace HMC context dword
841 * @hmc_bits: pointer to the HMC memory
842 * @ce_info: a description of the struct to be read from
843 * @src: the struct to be read from
845 static void i40e_write_dword(u8 *hmc_bits,
846 struct i40e_context_ele *ce_info,
849 u32 src_dword, dest_dword, mask;
853 /* copy from the next struct field */
854 from = src + ce_info->offset;
856 /* prepare the bits and mask */
857 shift_width = ce_info->lsb % 8;
859 /* if the field width is exactly 32 on an x86 machine, then the shift
860 * operation will not work because the SHL instructions count is masked
861 * to 5 bits so the shift will do nothing
863 if (ce_info->width < 32)
864 mask = ((u32)1 << ce_info->width) - 1;
868 /* don't swizzle the bits until after the mask because the mask bits
869 * will be in a different bit position on big endian machines
871 src_dword = *(u32 *)from;
874 /* shift to correct alignment */
875 mask <<= shift_width;
876 src_dword <<= shift_width;
878 /* get the current bits from the target bit string */
879 dest = hmc_bits + (ce_info->lsb / 8);
881 i40e_memcpy(&dest_dword, dest, sizeof(dest_dword), I40E_DMA_TO_NONDMA);
883 dest_dword &= ~(CPU_TO_LE32(mask)); /* get the bits not changing */
884 dest_dword |= CPU_TO_LE32(src_dword); /* add in the new bits */
886 /* put it all back */
887 i40e_memcpy(dest, &dest_dword, sizeof(dest_dword), I40E_NONDMA_TO_DMA);
891 * i40e_write_qword - replace HMC context qword
892 * @hmc_bits: pointer to the HMC memory
893 * @ce_info: a description of the struct to be read from
894 * @src: the struct to be read from
896 static void i40e_write_qword(u8 *hmc_bits,
897 struct i40e_context_ele *ce_info,
900 u64 src_qword, dest_qword, mask;
904 /* copy from the next struct field */
905 from = src + ce_info->offset;
907 /* prepare the bits and mask */
908 shift_width = ce_info->lsb % 8;
910 /* if the field width is exactly 64 on an x86 machine, then the shift
911 * operation will not work because the SHL instructions count is masked
912 * to 6 bits so the shift will do nothing
914 if (ce_info->width < 64)
915 mask = ((u64)1 << ce_info->width) - 1;
917 mask = 0xFFFFFFFFFFFFFFFF;
919 /* don't swizzle the bits until after the mask because the mask bits
920 * will be in a different bit position on big endian machines
922 src_qword = *(u64 *)from;
925 /* shift to correct alignment */
926 mask <<= shift_width;
927 src_qword <<= shift_width;
929 /* get the current bits from the target bit string */
930 dest = hmc_bits + (ce_info->lsb / 8);
932 i40e_memcpy(&dest_qword, dest, sizeof(dest_qword), I40E_DMA_TO_NONDMA);
934 dest_qword &= ~(CPU_TO_LE64(mask)); /* get the bits not changing */
935 dest_qword |= CPU_TO_LE64(src_qword); /* add in the new bits */
937 /* put it all back */
938 i40e_memcpy(dest, &dest_qword, sizeof(dest_qword), I40E_NONDMA_TO_DMA);
942 * i40e_read_byte - read HMC context byte into struct
943 * @hmc_bits: pointer to the HMC memory
944 * @ce_info: a description of the struct to be filled
945 * @dest: the struct to be filled
947 static void i40e_read_byte(u8 *hmc_bits,
948 struct i40e_context_ele *ce_info,
955 /* prepare the bits and mask */
956 shift_width = ce_info->lsb % 8;
957 mask = ((u8)1 << ce_info->width) - 1;
959 /* shift to correct alignment */
960 mask <<= shift_width;
962 /* get the current bits from the src bit string */
963 src = hmc_bits + (ce_info->lsb / 8);
965 i40e_memcpy(&dest_byte, src, sizeof(dest_byte), I40E_DMA_TO_NONDMA);
967 dest_byte &= ~(mask);
969 dest_byte >>= shift_width;
971 /* get the address from the struct field */
972 target = dest + ce_info->offset;
974 /* put it back in the struct */
975 i40e_memcpy(target, &dest_byte, sizeof(dest_byte), I40E_NONDMA_TO_DMA);
979 * i40e_read_word - read HMC context word into struct
980 * @hmc_bits: pointer to the HMC memory
981 * @ce_info: a description of the struct to be filled
982 * @dest: the struct to be filled
984 static void i40e_read_word(u8 *hmc_bits,
985 struct i40e_context_ele *ce_info,
988 u16 src_word, dest_word, mask;
992 /* prepare the bits and mask */
993 shift_width = ce_info->lsb % 8;
994 mask = ((u16)1 << ce_info->width) - 1;
996 /* shift to correct alignment */
997 mask <<= shift_width;
999 /* get the current bits from the src bit string */
1000 src = hmc_bits + (ce_info->lsb / 8);
1002 i40e_memcpy(&src_word, src, sizeof(src_word), I40E_DMA_TO_NONDMA);
1004 /* the data in the memory is stored as little endian so mask it
1007 src_word &= ~(CPU_TO_LE16(mask));
1009 /* get the data back into host order before shifting */
1010 dest_word = LE16_TO_CPU(src_word);
1012 dest_word >>= shift_width;
1014 /* get the address from the struct field */
1015 target = dest + ce_info->offset;
1017 /* put it back in the struct */
1018 i40e_memcpy(target, &dest_word, sizeof(dest_word), I40E_NONDMA_TO_DMA);
1022 * i40e_read_dword - read HMC context dword into struct
1023 * @hmc_bits: pointer to the HMC memory
1024 * @ce_info: a description of the struct to be filled
1025 * @dest: the struct to be filled
1027 static void i40e_read_dword(u8 *hmc_bits,
1028 struct i40e_context_ele *ce_info,
1031 u32 src_dword, dest_dword, mask;
1035 /* prepare the bits and mask */
1036 shift_width = ce_info->lsb % 8;
1038 /* if the field width is exactly 32 on an x86 machine, then the shift
1039 * operation will not work because the SHL instructions count is masked
1040 * to 5 bits so the shift will do nothing
1042 if (ce_info->width < 32)
1043 mask = ((u32)1 << ce_info->width) - 1;
1047 /* shift to correct alignment */
1048 mask <<= shift_width;
1050 /* get the current bits from the src bit string */
1051 src = hmc_bits + (ce_info->lsb / 8);
1053 i40e_memcpy(&src_dword, src, sizeof(src_dword), I40E_DMA_TO_NONDMA);
1055 /* the data in the memory is stored as little endian so mask it
1058 src_dword &= ~(CPU_TO_LE32(mask));
1060 /* get the data back into host order before shifting */
1061 dest_dword = LE32_TO_CPU(src_dword);
1063 dest_dword >>= shift_width;
1065 /* get the address from the struct field */
1066 target = dest + ce_info->offset;
1068 /* put it back in the struct */
1069 i40e_memcpy(target, &dest_dword, sizeof(dest_dword),
1070 I40E_NONDMA_TO_DMA);
1074 * i40e_read_qword - read HMC context qword into struct
1075 * @hmc_bits: pointer to the HMC memory
1076 * @ce_info: a description of the struct to be filled
1077 * @dest: the struct to be filled
1079 static void i40e_read_qword(u8 *hmc_bits,
1080 struct i40e_context_ele *ce_info,
1083 u64 src_qword, dest_qword, mask;
1087 /* prepare the bits and mask */
1088 shift_width = ce_info->lsb % 8;
1090 /* if the field width is exactly 64 on an x86 machine, then the shift
1091 * operation will not work because the SHL instructions count is masked
1092 * to 6 bits so the shift will do nothing
1094 if (ce_info->width < 64)
1095 mask = ((u64)1 << ce_info->width) - 1;
1097 mask = 0xFFFFFFFFFFFFFFFF;
1099 /* shift to correct alignment */
1100 mask <<= shift_width;
1102 /* get the current bits from the src bit string */
1103 src = hmc_bits + (ce_info->lsb / 8);
1105 i40e_memcpy(&src_qword, src, sizeof(src_qword), I40E_DMA_TO_NONDMA);
1107 /* the data in the memory is stored as little endian so mask it
1110 src_qword &= ~(CPU_TO_LE64(mask));
1112 /* get the data back into host order before shifting */
1113 dest_qword = LE64_TO_CPU(src_qword);
1115 dest_qword >>= shift_width;
1117 /* get the address from the struct field */
1118 target = dest + ce_info->offset;
1120 /* put it back in the struct */
1121 i40e_memcpy(target, &dest_qword, sizeof(dest_qword),
1122 I40E_NONDMA_TO_DMA);
/* NOTE(review): this is a lossy numbered listing -- gaps in the embedded line
 * numbers (1130, 1133-1136, 1139, 1141, ...) show that the "u8 *dest"
 * parameter line, braces, the switch "case" labels and the "default:" label
 * were dropped.  Recover them from the full i40e_lan_hmc.c before editing.
 *
 * Purpose: walk the ce_info[] field table (terminated by a zero-width entry)
 * and, per field, dispatch on its in-struct size to the helper that extracts
 * that field from the little-endian context image into *dest.
 */
1126 * i40e_get_hmc_context - extract HMC context bits
1127 * @context_bytes: pointer to the context bit array
1128 * @ce_info: a description of the struct to be filled
1129 * @dest: the struct to be filled
1131 static enum i40e_status_code i40e_get_hmc_context(u8 *context_bytes,
1132 struct i40e_context_ele *ce_info,
1137 for (f = 0; ce_info[f].width != 0; f++) {
1138 switch (ce_info[f].size_of) {
/* presumably "case 1:" -- single-byte field */
1140 i40e_read_byte(context_bytes, &ce_info[f], dest);
/* presumably "case 2:" -- 16-bit field */
1143 i40e_read_word(context_bytes, &ce_info[f], dest);
/* presumably "case 4:" -- 32-bit field */
1146 i40e_read_dword(context_bytes, &ce_info[f], dest);
/* presumably "case 8:" -- 64-bit field */
1149 i40e_read_qword(context_bytes, &ce_info[f], dest);
/* default arm: unknown size is deliberately skipped, not treated as error */
1152 /* nothing to do, just keep going */
/* unconditional success -- no failure paths in this dispatcher */
1157 return I40E_SUCCESS;
/* Zero the whole DMA-resident context image for one HMC object of the given
 * resource type; the object size is looked up in hw->hmc.hmc_obj[].
 * NOTE(review): lossy listing -- the missing line 1167 presumably carried the
 * "u8 *context_bytes," parameter and line 1172 the i40e_memset memory-type
 * argument (likely I40E_DMA_MEM); confirm against the full source.
 */
1161 * i40e_clear_hmc_context - zero out the HMC context bits
1162 * @hw: the hardware struct
1163 * @context_bytes: pointer to the context bit array (DMA memory)
1164 * @hmc_type: the type of HMC resource
1166 static enum i40e_status_code i40e_clear_hmc_context(struct i40e_hw *hw,
1168 enum i40e_hmc_lan_rsrc_type hmc_type)
1170 /* clean the bit array */
1171 i40e_memset(context_bytes, 0, (u32)hw->hmc.hmc_obj[hmc_type].size,
/* always succeeds; i40e_memset has no failure mode */
1174 return I40E_SUCCESS;
/* Mirror of i40e_get_hmc_context for the write direction: walk the
 * zero-width-terminated ce_info[] table and pack each struct field from
 * *dest into the little-endian context image, dispatching on field size.
 * NOTE(review): lossy listing -- "u8 *dest" parameter, braces and the
 * "case" labels were dropped (see gaps in the embedded line numbers).
 */
1178 * i40e_set_hmc_context - replace HMC context bits
1179 * @context_bytes: pointer to the context bit array
1180 * @ce_info: a description of the struct to be filled
1181 * @dest: the struct to be filled
1183 static enum i40e_status_code i40e_set_hmc_context(u8 *context_bytes,
1184 struct i40e_context_ele *ce_info,
1189 for (f = 0; ce_info[f].width != 0; f++) {
1191 /* we have to deal with each element of the HMC using the
1192 * correct size so that we are correct regardless of the
1193 * endianness of the machine
1195 switch (ce_info[f].size_of) {
/* presumably "case 1:" */
1197 i40e_write_byte(context_bytes, &ce_info[f], dest);
/* presumably "case 2:" */
1200 i40e_write_word(context_bytes, &ce_info[f], dest);
/* presumably "case 4:" */
1203 i40e_write_dword(context_bytes, &ce_info[f], dest);
/* presumably "case 8:" */
1206 i40e_write_qword(context_bytes, &ce_info[f], dest);
/* unconditional success -- no failure paths in this dispatcher */
1211 return I40E_SUCCESS;
/* Resolve an HMC object (rsrc_type, obj_idx) to its kernel virtual address:
 * validate inputs, compute the object's byte offset inside the FPM, then
 * translate through either a paged PD entry or a direct SD backing page.
 * NOTE(review): lossy listing -- the "u8 **object_base" parameter line, the
 * sd_idx/sd_lmt declarations, the "goto exit" after each failed check, the
 * "} else {" between the paged and direct branches, and the "exit:" label /
 * final return were dropped (see gaps in the embedded line numbers).
 */
1215 * i40e_hmc_get_object_va - retrieves an object's virtual address
1216 * @hmc_info: pointer to i40e_hmc_info struct
1217 * @object_base: pointer to u64 to get the va
1218 * @rsrc_type: the hmc resource type
1219 * @obj_idx: hmc object index
1221 * This function retrieves the object's virtual address from the object
1222 * base pointer. This function is used for LAN Queue contexts.
1225 enum i40e_status_code i40e_hmc_get_object_va(struct i40e_hmc_info *hmc_info,
1227 enum i40e_hmc_lan_rsrc_type rsrc_type,
1230 u32 obj_offset_in_sd, obj_offset_in_pd;
1231 struct i40e_hmc_sd_entry *sd_entry;
1232 struct i40e_hmc_pd_entry *pd_entry;
1233 u32 pd_idx, pd_lmt, rel_pd_idx;
1234 enum i40e_status_code ret_code = I40E_SUCCESS;
1235 u64 obj_offset_in_fpm;
/* defensive NULL/sanity checks; each presumably exits early via goto */
1238 if (NULL == hmc_info) {
1239 ret_code = I40E_ERR_BAD_PTR;
1240 DEBUGOUT("i40e_hmc_get_object_va: bad hmc_info ptr\n");
1243 if (NULL == hmc_info->hmc_obj) {
1244 ret_code = I40E_ERR_BAD_PTR;
1245 DEBUGOUT("i40e_hmc_get_object_va: bad hmc_info->hmc_obj ptr\n");
1248 if (NULL == object_base) {
1249 ret_code = I40E_ERR_BAD_PTR;
1250 DEBUGOUT("i40e_hmc_get_object_va: bad object_base ptr\n");
/* signature guards against an uninitialized/corrupted hmc_info */
1253 if (I40E_HMC_INFO_SIGNATURE != hmc_info->signature) {
1254 ret_code = I40E_ERR_BAD_PTR;
1255 DEBUGOUT("i40e_hmc_get_object_va: bad hmc_info->signature\n");
1258 if (obj_idx >= hmc_info->hmc_obj[rsrc_type].cnt) {
1259 DEBUGOUT1("i40e_hmc_get_object_va: returns error %d\n",
1261 ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
/* macro fills sd_idx/sd_lmt (declarations lost in the listing) */
1264 /* find sd index and limit */
1265 I40E_FIND_SD_INDEX_LIMIT(hmc_info, rsrc_type, obj_idx, 1,
1268 sd_entry = &hmc_info->sd_table.sd_entry[sd_idx];
/* linear offset of the object inside the function-private memory space */
1269 obj_offset_in_fpm = hmc_info->hmc_obj[rsrc_type].base +
1270 hmc_info->hmc_obj[rsrc_type].size * obj_idx;
/* paged SD: go through the page descriptor table to the backing page */
1272 if (I40E_SD_TYPE_PAGED == sd_entry->entry_type) {
1273 I40E_FIND_PD_INDEX_LIMIT(hmc_info, rsrc_type, obj_idx, 1,
1275 rel_pd_idx = pd_idx % I40E_HMC_PD_CNT_IN_SD;
1276 pd_entry = &sd_entry->u.pd_table.pd_entry[rel_pd_idx];
1277 obj_offset_in_pd = (u32)(obj_offset_in_fpm %
1278 I40E_HMC_PAGED_BP_SIZE);
1279 *object_base = (u8 *)pd_entry->bp.addr.va + obj_offset_in_pd;
/* direct SD branch (the "} else {" line was dropped by the listing) */
1281 obj_offset_in_sd = (u32)(obj_offset_in_fpm %
1282 I40E_HMC_DIRECT_BP_SIZE);
1283 *object_base = (u8 *)sd_entry->u.bp.addr.va + obj_offset_in_sd;
/* Read the HMC Tx-queue context for @queue into *s: locate the context
 * bytes via i40e_hmc_get_object_va, then unpack per i40e_hmc_txq_ce_info.
 * NOTE(review): lossy listing -- the "u16 queue" parameter, the
 * "u8 *context_bytes;" declaration and (presumably) an
 * "if (err < 0) return err;" guard between the two calls were dropped.
 */
1290 * i40e_get_lan_tx_queue_context - return the HMC context for the queue
1291 * @hw: the hardware struct
1292 * @queue: the queue we care about
1293 * @s: the struct to be filled
1295 enum i40e_status_code i40e_get_lan_tx_queue_context(struct i40e_hw *hw,
1297 struct i40e_hmc_obj_txq *s)
1299 enum i40e_status_code err;
1302 err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes,
1303 I40E_HMC_LAN_TX, queue);
1307 return i40e_get_hmc_context(context_bytes,
1308 i40e_hmc_txq_ce_info, (u8 *)s);
/* Zero the HMC Tx-queue context for @queue: resolve the context bytes,
 * then memset them via i40e_clear_hmc_context.
 * NOTE(review): lossy listing -- the "u16 queue" parameter, the
 * "u8 *context_bytes;" declaration and (presumably) an
 * "if (err < 0) return err;" guard were dropped.
 */
1312 * i40e_clear_lan_tx_queue_context - clear the HMC context for the queue
1313 * @hw: the hardware struct
1314 * @queue: the queue we care about
1316 enum i40e_status_code i40e_clear_lan_tx_queue_context(struct i40e_hw *hw,
1319 enum i40e_status_code err;
1322 err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes,
1323 I40E_HMC_LAN_TX, queue);
1327 return i40e_clear_hmc_context(hw, context_bytes, I40E_HMC_LAN_TX);
/* Write *s into the HMC Tx-queue context for @queue: resolve the context
 * bytes, then pack per i40e_hmc_txq_ce_info.
 * NOTE(review): lossy listing -- the "u16 queue" parameter, the
 * "u8 *context_bytes;" declaration and (presumably) an
 * "if (err < 0) return err;" guard were dropped.
 */
1331 * i40e_set_lan_tx_queue_context - set the HMC context for the queue
1332 * @hw: the hardware struct
1333 * @queue: the queue we care about
1334 * @s: the struct to be filled
1336 enum i40e_status_code i40e_set_lan_tx_queue_context(struct i40e_hw *hw,
1338 struct i40e_hmc_obj_txq *s)
1340 enum i40e_status_code err;
1343 err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes,
1344 I40E_HMC_LAN_TX, queue);
1348 return i40e_set_hmc_context(context_bytes,
1349 i40e_hmc_txq_ce_info, (u8 *)s);
/* Read the HMC Rx-queue context for @queue into *s: locate the context
 * bytes via i40e_hmc_get_object_va, then unpack per i40e_hmc_rxq_ce_info.
 * NOTE(review): lossy listing -- the "u16 queue" parameter, the
 * "u8 *context_bytes;" declaration and (presumably) an
 * "if (err < 0) return err;" guard were dropped.
 */
1353 * i40e_get_lan_rx_queue_context - return the HMC context for the queue
1354 * @hw: the hardware struct
1355 * @queue: the queue we care about
1356 * @s: the struct to be filled
1358 enum i40e_status_code i40e_get_lan_rx_queue_context(struct i40e_hw *hw,
1360 struct i40e_hmc_obj_rxq *s)
1362 enum i40e_status_code err;
1365 err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes,
1366 I40E_HMC_LAN_RX, queue);
1370 return i40e_get_hmc_context(context_bytes,
1371 i40e_hmc_rxq_ce_info, (u8 *)s);
/* Zero the HMC Rx-queue context for @queue: resolve the context bytes,
 * then memset them via i40e_clear_hmc_context.
 * NOTE(review): lossy listing -- the "u16 queue" parameter, the
 * "u8 *context_bytes;" declaration and (presumably) an
 * "if (err < 0) return err;" guard were dropped.
 */
1375 * i40e_clear_lan_rx_queue_context - clear the HMC context for the queue
1376 * @hw: the hardware struct
1377 * @queue: the queue we care about
1379 enum i40e_status_code i40e_clear_lan_rx_queue_context(struct i40e_hw *hw,
1382 enum i40e_status_code err;
1385 err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes,
1386 I40E_HMC_LAN_RX, queue);
1390 return i40e_clear_hmc_context(hw, context_bytes, I40E_HMC_LAN_RX);
/* Write *s into the HMC Rx-queue context for @queue: resolve the context
 * bytes, then pack per i40e_hmc_rxq_ce_info.
 * NOTE(review): lossy listing -- the "u16 queue" parameter, the
 * "u8 *context_bytes;" declaration and (presumably) an
 * "if (err < 0) return err;" guard were dropped.
 */
1394 * i40e_set_lan_rx_queue_context - set the HMC context for the queue
1395 * @hw: the hardware struct
1396 * @queue: the queue we care about
1397 * @s: the struct to be filled
1399 enum i40e_status_code i40e_set_lan_rx_queue_context(struct i40e_hw *hw,
1401 struct i40e_hmc_obj_rxq *s)
1403 enum i40e_status_code err;
1406 err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes,
1407 I40E_HMC_LAN_RX, queue);
1411 return i40e_set_hmc_context(context_bytes,
1412 i40e_hmc_rxq_ce_info, (u8 *)s);