-/*
- * Copyright (c) 2016 QLogic Corporation.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
+ * www.cavium.com
*/
#ifndef __ECORE_CHAIN_H__
void **pp_virt_addr_tbl;
union {
- struct ecore_chain_pbl_u16 u16;
- struct ecore_chain_pbl_u32 u32;
+ struct ecore_chain_pbl_u16 pbl_u16;
+ struct ecore_chain_pbl_u32 pbl_u32;
} c;
} pbl;
if ((p_chain->u.chain16.prod_idx &
p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
p_prod_idx = &p_chain->u.chain16.prod_idx;
- p_prod_page_idx = &p_chain->pbl.c.u16.prod_page_idx;
+ p_prod_page_idx = &p_chain->pbl.c.pbl_u16.prod_page_idx;
ecore_chain_advance_page(p_chain, &p_chain->p_prod_elem,
p_prod_idx, p_prod_page_idx);
}
if ((p_chain->u.chain32.prod_idx &
p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
p_prod_idx = &p_chain->u.chain32.prod_idx;
- p_prod_page_idx = &p_chain->pbl.c.u32.prod_page_idx;
+ p_prod_page_idx = &p_chain->pbl.c.pbl_u32.prod_page_idx;
ecore_chain_advance_page(p_chain, &p_chain->p_prod_elem,
p_prod_idx, p_prod_page_idx);
}
if ((p_chain->u.chain16.cons_idx &
p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
p_cons_idx = &p_chain->u.chain16.cons_idx;
- p_cons_page_idx = &p_chain->pbl.c.u16.cons_page_idx;
+ p_cons_page_idx = &p_chain->pbl.c.pbl_u16.cons_page_idx;
ecore_chain_advance_page(p_chain, &p_chain->p_cons_elem,
p_cons_idx, p_cons_page_idx);
}
if ((p_chain->u.chain32.cons_idx &
p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
p_cons_idx = &p_chain->u.chain32.cons_idx;
- p_cons_page_idx = &p_chain->pbl.c.u32.cons_page_idx;
+ p_cons_page_idx = &p_chain->pbl.c.pbl_u32.cons_page_idx;
ecore_chain_advance_page(p_chain, &p_chain->p_cons_elem,
p_cons_idx, p_cons_page_idx);
}
p_chain->p_prod_elem = p_chain->p_virt_addr;
if (p_chain->mode == ECORE_CHAIN_MODE_PBL) {
- /* Use (page_cnt - 1) as a reset value for the prod/cons page's
+ /* Use "page_cnt-1" as a reset value for the prod/cons page's
* indices, to avoid unnecessary page advancing on the first
* call to ecore_chain_produce/consume. Instead, the indices
* will be advanced to page_cnt and then will be wrapped to 0.
u32 reset_val = p_chain->page_cnt - 1;
if (is_chain_u16(p_chain)) {
- p_chain->pbl.c.u16.prod_page_idx = (u16)reset_val;
- p_chain->pbl.c.u16.cons_page_idx = (u16)reset_val;
+ p_chain->pbl.c.pbl_u16.prod_page_idx = (u16)reset_val;
+ p_chain->pbl.c.pbl_u16.cons_page_idx = (u16)reset_val;
} else {
- p_chain->pbl.c.u32.prod_page_idx = reset_val;
- p_chain->pbl.c.u32.cons_page_idx = reset_val;
+ p_chain->pbl.c.pbl_u32.prod_page_idx = reset_val;
+ p_chain->pbl.c.pbl_u32.cons_page_idx = reset_val;
}
}
static OSAL_INLINE void ecore_chain_set_prod(struct ecore_chain *p_chain,
u32 prod_idx, void *p_prod_elem)
{
+ if (p_chain->mode == ECORE_CHAIN_MODE_PBL) {
+ u32 cur_prod, page_mask, page_cnt, page_diff;
+
+ cur_prod = is_chain_u16(p_chain) ? p_chain->u.chain16.prod_idx
+ : p_chain->u.chain32.prod_idx;
+
+ /* Assume that number of elements in a page is power of 2 */
+ page_mask = ~p_chain->elem_per_page_mask;
+
+ /* Use "cur_prod - 1" and "prod_idx - 1" since producer index
+ * reaches the first element of next page before the page index
+ * is incremented. See ecore_chain_produce().
+ * Index wrap around is not a problem because the difference
+ * between current and given producer indexes is always
+ * positive and lower than the chain's capacity.
+ */
+ page_diff = (((cur_prod - 1) & page_mask) -
+ ((prod_idx - 1) & page_mask)) /
+ p_chain->elem_per_page;
+
+ page_cnt = ecore_chain_get_page_cnt(p_chain);
+ if (is_chain_u16(p_chain))
+ p_chain->pbl.c.pbl_u16.prod_page_idx =
+ (p_chain->pbl.c.pbl_u16.prod_page_idx -
+ page_diff + page_cnt) % page_cnt;
+ else
+ p_chain->pbl.c.pbl_u32.prod_page_idx =
+ (p_chain->pbl.c.pbl_u32.prod_page_idx -
+ page_diff + page_cnt) % page_cnt;
+ }
+
if (is_chain_u16(p_chain))
p_chain->u.chain16.prod_idx = (u16)prod_idx;
else
p_chain->p_prod_elem = p_prod_elem;
}
+/**
+ * @brief ecore_chain_set_cons - sets the consumer index to the given value
+ *
+ * In PBL mode, also rewinds the consumer page index by the number of whole
+ * pages between the current and the given consumer index, so that the page
+ * index stays consistent with the element index.
+ *
+ * @param p_chain
+ * @param cons_idx - consumer element index to set
+ * @param p_cons_elem - virtual address of the element at cons_idx
+ */
+static OSAL_INLINE void ecore_chain_set_cons(struct ecore_chain *p_chain,
+					     u32 cons_idx, void *p_cons_elem)
+{
+	if (p_chain->mode == ECORE_CHAIN_MODE_PBL) {
+		u32 cur_cons, page_mask, page_cnt, page_diff;
+
+		cur_cons = is_chain_u16(p_chain) ? p_chain->u.chain16.cons_idx
+						 : p_chain->u.chain32.cons_idx;
+
+		/* Assume that number of elements in a page is power of 2 */
+		page_mask = ~p_chain->elem_per_page_mask;
+
+		/* Use "cur_cons - 1" and "cons_idx - 1" since consumer index
+		 * reaches the first element of next page before the page index
+		 * is incremented. See ecore_chain_consume().
+		 * Index wrap around is not a problem because the difference
+		 * between current and given consumer indexes is always
+		 * positive and lower than the chain's capacity.
+		 */
+		page_diff = (((cur_cons - 1) & page_mask) -
+			     ((cons_idx - 1) & page_mask)) /
+			    p_chain->elem_per_page;
+
+		page_cnt = ecore_chain_get_page_cnt(p_chain);
+		if (is_chain_u16(p_chain))
+			p_chain->pbl.c.pbl_u16.cons_page_idx =
+				(p_chain->pbl.c.pbl_u16.cons_page_idx -
+				 page_diff + page_cnt) % page_cnt;
+		else
+			p_chain->pbl.c.pbl_u32.cons_page_idx =
+				(p_chain->pbl.c.pbl_u32.cons_page_idx -
+				 page_diff + page_cnt) % page_cnt;
+	}
+
+	if (is_chain_u16(p_chain))
+		p_chain->u.chain16.cons_idx = (u16)cons_idx;
+	else
+		p_chain->u.chain32.cons_idx = cons_idx;
+
+	p_chain->p_cons_elem = p_cons_elem;
+}
+
/**
* @brief ecore_chain_pbl_zero_mem - set chain memory to 0
*