#define ICE_AQC_RES_TYPE_VSI_PRUNE_LIST_M \
(0xF << ICE_AQC_RES_TYPE_VSI_PRUNE_LIST_S)
__le16 num_elems;
- struct ice_aqc_res_elem elem[1];
+ struct ice_aqc_res_elem elem[STRUCT_HACK_VAR_LEN];
};
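
Throughout this patch the trailing one-element arrays (the pre-C99 "struct hack") become C99 flexible array members via STRUCT_HACK_VAR_LEN, which is defined empty further down. A minimal sketch of the difference, using a hypothetical struct rather than a real driver one:

    #define STRUCT_HACK_VAR_LEN	/* expands to nothing */

    struct demo {
    	__le16 num_elems;
    	struct ice_aqc_res_elem elem[STRUCT_HACK_VAR_LEN]; /* i.e. elem[] */
    };

    /* sizeof(struct demo) now covers only the fixed header; the old
     * elem[1] hack also counted one phantom element, which is why the
     * size computations below drop their "num - 1" corrections.
     */
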
/* Get Allocated Resource Descriptors Command (indirect 0x020A) */
* lookup-type
*/
__le16 hdr_len;
- u8 hdr[1];
+ u8 hdr[STRUCT_HACK_VAR_LEN];
};
/* Add/Update/Remove large action command/response entry
#define ICE_LG_ACT_STAT_COUNT 0x7
#define ICE_LG_ACT_STAT_COUNT_S 3
#define ICE_LG_ACT_STAT_COUNT_M (0x7F << ICE_LG_ACT_STAT_COUNT_S)
- __le32 act[1]; /* array of size for actions */
+ __le32 act[STRUCT_HACK_VAR_LEN]; /* array of actions */
};
/* Add/Update/Remove VSI list command/response entry
struct ice_sw_rule_vsi_list {
__le16 index; /* Index of VSI/Prune list */
__le16 number_vsi;
- __le16 vsi[1]; /* Array of number_vsi VSI numbers */
+ __le16 vsi[STRUCT_HACK_VAR_LEN]; /* Array of number_vsi VSI numbers */
};
#pragma pack(1)
struct ice_aqc_move_elem {
struct ice_aqc_txsched_move_grp_info_hdr hdr;
- __le32 teid[1];
+ __le32 teid[STRUCT_HACK_VAR_LEN];
};
struct ice_aqc_elem_info_bw {
struct ice_aqc_add_elem {
struct ice_aqc_txsched_topo_grp_info_hdr hdr;
- struct ice_aqc_txsched_elem_data generic[1];
+ struct ice_aqc_txsched_elem_data generic[STRUCT_HACK_VAR_LEN];
};
struct ice_aqc_get_topo_elem {
struct ice_aqc_delete_elem {
struct ice_aqc_txsched_topo_grp_info_hdr hdr;
- __le32 teid[1];
+ __le32 teid[STRUCT_HACK_VAR_LEN];
};
/* Query Port ETS (indirect 0x040E)
__le32 parent_teid;
u8 num_txqs;
u8 rsvd[3];
- struct ice_aqc_add_txqs_perq txqs[1];
+ struct ice_aqc_add_txqs_perq txqs[STRUCT_HACK_VAR_LEN];
};
/* Disable Tx LAN Queues (indirect 0x0C31) */
(0 << ICE_AQC_Q_DIS_BUF_ELEM_TYPE_S)
#define ICE_AQC_Q_DIS_BUF_ELEM_TYPE_RDMA_QSET \
(1 << ICE_AQC_Q_DIS_BUF_ELEM_TYPE_S)
- __le16 q_id[1];
+ __le16 q_id[STRUCT_HACK_VAR_LEN];
};
#pragma pack()
struct ice_aqc_move_txqs_data {
__le32 src_teid;
__le32 dest_teid;
- struct ice_aqc_move_txqs_elem txqs[1];
+ struct ice_aqc_move_txqs_elem txqs[STRUCT_HACK_VAR_LEN];
};
/* Download Package (indirect 0x0C40) */
/* Get Package Info List response buffer format (0x0C43) */
struct ice_aqc_get_pkg_info_resp {
__le32 count;
- struct ice_aqc_get_pkg_info pkg_info[1];
+ struct ice_aqc_get_pkg_info pkg_info[STRUCT_HACK_VAR_LEN];
};
/* Driver Shared Parameters (direct, 0x0C90) */
enum ice_status status;
u16 buf_len;
- buf_len = ice_struct_size(buf, elem, num - 1);
- buf = (struct ice_aqc_alloc_free_res_elem *)
- ice_malloc(hw, buf_len);
+ buf_len = ice_struct_size(buf, elem, num);
+ buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
if (!buf)
return ICE_ERR_NO_MEMORY;
if (status)
goto ice_alloc_res_exit;
- ice_memcpy(res, buf->elem, sizeof(buf->elem) * num,
+ ice_memcpy(res, buf->elem, sizeof(*buf->elem) * num,
ICE_NONDMA_TO_NONDMA);
ice_alloc_res_exit:
enum ice_status status;
u16 buf_len;
- buf_len = ice_struct_size(buf, elem, num - 1);
+ buf_len = ice_struct_size(buf, elem, num);
buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
if (!buf)
return ICE_ERR_NO_MEMORY;
/* Prepare buffer to free resource. */
buf->num_elems = CPU_TO_LE16(num);
buf->res_type = CPU_TO_LE16(type);
- ice_memcpy(buf->elem, res, sizeof(buf->elem) * num,
+ ice_memcpy(buf->elem, res, sizeof(*buf->elem) * num,
ICE_NONDMA_TO_NONDMA);
status = ice_aq_alloc_free_res(hw, num, buf, buf_len,
struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
struct ice_sq_cd *cd)
{
- u16 i, sum_header_size, sum_q_size = 0;
struct ice_aqc_add_tx_qgrp *list;
struct ice_aqc_add_txqs *cmd;
struct ice_aq_desc desc;
+ u16 i, sum_size = 0;
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
return ICE_ERR_PARAM;
- sum_header_size = num_qgrps *
- (sizeof(*qg_list) - sizeof(*qg_list->txqs));
-
- list = qg_list;
- for (i = 0; i < num_qgrps; i++) {
- struct ice_aqc_add_txqs_perq *q = list->txqs;
-
- sum_q_size += list->num_txqs * sizeof(*q);
- list = (struct ice_aqc_add_tx_qgrp *)(q + list->num_txqs);
+ for (i = 0, list = qg_list; i < num_qgrps; i++) {
+ sum_size += ice_struct_size(list, txqs, list->num_txqs);
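+ /* the next group header starts immediately after this group's txqs */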
+ list = (struct ice_aqc_add_tx_qgrp *)(list->txqs +
+ list->num_txqs);
}
- if (buf_size != (sum_header_size + sum_q_size))
+ if (buf_size != sum_size)
return ICE_ERR_PARAM;
desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
enum ice_disq_rst_src rst_src, u16 vmvf_num,
struct ice_sq_cd *cd)
{
+ struct ice_aqc_dis_txq_item *item;
struct ice_aqc_dis_txqs *cmd;
struct ice_aq_desc desc;
enum ice_status status;
*/
desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
- for (i = 0; i < num_qgrps; ++i) {
- /* Calculate the size taken up by the queue IDs in this group */
- sz += qg_list[i].num_qs * sizeof(qg_list[i].q_id);
-
- /* Add the size of the group header */
- sz += sizeof(qg_list[i]) - sizeof(qg_list[i].q_id);
+ for (i = 0, item = qg_list; i < num_qgrps; i++) {
+ u16 item_size = ice_struct_size(item, q_id, item->num_qs);
/* If the num of queues is even, add 2 bytes of padding */
- if ((qg_list[i].num_qs % 2) == 0)
- sz += 2;
+ if ((item->num_qs % 2) == 0)
+ item_size += 2;
+
+ sz += item_size;
+
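+ /* groups are packed back to back; step past this one, padding included */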
+ item = (struct ice_aqc_dis_txq_item *)((u8 *)item + item_size);
}
if (buf_size != sz)
struct ice_sq_cd *cd)
{
enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
- struct ice_aqc_dis_txq_item qg_list;
+ struct ice_aqc_dis_txq_item *qg_list;
struct ice_q_ctx *q_ctx;
- u16 i;
+ struct ice_hw *hw;
+ u16 i, buf_size;
if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
return ICE_ERR_CFG;
+ hw = pi->hw;
+
if (!num_queues) {
/* if queue is disabled already yet the disable queue command
* has to be sent to complete the VF reset, then call
* ice_aq_dis_lan_txq without any queue information
*/
if (rst_src)
- return ice_aq_dis_lan_txq(pi->hw, 0, NULL, 0, rst_src,
+ return ice_aq_dis_lan_txq(hw, 0, NULL, 0, rst_src,
vmvf_num, NULL);
return ICE_ERR_CFG;
}
+ buf_size = ice_struct_size(qg_list, q_id, 1);
+ qg_list = (struct ice_aqc_dis_txq_item *)ice_malloc(hw, buf_size);
+ if (!qg_list)
+ return ICE_ERR_NO_MEMORY;
+
ice_acquire_lock(&pi->sched_lock);
for (i = 0; i < num_queues; i++) {
node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
if (!node)
continue;
- q_ctx = ice_get_lan_q_ctx(pi->hw, vsi_handle, tc, q_handles[i]);
+ q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handles[i]);
if (!q_ctx) {
- ice_debug(pi->hw, ICE_DBG_SCHED, "invalid queue handle%d\n",
+ ice_debug(hw, ICE_DBG_SCHED, "invalid queue handle%d\n",
q_handles[i]);
continue;
}
if (q_ctx->q_handle != q_handles[i]) {
- ice_debug(pi->hw, ICE_DBG_SCHED, "Err:handles %d %d\n",
+ ice_debug(hw, ICE_DBG_SCHED, "Err:handles %d %d\n",
q_ctx->q_handle, q_handles[i]);
continue;
}
- qg_list.parent_teid = node->info.parent_teid;
- qg_list.num_qs = 1;
- qg_list.q_id[0] = CPU_TO_LE16(q_ids[i]);
- status = ice_aq_dis_lan_txq(pi->hw, 1, &qg_list,
- sizeof(qg_list), rst_src, vmvf_num,
- cd);
+ qg_list->parent_teid = node->info.parent_teid;
+ qg_list->num_qs = 1;
+ qg_list->q_id[0] = CPU_TO_LE16(q_ids[i]);
+ status = ice_aq_dis_lan_txq(hw, 1, qg_list, buf_size, rst_src,
+ vmvf_num, cd);
if (status != ICE_SUCCESS)
break;
q_ctx->q_handle = ICE_INVAL_Q_HANDLE;
}
ice_release_lock(&pi->sched_lock);
+ ice_free(hw, qg_list);
return status;
}
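
The stack-to-heap change above is forced by the array conversion: once q_id is a flexible array member, a local struct ice_aqc_dis_txq_item holds no storage for q_id at all, and sizeof(qg_list) no longer describes a usable buffer. Sketched under the same definitions:

    struct ice_aqc_dis_txq_item local;	/* room for zero queue IDs */

    /* so the header plus one ID must be allocated explicitly; note that
     * ice_struct_size only applies sizeof, so p is never dereferenced here
     */
    struct ice_aqc_dis_txq_item *p;
    u16 len = ice_struct_size(p, q_id, 1);

    p = (struct ice_aqc_dis_txq_item *)ice_malloc(hw, len);
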
#define ICE_IEEE_APP_TLV_LEN 11
#pragma pack(1)
-/* IEEE 802.1AB LLDP TLV structure */
-struct ice_lldp_generic_tlv {
- __be16 typelen;
- u8 tlvinfo[1];
-};
-
/* IEEE 802.1AB LLDP Organization specific TLV */
struct ice_lldp_org_tlv {
__be16 typelen;
__be32 ouisubtype;
- u8 tlvinfo[1];
+ u8 tlvinfo[STRUCT_HACK_VAR_LEN];
};
#pragma pack()
#define ICE_CEE_FEAT_TLV_WILLING_M 0x40
#define ICE_CEE_FEAT_TLV_ERR_M 0x20
u8 subtype;
- u8 tlvinfo[1];
+ u8 tlvinfo[STRUCT_HACK_VAR_LEN];
};
#pragma pack(1)
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
- size = ice_struct_size(pkg_info, pkg_info, ICE_PKG_CNT - 1);
+ size = ice_struct_size(pkg_info, pkg_info, ICE_PKG_CNT);
pkg_info = (struct ice_aqc_get_pkg_info_resp *)ice_malloc(hw, size);
if (!pkg_info)
return ICE_ERR_NO_MEMORY;
u32 seg_count;
u32 i;
- if (len < sizeof(*pkg))
+ if (len < ice_struct_size(pkg, seg_offset, 1))
return ICE_ERR_BUF_TOO_SHORT;
if (pkg->pkg_format_ver.major != ICE_PKG_FMT_VER_MAJ ||
return ICE_ERR_CFG;
/* make sure segment array fits in package length */
- if (len < ice_struct_size(pkg, seg_offset, seg_count - 1))
+ if (len < ice_struct_size(pkg, seg_offset, seg_count))
return ICE_ERR_BUF_TOO_SHORT;
/* all segments must fit within length */
}
/* Check if FW is compatible with the OS package */
- size = ice_struct_size(pkg, pkg_info, ICE_PKG_CNT - 1);
+ size = ice_struct_size(pkg, pkg_info, ICE_PKG_CNT);
pkg = (struct ice_aqc_get_pkg_info_resp *)ice_malloc(hw, size);
if (!pkg)
return ICE_ERR_NO_MEMORY;
sect_rx = (struct ice_boost_tcam_section *)
ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
- sizeof(*sect_rx));
+ ice_struct_size(sect_rx, tcam, 1));
if (!sect_rx)
goto ice_create_tunnel_err;
sect_rx->count = CPU_TO_LE16(1);
sect_tx = (struct ice_boost_tcam_section *)
ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,
- sizeof(*sect_tx));
+ ice_struct_size(sect_tx, tcam, 1));
if (!sect_tx)
goto ice_create_tunnel_err;
sect_tx->count = CPU_TO_LE16(1);
}
/* size of section - there is at least one entry */
- size = ice_struct_size(sect_rx, tcam, count - 1);
+ size = ice_struct_size(sect_rx, tcam, count);
bld = ice_pkg_buf_alloc(hw);
if (!bld) {
id = ice_sect_id(blk, ICE_VEC_TBL);
p = (struct ice_pkg_es *)
- ice_pkg_buf_alloc_section(bld, id, sizeof(*p) +
+ ice_pkg_buf_alloc_section(bld, id,
+ ice_struct_size(p, es, 1) +
vec_size -
sizeof(p->es[0]));
id = ice_sect_id(blk, ICE_PROF_TCAM);
p = (struct ice_prof_id_section *)
- ice_pkg_buf_alloc_section(bld, id, sizeof(*p));
+ ice_pkg_buf_alloc_section(bld, id,
+ ice_struct_size(p, entry, 1));
if (!p)
return ICE_ERR_MAX_LIMIT;
id = ice_sect_id(blk, ICE_XLT1);
p = (struct ice_xlt1_section *)
- ice_pkg_buf_alloc_section(bld, id, sizeof(*p));
+ ice_pkg_buf_alloc_section(bld, id,
+ ice_struct_size(p, value, 1));
if (!p)
return ICE_ERR_MAX_LIMIT;
case ICE_VSIG_REM:
id = ice_sect_id(blk, ICE_XLT2);
p = (struct ice_xlt2_section *)
- ice_pkg_buf_alloc_section(bld, id, sizeof(*p));
+ ice_pkg_buf_alloc_section(bld, id,
+ ice_struct_size(p, value, 1));
if (!p)
return ICE_ERR_MAX_LIMIT;
struct ice_pkg_hdr {
struct ice_pkg_ver pkg_format_ver;
__le32 seg_count;
- __le32 seg_offset[1];
+ __le32 seg_offset[STRUCT_HACK_VAR_LEN];
};
/* generic segment */
struct ice_seg {
struct ice_generic_seg_hdr hdr;
__le32 device_table_count;
- struct ice_device_id_entry device_table[1];
+ struct ice_device_id_entry device_table[STRUCT_HACK_VAR_LEN];
};
struct ice_nvm_table {
__le32 table_count;
- __le32 vers[1];
+ __le32 vers[STRUCT_HACK_VAR_LEN];
};
struct ice_buf {
struct ice_buf_table {
__le32 buf_count;
- struct ice_buf buf_array[1];
+ struct ice_buf buf_array[STRUCT_HACK_VAR_LEN];
};
/* global metadata specific segment */
struct ice_buf_hdr {
__le16 section_count;
__le16 data_end;
- struct ice_section_entry section_entry[1];
+ struct ice_section_entry section_entry[STRUCT_HACK_VAR_LEN];
};
#define ICE_MAX_ENTRIES_IN_BUF(hd_sz, ent_sz) ((ICE_PKG_BUF_SIZE - \
- sizeof(struct ice_buf_hdr) - (hd_sz)) / (ent_sz))
+ ice_struct_size((struct ice_buf_hdr *)0, section_entry, 1) - (hd_sz)) /\
+ (ent_sz))
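
The (struct ice_buf_hdr *)0 cast works because sizeof never evaluates its operand, so a null pointer of the right type lets ice_struct_size be used inside a macro with no instance in scope. A sketch, not driver code:

    /* size of the buffer header up to and including one section_entry;
     * nothing is dereferenced, the null pointer only supplies the type
     */
    u32 hdr_sz = ice_struct_size((struct ice_buf_hdr *)0, section_entry, 1);
    /* equals sizeof(struct ice_buf_hdr) + sizeof(struct ice_section_entry) */
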
/* ice package section IDs */
#define ICE_SID_XLT0_SW 10
struct ice_label_section {
__le16 count;
- struct ice_label label[1];
+ struct ice_label label[STRUCT_HACK_VAR_LEN];
};
#define ICE_MAX_LABELS_IN_BUF ICE_MAX_ENTRIES_IN_BUF( \
- sizeof(struct ice_label_section) - sizeof(struct ice_label), \
- sizeof(struct ice_label))
+ ice_struct_size((struct ice_label_section *)0, label, 1) - \
+ sizeof(struct ice_label), sizeof(struct ice_label))
struct ice_sw_fv_section {
__le16 count;
__le16 base_offset;
- struct ice_fv fv[1];
+ struct ice_fv fv[STRUCT_HACK_VAR_LEN];
};
struct ice_sw_fv_list_entry {
struct ice_boost_tcam_section {
__le16 count;
__le16 reserved;
- struct ice_boost_tcam_entry tcam[1];
+ struct ice_boost_tcam_entry tcam[STRUCT_HACK_VAR_LEN];
};
#define ICE_MAX_BST_TCAMS_IN_BUF ICE_MAX_ENTRIES_IN_BUF( \
- sizeof(struct ice_boost_tcam_section) - \
+ ice_struct_size((struct ice_boost_tcam_section *)0, tcam, 1) - \
sizeof(struct ice_boost_tcam_entry), \
sizeof(struct ice_boost_tcam_entry))
-#pragma pack(1)
struct ice_xlt1_section {
__le16 count;
__le16 offset;
- u8 value[1];
+ u8 value[STRUCT_HACK_VAR_LEN];
};
-#pragma pack()
-
-#define ICE_XLT1_SIZE(n) (sizeof(struct ice_xlt1_section) + \
- (sizeof(u8) * ((n) - 1)))
struct ice_xlt2_section {
__le16 count;
__le16 offset;
- __le16 value[1];
+ __le16 value[STRUCT_HACK_VAR_LEN];
};
-#define ICE_XLT2_SIZE(n) (sizeof(struct ice_xlt2_section) + \
- (sizeof(u16) * ((n) - 1)))
-
struct ice_prof_redir_section {
__le16 count;
__le16 offset;
- u8 redir_value[1];
+ u8 redir_value[STRUCT_HACK_VAR_LEN];
};
-#define ICE_PROF_REDIR_SIZE(n) (sizeof(struct ice_prof_redir_section) + \
- (sizeof(u8) * ((n) - 1)))
-
/* package buffer building */
struct ice_buf_build {
struct ice_pkg_es {
__le16 count;
__le16 offset;
- struct ice_fv_word es[1];
+ struct ice_fv_word es[STRUCT_HACK_VAR_LEN];
};
struct ice_es {
u8 prof_id;
};
+#pragma pack()
+
struct ice_prof_id_section {
__le16 count;
- struct ice_prof_tcam_entry entry[1];
+ struct ice_prof_tcam_entry entry[STRUCT_HACK_VAR_LEN];
};
-#pragma pack()
struct ice_prof_tcam {
u32 sid;
enum ice_status status;
u16 buf_size;
- buf_size = sizeof(*buf) + sizeof(u32) * (num_nodes - 1);
+ buf_size = ice_struct_size(buf, teid, num_nodes);
buf = (struct ice_aqc_delete_elem *)ice_malloc(hw, buf_size);
if (!buf)
return ICE_ERR_NO_MEMORY;
u16 buf_size;
u32 teid;
- buf_size = ice_struct_size(buf, generic, num_nodes - 1);
+ buf_size = ice_struct_size(buf, generic, num_nodes);
buf = (struct ice_aqc_add_elem *)ice_malloc(hw, buf_size);
if (!buf)
return ICE_ERR_NO_MEMORY;
struct ice_sched_node *node;
u16 i, grps_movd = 0;
struct ice_hw *hw;
+ u16 buf_len;
hw = pi->hw;
hw->max_children[parent->tx_sched_layer])
return ICE_ERR_AQ_FULL;
- buf = (struct ice_aqc_move_elem *)ice_malloc(hw, sizeof(*buf));
+ buf_len = ice_struct_size(buf, teid, 1);
+ buf = (struct ice_aqc_move_elem *)ice_malloc(hw, buf_len);
if (!buf)
return ICE_ERR_NO_MEMORY;
buf->hdr.dest_parent_teid = parent->info.node_teid;
buf->teid[0] = node->info.node_teid;
buf->hdr.num_elems = CPU_TO_LE16(1);
- status = ice_aq_move_sched_elems(hw, 1, buf, sizeof(*buf),
+ status = ice_aq_move_sched_elems(hw, 1, buf, buf_len,
&grps_movd, NULL);
if (status && grps_movd != 1) {
status = ICE_ERR_CFG;
enum ice_status status;
u16 buf_len;
- buf_len = sizeof(*sw_buf);
- sw_buf = (struct ice_aqc_alloc_free_res_elem *)
- ice_malloc(hw, buf_len);
+ buf_len = ice_struct_size(sw_buf, elem, 1);
+ sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
if (!sw_buf)
return ICE_ERR_NO_MEMORY;
enum ice_status status, ret_status;
u16 buf_len;
- buf_len = sizeof(*sw_buf);
- sw_buf = (struct ice_aqc_alloc_free_res_elem *)
- ice_malloc(hw, buf_len);
+ buf_len = ice_struct_size(sw_buf, elem, 1);
+ sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
if (!sw_buf)
return ICE_ERR_NO_MEMORY;
enum ice_status status;
u16 buf_len;
- buf_len = sizeof(*sw_buf);
- sw_buf = (struct ice_aqc_alloc_free_res_elem *)
- ice_malloc(hw, buf_len);
+ buf_len = ice_struct_size(sw_buf, elem, 1);
+ sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
if (!sw_buf)
return ICE_ERR_NO_MEMORY;
sw_buf->num_elems = CPU_TO_LE16(1);
enum ice_status status;
u16 buf_len;
- buf_len = sizeof(*sw_buf);
+ buf_len = ice_struct_size(sw_buf, elem, 1);
sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
if (!sw_buf)
return ICE_ERR_NO_MEMORY;
u16 buf_len;
/* Allocate resource */
- buf_len = sizeof(*buf);
- buf = (struct ice_aqc_alloc_free_res_elem *)
- ice_malloc(hw, buf_len);
+ buf_len = ice_struct_size(buf, elem, 1);
+ buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
if (!buf)
return ICE_ERR_NO_MEMORY;
u16 buf_len;
/* Free resource */
- buf_len = sizeof(*buf);
- buf = (struct ice_aqc_alloc_free_res_elem *)
- ice_malloc(hw, buf_len);
+ buf_len = ice_struct_size(buf, elem, 1);
+ buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
if (!buf)
return ICE_ERR_NO_MEMORY;
return ICE_ERR_PARAM;
/* Allocate resource for large action */
- buf_len = sizeof(*sw_buf);
- sw_buf = (struct ice_aqc_alloc_free_res_elem *)
- ice_malloc(hw, buf_len);
+ buf_len = ice_struct_size(sw_buf, elem, 1);
+ sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
if (!sw_buf)
return ICE_ERR_NO_MEMORY;
#define DUMMY_ETH_HDR_LEN 16
#define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \
- (sizeof(struct ice_aqc_sw_rules_elem) - \
- FIELD_SIZEOF(struct ice_aqc_sw_rules_elem, pdata) + \
- sizeof(struct ice_sw_rule_lkup_rx_tx) + DUMMY_ETH_HDR_LEN - 1)
+ (offsetof(struct ice_aqc_sw_rules_elem, pdata.lkup_tx_rx.hdr) + \
+ (DUMMY_ETH_HDR_LEN * \
+ sizeof(((struct ice_sw_rule_lkup_rx_tx *)0)->hdr[0])))
#define ICE_SW_RULE_RX_TX_NO_HDR_SIZE \
- (sizeof(struct ice_aqc_sw_rules_elem) - \
- FIELD_SIZEOF(struct ice_aqc_sw_rules_elem, pdata) + \
- sizeof(struct ice_sw_rule_lkup_rx_tx) - 1)
+ (offsetof(struct ice_aqc_sw_rules_elem, pdata.lkup_tx_rx.hdr))
#define ICE_SW_RULE_LG_ACT_SIZE(n) \
- (sizeof(struct ice_aqc_sw_rules_elem) - \
- FIELD_SIZEOF(struct ice_aqc_sw_rules_elem, pdata) + \
- sizeof(struct ice_sw_rule_lg_act) - \
- FIELD_SIZEOF(struct ice_sw_rule_lg_act, act) + \
- ((n) * FIELD_SIZEOF(struct ice_sw_rule_lg_act, act)))
+ (offsetof(struct ice_aqc_sw_rules_elem, pdata.lg_act.act) + \
+ ((n) * sizeof(((struct ice_sw_rule_lg_act *)0)->act[0])))
#define ICE_SW_RULE_VSI_LIST_SIZE(n) \
- (sizeof(struct ice_aqc_sw_rules_elem) - \
- FIELD_SIZEOF(struct ice_aqc_sw_rules_elem, pdata) + \
- sizeof(struct ice_sw_rule_vsi_list) - \
- FIELD_SIZEOF(struct ice_sw_rule_vsi_list, vsi) + \
- ((n) * FIELD_SIZEOF(struct ice_sw_rule_vsi_list, vsi)))
+ (offsetof(struct ice_aqc_sw_rules_elem, pdata.vsi_list.vsi) + \
+ ((n) * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi[0])))
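
All of these rewrites follow one pattern: a rule's wire size is the offset of the nested flexible array inside the containing AQ element plus n payload entries, replacing the old chains of sizeof/FIELD_SIZEOF arithmetic and their trailing "- 1". For example, with the macros above:

    /* buffer length for a VSI list rule carrying four VSI numbers */
    u16 rule_sz = ICE_SW_RULE_VSI_LIST_SIZE(4);
    /* == offsetof(struct ice_aqc_sw_rules_elem, pdata.vsi_list.vsi) +
     *    4 * sizeof(__le16)
     */
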
/* Worst case buffer length for ice_aqc_opc_get_res_alloc */
#define ICE_MAX_RES_TYPES 0x80
#define IS_ASCII(_ch) ((_ch) < 0x80)
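+/* Defined empty so the one-element "struct hack" arrays become C99
+ * flexible array members: elem[STRUCT_HACK_VAR_LEN] reads as elem[].
+ */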
+#define STRUCT_HACK_VAR_LEN
+/**
+ * ice_struct_size - size of struct with C99 flexible array member
+ * @ptr: pointer to structure
+ * @field: flexible array member (last member of the structure)
+ * @num: number of elements of that flexible array member
+ */
#define ice_struct_size(ptr, field, num) \
(sizeof(*(ptr)) + sizeof(*(ptr)->field) * (num))
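
The macro body is unchanged; only the call sites change. With a flexible array member, sizeof(*(ptr)) covers just the fixed part of the struct, so callers now pass the real element count. The allocation pattern used throughout this patch:

    struct ice_aqc_alloc_free_res_elem *buf;
    u16 buf_len;

    /* old hack: buf_len = ice_struct_size(buf, elem, num - 1); */
    buf_len = ice_struct_size(buf, elem, num);	/* header + num elements */
    buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
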
int err;
struct ice_vsi *vsi;
struct ice_hw *hw;
- struct ice_aqc_add_tx_qgrp txq_elem;
+ struct ice_aqc_add_tx_qgrp *txq_elem;
struct ice_tlan_ctx tx_ctx;
+ int buf_len;
PMD_INIT_FUNC_TRACE();
return -EINVAL;
}
vsi = txq->vsi;
hw = ICE_VSI_TO_HW(vsi);
+
+ buf_len = ice_struct_size(txq_elem, txqs, 1);
+ txq_elem = ice_malloc(hw, buf_len);
+ if (!txq_elem)
+ return -ENOMEM;
+
- memset(&txq_elem, 0, sizeof(txq_elem));
memset(&tx_ctx, 0, sizeof(tx_ctx));
- txq_elem.num_txqs = 1;
- txq_elem.txqs[0].txq_id = rte_cpu_to_le_16(txq->reg_idx);
+ txq_elem->num_txqs = 1;
+ txq_elem->txqs[0].txq_id = rte_cpu_to_le_16(txq->reg_idx);
tx_ctx.base = txq->tx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
tx_ctx.qlen = txq->nb_tx_desc;
tx_ctx.tso_qnum = txq->reg_idx; /* index for tso state structure */
tx_ctx.legacy_int = 1; /* Legacy or Advanced Host Interface */
- ice_set_ctx(hw, (uint8_t *)&tx_ctx, txq_elem.txqs[0].txq_ctx,
+ ice_set_ctx(hw, (uint8_t *)&tx_ctx, txq_elem->txqs[0].txq_ctx,
ice_tlan_ctx_info);
txq->qtx_tail = hw->hw_addr + QTX_COMM_DBELL(txq->reg_idx);
/* Fix me, we assume TC always 0 here */
err = ice_ena_vsi_txq(hw->port_info, vsi->idx, 0, tx_queue_id, 1,
- &txq_elem, sizeof(txq_elem), NULL);
+ txq_elem, buf_len, NULL);
if (err) {
PMD_DRV_LOG(ERR, "Failed to add lan txq");
+ rte_free(txq_elem);
return -EIO;
}
/* store the schedule node id */
- txq->q_teid = txq_elem.txqs[0].q_teid;
+ txq->q_teid = txq_elem->txqs[0].q_teid;
dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+
+ rte_free(txq_elem);
return 0;
}
int err;
struct ice_vsi *vsi;
struct ice_hw *hw;
- struct ice_aqc_add_tx_qgrp txq_elem;
+ struct ice_aqc_add_tx_qgrp *txq_elem;
struct ice_tlan_ctx tx_ctx;
+ int buf_len;
PMD_INIT_FUNC_TRACE();
return -EINVAL;
}
vsi = txq->vsi;
hw = ICE_VSI_TO_HW(vsi);
+
+ buf_len = ice_struct_size(txq_elem, txqs, 1);
+ txq_elem = ice_malloc(hw, buf_len);
+ if (!txq_elem)
+ return -ENOMEM;
+
- memset(&txq_elem, 0, sizeof(txq_elem));
memset(&tx_ctx, 0, sizeof(tx_ctx));
- txq_elem.num_txqs = 1;
- txq_elem.txqs[0].txq_id = rte_cpu_to_le_16(txq->reg_idx);
+ txq_elem->num_txqs = 1;
+ txq_elem->txqs[0].txq_id = rte_cpu_to_le_16(txq->reg_idx);
tx_ctx.base = txq->tx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
tx_ctx.qlen = txq->nb_tx_desc;
tx_ctx.tso_qnum = txq->reg_idx; /* index for tso state structure */
tx_ctx.legacy_int = 1; /* Legacy or Advanced Host Interface */
- ice_set_ctx(hw, (uint8_t *)&tx_ctx, txq_elem.txqs[0].txq_ctx,
+ ice_set_ctx(hw, (uint8_t *)&tx_ctx, txq_elem->txqs[0].txq_ctx,
ice_tlan_ctx_info);
txq->qtx_tail = hw->hw_addr + QTX_COMM_DBELL(txq->reg_idx);
/* Fix me, we assume TC always 0 here */
err = ice_ena_vsi_txq(hw->port_info, vsi->idx, 0, tx_queue_id, 1,
- &txq_elem, sizeof(txq_elem), NULL);
+ txq_elem, buf_len, NULL);
if (err) {
PMD_DRV_LOG(ERR, "Failed to add FDIR txq");
+ rte_free(txq_elem);
return -EIO;
}
/* store the schedule node id */
- txq->q_teid = txq_elem.txqs[0].q_teid;
+ txq->q_teid = txq_elem->txqs[0].q_teid;
+ rte_free(txq_elem);
return 0;
}