* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
-} __attribute__((packed));
+} __rte_packed;
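For context on the substitution made throughout this patch: __rte_packed is DPDK's portable wrapper around the compiler packed attribute, so structure layout is unchanged; only the spelling moves to the EAL macro. A minimal sketch of the definition, assuming a GCC/clang build (it lives in rte_common.h; shown here for illustration only):

/* Sketch: what the macro used below expands to on GCC/clang builds. */
#define __rte_packed __attribute__((packed))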
/* This is the HWRM response header. */
/* hwrm_resp_hdr (size:64b/8B) */
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
-} __attribute__((packed));
+} __rte_packed;
/*
* TLV encapsulated message. Use the TLV type field of the
* and it must be an integer multiple of 8B.
*/
uint16_t length;
-} __attribute__((packed));
+} __rte_packed;
/* Input */
/* input (size:128b/16B) */
* and must be cleared to zero before the request is made.
*/
uint64_t resp_addr;
-} __attribute__((packed));
+} __rte_packed;
/* Output */
/* output (size:64b/8B) */
* memory.
*/
uint16_t resp_len;
-} __attribute__((packed));
+} __rte_packed;
/* Short Command Structure */
/* hwrm_short_input (size:128b/16B) */
* This area must be 16B aligned.
*/
uint64_t req_addr;
-} __attribute__((packed));
+} __rte_packed;
/*
* Command numbering
#define HWRM_NVM_RAW_WRITE_BLK UINT32_C(0xffff)
#define HWRM_LAST HWRM_NVM_RAW_WRITE_BLK
uint16_t unused_0[3];
-} __attribute__((packed));
+} __rte_packed;
/* Return Codes */
/* ret_codes (size:64b/8B) */
#define HWRM_ERR_CODE_LAST \
HWRM_ERR_CODE_CMD_NOT_SUPPORTED
uint16_t unused_0[3];
-} __attribute__((packed));
+} __rte_packed;
/* Output */
/* hwrm_err_output (size:128b/16B) */
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
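The doc comment above states the HWRM completion convention: firmware writes the response payload first and the valid byte last, so a driver must observe a nonzero valid byte (with an I/O read barrier) before trusting the rest of the response. A minimal polling sketch, assuming a hypothetical helper name; rte_io_rmb() is DPDK's I/O read barrier from rte_io.h:

#include <stdint.h>
#include <stdbool.h>
#include <rte_io.h>   /* rte_io_rmb() */

/* Illustrative only: spin until firmware marks an HWRM response valid. */
static bool
example_hwrm_poll_valid(const volatile uint8_t *valid, unsigned int max_tries)
{
	while (max_tries--) {
		if (*valid != 0) {
			rte_io_rmb();   /* order: read valid before the payload */
			return true;
		}
	}
	return false;
}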
/*
* Following is the signature for HWRM message field that indicates not
* applicable (All F's). Need to cast it the size of the field if needed.
*/
uint8_t hwrm_intf_upd;
uint8_t unused_0[5];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_ver_get_output (size:1408b/176B) */
struct hwrm_ver_get_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/* bd_base (size:64b/8B) */
struct bd_base {
#define BD_BASE_TYPE_TX_BD_LONG_INLINE UINT32_C(0x11)
#define BD_BASE_TYPE_LAST BD_BASE_TYPE_TX_BD_LONG_INLINE
uint8_t unused_1[7];
-} __attribute__((packed));
+} __rte_packed;
/* tx_bd_short (size:128b/16B) */
struct tx_bd_short {
* This value must be valid on all BDs of a packet.
*/
uint64_t address;
-} __attribute__((packed));
+} __rte_packed;
/* tx_bd_long (size:128b/16B) */
struct tx_bd_long {
* This value must be valid on all BDs of a packet.
*/
uint64_t address;
-} __attribute__((packed));
+} __rte_packed;
/* Last 16 bytes of tx_bd_long. */
/* tx_bd_long_hi (size:128b/16B) */
#define TX_BD_LONG_CFA_META_KEY_VLAN_TAG (UINT32_C(0x1) << 28)
#define TX_BD_LONG_CFA_META_KEY_LAST \
TX_BD_LONG_CFA_META_KEY_VLAN_TAG
-} __attribute__((packed));
+} __rte_packed;
/*
* This structure is used to inform the NIC of packet data that needs to be
(UINT32_C(0x1) << 28)
#define TX_BD_LONG_INLINE_CFA_META_KEY_LAST \
TX_BD_LONG_INLINE_CFA_META_KEY_VLAN_TAG
-} __attribute__((packed));
+} __rte_packed;
/* tx_bd_empty (size:128b/16B) */
struct tx_bd_empty {
uint8_t unused_2;
uint8_t unused_3[3];
uint8_t unused_4[8];
-} __attribute__((packed));
+} __rte_packed;
/* rx_prod_pkt_bd (size:128b/16B) */
struct rx_prod_pkt_bd {
* be placed in host memory.
*/
uint64_t address;
-} __attribute__((packed));
+} __rte_packed;
/* rx_prod_bfr_bd (size:128b/16B) */
struct rx_prod_bfr_bd {
* be placed in host memory.
*/
uint64_t address;
-} __attribute__((packed));
+} __rte_packed;
/* rx_prod_agg_bd (size:128b/16B) */
struct rx_prod_agg_bd {
* be placed in host memory.
*/
uint64_t address;
-} __attribute__((packed));
+} __rte_packed;
/* cmpl_base (size:128b/16B) */
struct cmpl_base {
#define CMPL_BASE_INFO3_SFT 1
/* info4 is 32 b */
uint32_t info4;
-} __attribute__((packed));
+} __rte_packed;
/* tx_cmpl (size:128b/16B) */
struct tx_cmpl {
uint16_t unused_1;
/* unused3 is 32 b */
uint32_t unused_2;
-} __attribute__((packed));
+} __rte_packed;
/* rx_pkt_cmpl (size:128b/16B) */
struct rx_pkt_cmpl {
* based on the mode bits and key value in the VNIC.
*/
uint32_t rss_hash;
-} __attribute__((packed));
+} __rte_packed;
/* Last 16 bytes of rx_pkt_cmpl. */
/* rx_pkt_cmpl_hi (size:128b/16B) */
*/
#define RX_PKT_CMPL_REORDER_MASK UINT32_C(0xffffff)
#define RX_PKT_CMPL_REORDER_SFT 0
-} __attribute__((packed));
+} __rte_packed;
/*
* This TPA completion structure is used on devices where the
* based on the mode bits and key value in the VNIC.
*/
uint32_t rss_hash;
-} __attribute__((packed));
+} __rte_packed;
/*
* Last 16 bytes of rx_tpa_start_cmpl.
*/
#define RX_TPA_START_CMPL_INNER_L4_SIZE_MASK UINT32_C(0xf8000000)
#define RX_TPA_START_CMPL_INNER_L4_SIZE_SFT 27
-} __attribute__((packed));
+} __rte_packed;
/*
* This TPA completion structure is used on devices where the
* option is present in the packet.
*/
uint32_t tsdelta;
-} __attribute__((packed));
+} __rte_packed;
/*
* Last 16 bytes of rx_tpa_end_cmpl.
* completion that corresponds to this TPA end completion.
*/
uint32_t start_opaque;
-} __attribute__((packed));
+} __rte_packed;
/*
* This TPA completion structure is used on devices where the
* based on the mode bits and key value in the VNIC.
*/
uint32_t rss_hash;
-} __attribute__((packed));
+} __rte_packed;
/*
* Last 16 bytes of rx_tpa_v2_start_cmpl.
*/
#define RX_TPA_V2_START_CMPL_INNER_L4_SIZE_MASK UINT32_C(0xf8000000)
#define RX_TPA_V2_START_CMPL_INNER_L4_SIZE_SFT 27
-} __attribute__((packed));
+} __rte_packed;
/*
* This TPA completion structure is used on devices where the
* option is present in the packet.
*/
uint32_t tsdelta;
-} __attribute__((packed));
+} __rte_packed;
/*
* Last 16 bytes of rx_tpa_v2_end_cmpl.
* completion that corresponds to this TPA end completion.
*/
uint32_t start_opaque;
-} __attribute__((packed));
+} __rte_packed;
/*
* This TPA completion structure is used on devices where the
*/
uint16_t agg_id;
uint32_t unused_1;
-} __attribute__((packed));
+} __rte_packed;
/* rx_abuf_cmpl (size:128b/16B) */
struct rx_abuf_cmpl {
#define RX_ABUF_CMPL_V UINT32_C(0x1)
/* unused3 is 32 b */
uint32_t unused_2;
-} __attribute__((packed));
+} __rte_packed;
/* eject_cmpl (size:128b/16B) */
struct eject_cmpl {
uint16_t reserved16;
/* unused3 is 32 b */
uint32_t unused_2;
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_cmpl (size:128b/16B) */
struct hwrm_cmpl {
#define HWRM_CMPL_V UINT32_C(0x1)
/* unused4 is 32 b */
uint32_t unused_3;
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_fwd_req_cmpl (size:128b/16B) */
struct hwrm_fwd_req_cmpl {
/* Address of forwarded request. */
#define HWRM_FWD_REQ_CMPL_REQ_BUF_ADDR_MASK UINT32_C(0xfffffffe)
#define HWRM_FWD_REQ_CMPL_REQ_BUF_ADDR_SFT 1
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_fwd_resp_cmpl (size:128b/16B) */
struct hwrm_fwd_resp_cmpl {
/* Address of forwarded request. */
#define HWRM_FWD_RESP_CMPL_RESP_BUF_ADDR_MASK UINT32_C(0xfffffffe)
#define HWRM_FWD_RESP_CMPL_RESP_BUF_ADDR_SFT 1
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_async_event_cmpl (size:128b/16B) */
struct hwrm_async_event_cmpl {
uint16_t timestamp_hi;
/* Event specific data */
uint32_t event_data1;
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_async_event_cmpl_link_status_change (size:128b/16B) */
struct hwrm_async_event_cmpl_link_status_change {
UINT32_C(0xff00000)
#define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PF_ID_SFT \
20
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_async_event_cmpl_link_mtu_change (size:128b/16B) */
struct hwrm_async_event_cmpl_link_mtu_change {
#define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_DATA1_NEW_MTU_MASK \
UINT32_C(0xffff)
#define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_DATA1_NEW_MTU_SFT 0
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_async_event_cmpl_link_speed_change (size:128b/16B) */
struct hwrm_async_event_cmpl_link_speed_change {
UINT32_C(0xffff0000)
#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_SFT \
16
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_async_event_cmpl_dcb_config_change (size:128b/16B) */
struct hwrm_async_event_cmpl_dcb_config_change {
(UINT32_C(0xff) << 24)
#define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_LAST \
HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_NONE
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_async_event_cmpl_port_conn_not_allowed (size:128b/16B) */
struct hwrm_async_event_cmpl_port_conn_not_allowed {
(UINT32_C(0x3) << 16)
#define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_LAST \
HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_async_event_cmpl_link_speed_cfg_not_allowed (size:128b/16B) */
struct hwrm_async_event_cmpl_link_speed_cfg_not_allowed {
UINT32_C(0xffff)
#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_DATA1_PORT_ID_SFT \
0
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_async_event_cmpl_link_speed_cfg_change (size:128b/16B) */
struct hwrm_async_event_cmpl_link_speed_cfg_change {
*/
#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_ILLEGAL_LINK_SPEED_CFG \
UINT32_C(0x20000)
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_async_event_cmpl_port_phy_cfg_change (size:128b/16B) */
struct hwrm_async_event_cmpl_port_phy_cfg_change {
*/
#define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_DATA1_PAUSE_CFG_CHANGE \
UINT32_C(0x40000)
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_async_event_cmpl_reset_notify (size:128b/16B) */
struct hwrm_async_event_cmpl_reset_notify {
UINT32_C(0xffff0000)
#define HWRM_ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DELAY_IN_100MS_TICKS_SFT \
16
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_async_event_cmpl_error_recovery (size:128b/16B) */
struct hwrm_async_event_cmpl_error_recovery {
*/
#define HWRM_ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_RECOVERY_ENABLED \
UINT32_C(0x2)
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_async_event_cmpl_func_drvr_unload (size:128b/16B) */
struct hwrm_async_event_cmpl_func_drvr_unload {
UINT32_C(0xffff)
#define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_SFT \
0
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_async_event_cmpl_func_drvr_load (size:128b/16B) */
struct hwrm_async_event_cmpl_func_drvr_load {
#define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_DATA1_FUNC_ID_MASK \
UINT32_C(0xffff)
#define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_DATA1_FUNC_ID_SFT 0
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_async_event_cmpl_func_flr_proc_cmplt (size:128b/16B) */
struct hwrm_async_event_cmpl_func_flr_proc_cmplt {
UINT32_C(0xffff)
#define HWRM_ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_EVENT_DATA1_FUNC_ID_SFT \
0
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_async_event_cmpl_pf_drvr_unload (size:128b/16B) */
struct hwrm_async_event_cmpl_pf_drvr_unload {
#define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_PORT_MASK \
UINT32_C(0x70000)
#define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_PORT_SFT 16
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_async_event_cmpl_pf_drvr_load (size:128b/16B) */
struct hwrm_async_event_cmpl_pf_drvr_load {
#define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_PORT_MASK \
UINT32_C(0x70000)
#define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_PORT_SFT 16
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_async_event_cmpl_vf_flr (size:128b/16B) */
struct hwrm_async_event_cmpl_vf_flr {
#define HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_PF_ID_MASK \
UINT32_C(0xff0000)
#define HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_PF_ID_SFT 16
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_async_event_cmpl_vf_mac_addr_change (size:128b/16B) */
struct hwrm_async_event_cmpl_vf_mac_addr_change {
UINT32_C(0xffff)
#define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_DATA1_VF_ID_SFT \
0
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_async_event_cmpl_pf_vf_comm_status_change (size:128b/16B) */
struct hwrm_async_event_cmpl_pf_vf_comm_status_change {
*/
#define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_EVENT_DATA1_COMM_ESTABLISHED \
UINT32_C(0x1)
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_async_event_cmpl_vf_cfg_change (size:128b/16B) */
struct hwrm_async_event_cmpl_vf_cfg_change {
*/
#define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_TRUSTED_VF_CFG_CHANGE \
UINT32_C(0x10)
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_async_event_cmpl_llfc_pfc_change (size:128b/16B) */
struct hwrm_async_event_cmpl_llfc_pfc_change {
UINT32_C(0x1fffe0)
#define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_PORT_ID_SFT \
5
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_async_event_cmpl_default_vnic_change (size:128b/16B) */
struct hwrm_async_event_cmpl_default_vnic_change {
UINT32_C(0x3fffc00)
#define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_VF_ID_SFT \
10
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_async_event_cmpl_hw_flow_aged (size:128b/16B) */
struct hwrm_async_event_cmpl_hw_flow_aged {
(UINT32_C(0x1) << 31)
#define HWRM_ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION_LAST \
HWRM_ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION_TX
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_async_event_cmpl_eem_cache_flush_req (size:128b/16B) */
struct hwrm_async_event_cmpl_eem_cache_flush_req {
uint16_t timestamp_hi;
/* Event specific data */
uint32_t event_data1;
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_async_event_cmpl_eem_cache_flush_done (size:128b/16B) */
struct hwrm_async_event_cmpl_eem_cache_flush_done {
UINT32_C(0xffff)
#define HWRM_ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_DATA1_FID_SFT \
0
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_async_event_cmpl_tcp_flag_action_change (size:128b/16B) */
struct hwrm_async_event_cmpl_tcp_flag_action_change {
uint16_t timestamp_hi;
/* Event specific data */
uint32_t event_data1;
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_async_event_cmpl_eem_flow_active (size:128b/16B) */
struct hwrm_async_event_cmpl_eem_flow_active {
(UINT32_C(0x1) << 31)
#define HWRM_ASYNC_EVENT_CMPL_EEM_FLOW_ACTIVE_EVENT_DATA1_MODE_LAST \
HWRM_ASYNC_EVENT_CMPL_EEM_FLOW_ACTIVE_EVENT_DATA1_MODE_1
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_async_event_cmpl_eem_cfg_change (size:128b/16B) */
struct hwrm_async_event_cmpl_eem_cfg_change {
*/
#define HWRM_ASYNC_EVENT_CMPL_EEM_CFG_CHANGE_EVENT_DATA1_EEM_RX_ENABLE \
UINT32_C(0x2)
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_async_event_cmpl_quiesce_done (size:128b/16B) */
struct hwrm_async_event_cmpl_quiesce_done {
/* Time stamp for error event */
#define HWRM_ASYNC_EVENT_CMPL_QUIESCE_DONE_EVENT_DATA1_TIMESTAMP \
UINT32_C(0x1)
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_async_event_cmpl_fw_trace_msg (size:128b/16B) */
struct hwrm_async_event_cmpl_fw_trace_msg {
#define HWRM_ASYNC_EVENT_CMPL_FW_TRACE_MSG_EVENT_DATA1_BYTE9_MASK \
UINT32_C(0xff000000)
#define HWRM_ASYNC_EVENT_CMPL_FW_TRACE_MSG_EVENT_DATA1_BYTE9_SFT 24
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_async_event_cmpl_hwrm_error (size:128b/16B) */
struct hwrm_async_event_cmpl_hwrm_error {
/* Time stamp for error event */
#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA1_TIMESTAMP \
UINT32_C(0x1)
-} __attribute__((packed));
+} __rte_packed;
/*******************
* hwrm_func_reset *
#define HWRM_FUNC_RESET_INPUT_FUNC_RESET_LEVEL_LAST \
HWRM_FUNC_RESET_INPUT_FUNC_RESET_LEVEL_RESETVF
uint8_t unused_0;
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_func_reset_output (size:128b/16B) */
struct hwrm_func_reset_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/********************
* hwrm_func_getfid *
*/
uint16_t pci_id;
uint8_t unused_0[2];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_func_getfid_output (size:128b/16B) */
struct hwrm_func_getfid_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/**********************
* hwrm_func_vf_alloc *
uint16_t first_vf_id;
/* The number of virtual functions requested. */
uint16_t num_vfs;
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_func_vf_alloc_output (size:128b/16B) */
struct hwrm_func_vf_alloc_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/*********************
* hwrm_func_vf_free *
* 0xFFFF - Cleanup all children of this PF.
*/
uint16_t num_vfs;
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_func_vf_free_output (size:128b/16B) */
struct hwrm_func_vf_free_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/********************
* hwrm_func_vf_cfg *
/* The number of HW ring groups requested for the VF. */
uint16_t num_hw_ring_grps;
uint8_t unused_0[4];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_func_vf_cfg_output (size:128b/16B) */
struct hwrm_func_vf_cfg_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/*******************
* hwrm_func_qcaps *
*/
uint16_t fid;
uint8_t unused_0[6];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_func_qcaps_output (size:640b/80B) */
struct hwrm_func_qcaps_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/******************
* hwrm_func_qcfg *
*/
uint16_t fid;
uint8_t unused_0[6];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_func_qcfg_output (size:704b/88B) */
struct hwrm_func_qcfg_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/*****************
* hwrm_func_cfg *
* be reserved for this function on the RX side.
*/
uint16_t num_mcast_filters;
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_func_cfg_output (size:128b/16B) */
struct hwrm_func_cfg_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/********************
* hwrm_func_qstats *
#define HWRM_FUNC_QSTATS_INPUT_FLAGS_LAST \
HWRM_FUNC_QSTATS_INPUT_FLAGS_ROCE_ONLY
uint8_t unused_0[5];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_func_qstats_output (size:1408b/176B) */
struct hwrm_func_qstats_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/***********************
* hwrm_func_clr_stats *
*/
uint16_t fid;
uint8_t unused_0[6];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_func_clr_stats_output (size:128b/16B) */
struct hwrm_func_clr_stats_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/**************************
* hwrm_func_vf_resc_free *
*/
uint16_t vf_id;
uint8_t unused_0[6];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_func_vf_resc_free_output (size:128b/16B) */
struct hwrm_func_vf_resc_free_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/**********************
* hwrm_func_drv_rgtr *
uint16_t ver_upd;
/* This is the 16bit patch version of the driver. */
uint16_t ver_patch;
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_func_drv_rgtr_output (size:128b/16B) */
struct hwrm_func_drv_rgtr_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/************************
* hwrm_func_drv_unrgtr *
#define HWRM_FUNC_DRV_UNRGTR_INPUT_FLAGS_PREPARE_FOR_SHUTDOWN \
UINT32_C(0x1)
uint8_t unused_0[4];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_func_drv_unrgtr_output (size:128b/16B) */
struct hwrm_func_drv_unrgtr_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/**********************
* hwrm_func_buf_rgtr *
* HWRM.
*/
uint64_t resp_buf_addr;
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_func_buf_rgtr_output (size:128b/16B) */
struct hwrm_func_buf_rgtr_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/************************
* hwrm_func_buf_unrgtr *
*/
uint16_t vf_id;
uint8_t unused_0[2];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_func_buf_unrgtr_output (size:128b/16B) */
struct hwrm_func_buf_unrgtr_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/**********************
* hwrm_func_drv_qver *
*/
uint16_t fid;
uint8_t unused_0[2];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_func_drv_qver_output (size:256b/32B) */
struct hwrm_func_drv_qver_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/****************************
* hwrm_func_resource_qcaps *
*/
uint16_t fid;
uint8_t unused_0[6];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_func_resource_qcaps_output (size:448b/56B) */
struct hwrm_func_resource_qcaps_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/*********************************
* hwrm_func_backing_store_qcaps *
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_func_backing_store_qcaps_output (size:640b/80B) */
struct hwrm_func_backing_store_qcaps_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/*******************************
* hwrm_func_backing_store_cfg *
uint16_t mrav_entry_size;
/* Number of bytes that have been allocated for each context entry. */
uint16_t tim_entry_size;
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_func_backing_store_cfg_output (size:128b/16B) */
struct hwrm_func_backing_store_cfg_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/********************************
* hwrm_func_backing_store_qcfg *
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_func_backing_store_qcfg_output (size:1920b/240B) */
struct hwrm_func_backing_store_qcfg_output {
* is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/****************************
* hwrm_error_recovery_qcfg *
*/
uint64_t resp_addr;
uint8_t unused_0[8];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_error_recovery_qcfg_output (size:1664b/208B) */
struct hwrm_error_recovery_qcfg_output {
* is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/***********************
* hwrm_func_vlan_qcfg *
*/
uint16_t fid;
uint8_t unused_0[6];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_func_vlan_qcfg_output (size:320b/40B) */
struct hwrm_func_vlan_qcfg_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/**********************
* hwrm_func_vlan_cfg *
/* Future use. */
uint32_t rsvd2;
uint8_t unused_3[4];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_func_vlan_cfg_output (size:128b/16B) */
struct hwrm_func_vlan_cfg_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/*******************************
* hwrm_func_vf_vnic_ids_query *
uint32_t max_vnic_id_cnt;
/* This is the address for VF VNIC ID table */
uint64_t vnic_id_tbl_addr;
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_func_vf_vnic_ids_query_output (size:128b/16B) */
struct hwrm_func_vf_vnic_ids_query_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/***********************
* hwrm_func_vf_bw_cfg *
(UINT32_C(0xf) << 12)
#define HWRM_FUNC_VF_BW_CFG_INPUT_VFN_RATE_LAST \
HWRM_FUNC_VF_BW_CFG_INPUT_VFN_RATE_PCT_100
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_func_vf_bw_cfg_output (size:128b/16B) */
struct hwrm_func_vf_bw_cfg_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/************************
* hwrm_func_vf_bw_qcfg *
/* The physical VF id of interest */
#define HWRM_FUNC_VF_BW_QCFG_INPUT_VFN_VFID_MASK UINT32_C(0xfff)
#define HWRM_FUNC_VF_BW_QCFG_INPUT_VFN_VFID_SFT 0
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_func_vf_bw_qcfg_output (size:960b/120B) */
struct hwrm_func_vf_bw_qcfg_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/***************************
* hwrm_func_drv_if_change *
*/
#define HWRM_FUNC_DRV_IF_CHANGE_INPUT_FLAGS_UP UINT32_C(0x1)
uint32_t unused;
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_func_drv_if_change_output (size:128b/16B) */
struct hwrm_func_drv_if_change_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/*******************************
* hwrm_func_host_pf_ids_query *
#define HWRM_FUNC_HOST_PF_IDS_QUERY_INPUT_FILTER_LAST \
HWRM_FUNC_HOST_PF_IDS_QUERY_INPUT_FILTER_ROCE
uint8_t unused_1[6];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_func_host_pf_ids_query_output (size:128b/16B) */
struct hwrm_func_host_pf_ids_query_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/*********************
* hwrm_port_phy_cfg *
#define HWRM_PORT_PHY_CFG_INPUT_TX_LPI_TIMER_MASK UINT32_C(0xffffff)
#define HWRM_PORT_PHY_CFG_INPUT_TX_LPI_TIMER_SFT 0
uint32_t unused_3;
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_port_phy_cfg_output (size:128b/16B) */
struct hwrm_port_phy_cfg_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_port_phy_cfg_cmd_err (size:64b/8B) */
struct hwrm_port_phy_cfg_cmd_err {
#define HWRM_PORT_PHY_CFG_CMD_ERR_CODE_LAST \
HWRM_PORT_PHY_CFG_CMD_ERR_CODE_RETRY
uint8_t unused_0[7];
-} __attribute__((packed));
+} __rte_packed;
/**********************
* hwrm_port_phy_qcfg *
/* Port ID of port that is to be queried. */
uint16_t port_id;
uint8_t unused_0[6];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_port_phy_qcfg_output (size:768b/96B) */
struct hwrm_port_phy_qcfg_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/*********************
* hwrm_port_mac_cfg *
*/
int32_t ptp_freq_adj_ppb;
uint8_t unused_1[4];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_port_mac_cfg_output (size:128b/16B) */
struct hwrm_port_mac_cfg_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/**********************
* hwrm_port_mac_qcfg *
/* Port ID of port that is to be configured. */
uint16_t port_id;
uint8_t unused_0[6];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_port_mac_qcfg_output (size:192b/24B) */
struct hwrm_port_mac_qcfg_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/**************************
* hwrm_port_mac_ptp_qcfg *
/* Port ID of port that is being queried. */
uint16_t port_id;
uint8_t unused_0[6];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_port_mac_ptp_qcfg_output (size:640b/80B) */
struct hwrm_port_mac_ptp_qcfg_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/* Port Tx Statistics Formats */
/* tx_port_stats (size:3264b/408B) */
uint64_t tx_stat_discard;
/* Total Tx Error Drops per Port reported by STATS block */
uint64_t tx_stat_error;
-} __attribute__((packed));
+} __rte_packed;
/* Port Rx Statistics Formats */
/* rx_port_stats (size:4224b/528B) */
/* Total Rx Discards per Port reported by STATS block */
uint64_t rx_stat_discard;
uint64_t rx_stat_err;
-} __attribute__((packed));
+} __rte_packed;
/********************
* hwrm_port_qstats *
* Rx port statistics will be stored
*/
uint64_t rx_stat_host_addr;
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_port_qstats_output (size:128b/16B) */
struct hwrm_port_qstats_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/* Port Tx Statistics extended Formats */
/* tx_port_stats_ext (size:2048b/256B) */
uint64_t pfc_pri7_tx_duration_us;
/* Number of times, a XON -> XOFF and XOFF -> XON transitions occur for priority 7 */
uint64_t pfc_pri7_tx_transitions;
-} __attribute__((packed));
+} __rte_packed;
/* Port Rx Statistics extended Formats */
/* rx_port_stats_ext (size:3648b/456B) */
uint64_t rx_discard_packets_cos6;
/* Total number of rx discard packets count on cos queue 7 */
uint64_t rx_discard_packets_cos7;
-} __attribute__((packed));
+} __rte_packed;
/************************
* hwrm_port_qstats_ext *
* Rx port statistics will be stored
*/
uint64_t rx_stat_host_addr;
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_port_qstats_ext_output (size:128b/16B) */
struct hwrm_port_qstats_ext_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/*************************
* hwrm_port_lpbk_qstats *
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_port_lpbk_qstats_output (size:768b/96B) */
struct hwrm_port_lpbk_qstats_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/***********************
* hwrm_port_clr_stats *
*/
#define HWRM_PORT_CLR_STATS_INPUT_FLAGS_ROCE_COUNTERS UINT32_C(0x1)
uint8_t unused_0[5];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_port_clr_stats_output (size:128b/16B) */
struct hwrm_port_clr_stats_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/***********************
* hwrm_port_phy_qcaps *
/* Port ID of port that is being queried. */
uint16_t port_id;
uint8_t unused_0[6];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_port_phy_qcaps_output (size:192b/24B) */
struct hwrm_port_phy_qcaps_output {
#define HWRM_PORT_PHY_QCAPS_OUTPUT_VALID_MASK \
UINT32_C(0xff000000)
#define HWRM_PORT_PHY_QCAPS_OUTPUT_VALID_SFT 24
-} __attribute__((packed));
+} __rte_packed;
/****************************
* hwrm_port_phy_mdio_write *
uint8_t cl45_mdio;
/* */
uint8_t unused_1[7];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_port_phy_mdio_write_output (size:128b/16B) */
struct hwrm_port_phy_mdio_write_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/***************************
* hwrm_port_phy_mdio_read *
uint8_t cl45_mdio;
/* */
uint8_t unused_1;
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_port_phy_mdio_read_output (size:128b/16B) */
struct hwrm_port_phy_mdio_read_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/*********************
* hwrm_port_led_cfg *
uint8_t led3_group_id;
/* Reserved field. */
uint8_t rsvd3;
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_port_led_cfg_output (size:128b/16B) */
struct hwrm_port_led_cfg_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/**********************
* hwrm_port_led_qcfg *
/* Port ID of port whose LED configuration is being queried. */
uint16_t port_id;
uint8_t unused_0[6];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_port_led_qcfg_output (size:448b/56B) */
struct hwrm_port_led_qcfg_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/***********************
* hwrm_port_led_qcaps *
/* Port ID of port whose LED configuration is being queried. */
uint16_t port_id;
uint8_t unused_0[6];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_port_led_qcaps_output (size:384b/48B) */
struct hwrm_port_led_qcaps_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/***********************
* hwrm_port_prbs_test *
* bit1 = lane1 ..bit31 = lane31
*/
uint32_t rx_lane_map;
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_port_prbs_test_output (size:128b/16B) */
struct hwrm_port_prbs_test_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/**********************
* hwrm_port_dsc_dump *
*/
#define HWRM_PORT_DSC_DUMP_INPUT_DSC_DUMP_CONFIG_START_RETRIEVE \
UINT32_C(0x1)
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_port_dsc_dump_output (size:128b/16B) */
struct hwrm_port_dsc_dump_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/******************************
* hwrm_port_sfp_sideband_cfg *
/* When this bit is set to '1', the module will be powered down. */
#define HWRM_PORT_SFP_SIDEBAND_CFG_INPUT_FLAGS_PWR_DIS \
UINT32_C(0x40)
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_port_sfp_sideband_cfg_output (size:128b/16B) */
struct hwrm_port_sfp_sideband_cfg_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/*******************************
* hwrm_port_sfp_sideband_qcfg *
/* Port ID of port that is to be queried. */
uint16_t port_id;
uint8_t unused_0[6];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_port_sfp_sideband_qcfg_output (size:192b/24B) */
struct hwrm_port_sfp_sideband_qcfg_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/**********************************
* hwrm_port_phy_mdio_bus_acquire *
*/
uint16_t mdio_bus_timeout;
uint8_t unused_0[2];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_port_phy_mdio_bus_acquire_output (size:128b/16B) */
struct hwrm_port_phy_mdio_bus_acquire_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/**********************************
* hwrm_port_phy_mdio_bus_release *
*/
uint16_t client_id;
uint8_t unused_0[4];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_port_phy_mdio_bus_release_output (size:128b/16B) */
struct hwrm_port_phy_mdio_bus_release_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/***********************
* hwrm_queue_qportcfg *
#define HWRM_QUEUE_QPORTCFG_INPUT_DRV_QMAP_CAP_LAST \
HWRM_QUEUE_QPORTCFG_INPUT_DRV_QMAP_CAP_ENABLED
uint8_t unused_0;
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_queue_qportcfg_output (size:256b/32B) */
struct hwrm_queue_qportcfg_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/*******************
* hwrm_queue_qcfg *
HWRM_QUEUE_QCFG_INPUT_FLAGS_PATH_RX
/* Queue ID of the queue. */
uint32_t queue_id;
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_queue_qcfg_output (size:128b/16B) */
struct hwrm_queue_qcfg_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/******************
* hwrm_queue_cfg *
#define HWRM_QUEUE_CFG_INPUT_SERVICE_PROFILE_LAST \
HWRM_QUEUE_CFG_INPUT_SERVICE_PROFILE_UNKNOWN
uint8_t unused_0[7];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_queue_cfg_output (size:128b/16B) */
struct hwrm_queue_cfg_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/*****************************
* hwrm_queue_pfcenable_qcfg *
*/
uint16_t port_id;
uint8_t unused_0[6];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_queue_pfcenable_qcfg_output (size:128b/16B) */
struct hwrm_queue_pfcenable_qcfg_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/****************************
* hwrm_queue_pfcenable_cfg *
*/
uint16_t port_id;
uint8_t unused_0[2];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_queue_pfcenable_cfg_output (size:128b/16B) */
struct hwrm_queue_pfcenable_cfg_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/***************************
* hwrm_queue_pri2cos_qcfg *
*/
uint8_t port_id;
uint8_t unused_0[3];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_queue_pri2cos_qcfg_output (size:192b/24B) */
struct hwrm_queue_pri2cos_qcfg_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/**************************
* hwrm_queue_pri2cos_cfg *
*/
uint8_t pri7_cos_queue_id;
uint8_t unused_0[7];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_queue_pri2cos_cfg_output (size:128b/16B) */
struct hwrm_queue_pri2cos_cfg_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/**************************
* hwrm_queue_cos2bw_qcfg *
*/
uint16_t port_id;
uint8_t unused_0[6];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_queue_cos2bw_qcfg_output (size:896b/112B) */
struct hwrm_queue_cos2bw_qcfg_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/*************************
* hwrm_queue_cos2bw_cfg *
*/
uint8_t queue_id7_bw_weight;
uint8_t unused_1[5];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_queue_cos2bw_cfg_output (size:128b/16B) */
struct hwrm_queue_cos2bw_cfg_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/*************************
* hwrm_queue_dscp_qcaps *
*/
uint8_t port_id;
uint8_t unused_0[7];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_queue_dscp_qcaps_output (size:128b/16B) */
struct hwrm_queue_dscp_qcaps_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/****************************
* hwrm_queue_dscp2pri_qcfg *
/* Size of the buffer pointed to by dest_data_addr. */
uint16_t dest_data_buffer_size;
uint8_t unused_1[4];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_queue_dscp2pri_qcfg_output (size:128b/16B) */
struct hwrm_queue_dscp2pri_qcfg_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/***************************
* hwrm_queue_dscp2pri_cfg *
*/
uint16_t entry_cnt;
uint8_t unused_0[4];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_queue_dscp2pri_cfg_output (size:128b/16B) */
struct hwrm_queue_dscp2pri_cfg_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/*************************
* hwrm_queue_mpls_qcaps *
*/
uint8_t port_id;
uint8_t unused_0[7];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_queue_mpls_qcaps_output (size:128b/16B) */
struct hwrm_queue_mpls_qcaps_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/******************************
* hwrm_queue_mplstc2pri_qcfg *
*/
uint8_t port_id;
uint8_t unused_0[7];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_queue_mplstc2pri_qcfg_output (size:192b/24B) */
struct hwrm_queue_mplstc2pri_qcfg_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/*****************************
* hwrm_queue_mplstc2pri_cfg *
* be changed before traffic has started.
*/
uint8_t tc7_pri_queue_id;
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_queue_mplstc2pri_cfg_output (size:128b/16B) */
struct hwrm_queue_mplstc2pri_cfg_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/*******************
* hwrm_vnic_alloc *
*/
#define HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT UINT32_C(0x1)
uint8_t unused_0[4];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_vnic_alloc_output (size:128b/16B) */
struct hwrm_vnic_alloc_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/******************
* hwrm_vnic_free *
/* Logical vnic ID */
uint32_t vnic_id;
uint8_t unused_0[4];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_vnic_free_output (size:128b/16B) */
struct hwrm_vnic_free_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/*****************
* hwrm_vnic_cfg *
*/
uint16_t queue_id;
uint8_t unused0[6];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_vnic_cfg_output (size:128b/16B) */
struct hwrm_vnic_cfg_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/******************
* hwrm_vnic_qcfg *
/* ID of Virtual Function whose VNIC resource is being queried. */
uint16_t vf_id;
uint8_t unused_0[6];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_vnic_qcfg_output (size:256b/32B) */
struct hwrm_vnic_qcfg_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/*******************
* hwrm_vnic_qcaps *
uint64_t resp_addr;
uint32_t enables;
uint8_t unused_0[4];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_vnic_qcaps_output (size:192b/24B) */
struct hwrm_vnic_qcaps_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/*********************
* hwrm_vnic_tpa_cfg *
* and can be queried using hwrm_vnic_tpa_qcfg.
*/
uint32_t min_agg_len;
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_vnic_tpa_cfg_output (size:128b/16B) */
struct hwrm_vnic_tpa_cfg_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/*********************
* hwrm_vnic_rss_cfg *
/* Index to the rss indirection table. */
uint16_t rss_ctx_idx;
uint8_t unused_1[6];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_vnic_rss_cfg_output (size:128b/16B) */
struct hwrm_vnic_rss_cfg_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_vnic_rss_cfg_cmd_err (size:64b/8B) */
struct hwrm_vnic_rss_cfg_cmd_err {
#define HWRM_VNIC_RSS_CFG_CMD_ERR_CODE_LAST \
HWRM_VNIC_RSS_CFG_CMD_ERR_CODE_INTERFACE_NOT_READY
uint8_t unused_0[7];
-} __attribute__((packed));
+} __rte_packed;
/**********************
* hwrm_vnic_rss_qcfg *
/* Index to the rss indirection table. */
uint16_t rss_ctx_idx;
uint8_t unused_0[6];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_vnic_rss_qcfg_output (size:512b/64B) */
struct hwrm_vnic_rss_qcfg_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/**************************
* hwrm_vnic_plcmodes_cfg *
*/
uint16_t hds_threshold;
uint8_t unused_0[6];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_vnic_plcmodes_cfg_output (size:128b/16B) */
struct hwrm_vnic_plcmodes_cfg_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/***************************
* hwrm_vnic_plcmodes_qcfg *
/* Logical vnic ID */
uint32_t vnic_id;
uint8_t unused_0[4];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_vnic_plcmodes_qcfg_output (size:192b/24B) */
struct hwrm_vnic_plcmodes_qcfg_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/**********************************
* hwrm_vnic_rss_cos_lb_ctx_alloc *
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_vnic_rss_cos_lb_ctx_alloc_output (size:128b/16B) */
struct hwrm_vnic_rss_cos_lb_ctx_alloc_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/*********************************
* hwrm_vnic_rss_cos_lb_ctx_free *
/* rss_cos_lb_ctx_id is 16 b */
uint16_t rss_cos_lb_ctx_id;
uint8_t unused_0[6];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_vnic_rss_cos_lb_ctx_free_output (size:128b/16B) */
struct hwrm_vnic_rss_cos_lb_ctx_free_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/*******************
* hwrm_ring_alloc *
* record.
*/
uint64_t cq_handle;
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_ring_alloc_output (size:128b/16B) */
struct hwrm_ring_alloc_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/******************
* hwrm_ring_free *
/* Physical number of ring allocated. */
uint16_t ring_id;
uint8_t unused_1[4];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_ring_free_output (size:128b/16B) */
struct hwrm_ring_free_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/*******************
* hwrm_ring_reset *
/* Physical number of the ring. */
uint16_t ring_id;
uint8_t unused_1[4];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_ring_reset_output (size:128b/16B) */
struct hwrm_ring_reset_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/**************************
* hwrm_ring_aggint_qcaps *
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_ring_aggint_qcaps_output (size:384b/48B) */
struct hwrm_ring_aggint_qcaps_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/**************************************
* hwrm_ring_cmpl_ring_qaggint_params *
/* Physical number of completion ring. */
uint16_t ring_id;
uint8_t unused_0[6];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_ring_cmpl_ring_qaggint_params_output (size:256b/32B) */
struct hwrm_ring_cmpl_ring_qaggint_params_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/*****************************************
* hwrm_ring_cmpl_ring_cfg_aggint_params *
#define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_NUM_CMPL_AGGR_INT \
UINT32_C(0x20)
uint8_t unused_0[4];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_ring_cmpl_ring_cfg_aggint_params_output (size:128b/16B) */
struct hwrm_ring_cmpl_ring_cfg_aggint_params_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/***********************
* hwrm_ring_grp_alloc *
* with the ring group.
*/
uint16_t sc;
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_ring_grp_alloc_output (size:128b/16B) */
struct hwrm_ring_grp_alloc_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/**********************
* hwrm_ring_grp_free *
/* This is the ring group ID value. */
uint32_t ring_group_id;
uint8_t unused_0[4];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_ring_grp_free_output (size:128b/16B) */
struct hwrm_ring_grp_free_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/*
* special reserved flow ID to identify per function default
* flows for vSwitch offload
* 2 - Below the given filter
*/
uint64_t l2_filter_id_hint;
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_cfa_l2_filter_alloc_output (size:192b/24B) */
struct hwrm_cfa_l2_filter_alloc_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/***************************
* hwrm_cfa_l2_filter_free *
* context.
*/
uint64_t l2_filter_id;
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_cfa_l2_filter_free_output (size:128b/16B) */
struct hwrm_cfa_l2_filter_free_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/**************************
* hwrm_cfa_l2_filter_cfg *
* mirrored.
*/
uint32_t new_mirror_vnic_id;
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_cfa_l2_filter_cfg_output (size:128b/16B) */
struct hwrm_cfa_l2_filter_cfg_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/***************************
* hwrm_cfa_l2_set_rx_mask *
*/
uint32_t num_vlan_tags;
uint8_t unused_1[4];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_cfa_l2_set_rx_mask_output (size:128b/16B) */
struct hwrm_cfa_l2_set_rx_mask_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_cfa_l2_set_rx_mask_cmd_err (size:64b/8B) */
struct hwrm_cfa_l2_set_rx_mask_cmd_err {
#define HWRM_CFA_L2_SET_RX_MASK_CMD_ERR_CODE_LAST \
HWRM_CFA_L2_SET_RX_MASK_CMD_ERR_CODE_NTUPLE_FILTER_CONFLICT_ERR
uint8_t unused_0[7];
-} __attribute__((packed));
+} __rte_packed;
/*******************************
* hwrm_cfa_vlan_antispoof_cfg *
* for the 12-bit VLAN ID.
*/
uint64_t vlan_tag_mask_tbl_addr;
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_cfa_vlan_antispoof_cfg_output (size:128b/16B) */
struct hwrm_cfa_vlan_antispoof_cfg_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/********************************
* hwrm_cfa_vlan_antispoof_qcfg *
* the mask value should be 0xfff for the 12-bit VLAN ID.
*/
uint64_t vlan_tag_mask_tbl_addr;
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_cfa_vlan_antispoof_qcfg_output (size:128b/16B) */
struct hwrm_cfa_vlan_antispoof_qcfg_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/********************************
* hwrm_cfa_tunnel_filter_alloc *
* mirrored.
*/
uint32_t mirror_vnic_id;
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_cfa_tunnel_filter_alloc_output (size:192b/24B) */
struct hwrm_cfa_tunnel_filter_alloc_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/*******************************
* hwrm_cfa_tunnel_filter_free *
uint64_t resp_addr;
/* This value is an opaque id into CFA data structures. */
uint64_t tunnel_filter_id;
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_cfa_tunnel_filter_free_output (size:128b/16B) */
struct hwrm_cfa_tunnel_filter_free_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/***************************************
* hwrm_cfa_redirect_tunnel_type_alloc *
#define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_FLAGS_MODIFY_DST \
UINT32_C(0x1)
uint8_t unused_0[4];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_cfa_redirect_tunnel_type_alloc_output (size:128b/16B) */
struct hwrm_cfa_redirect_tunnel_type_alloc_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/**************************************
* hwrm_cfa_redirect_tunnel_type_free *
#define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE_INPUT_TUNNEL_TYPE_LAST \
HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE_INPUT_TUNNEL_TYPE_ANYTUNNEL
uint8_t unused_0[5];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_cfa_redirect_tunnel_type_free_output (size:128b/16B) */
struct hwrm_cfa_redirect_tunnel_type_free_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/**************************************
* hwrm_cfa_redirect_tunnel_type_info *
#define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_LAST \
HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_ANYTUNNEL
uint8_t unused_0[5];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_cfa_redirect_tunnel_type_info_output (size:128b/16B) */
struct hwrm_cfa_redirect_tunnel_type_info_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_vxlan_ipv4_hdr (size:128b/16B) */
struct hwrm_vxlan_ipv4_hdr {
uint32_t src_ip_addr;
/* IPv4 destination address. */
uint32_t dest_ip_addr;
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_vxlan_ipv6_hdr (size:320b/40B) */
struct hwrm_vxlan_ipv6_hdr {
uint32_t src_ip_addr[4];
/* IPv6 destination address. */
uint32_t dest_ip_addr[4];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_cfa_encap_data_vxlan (size:640b/80B) */
struct hwrm_cfa_encap_data_vxlan {
/* VXLAN header flags field. */
uint8_t hdr_flags;
uint8_t unused[3];
-} __attribute__((packed));
+} __rte_packed;
/*******************************
* hwrm_cfa_encap_record_alloc *
uint8_t unused_0[3];
/* This value is encap data used for the given encap type. */
uint32_t encap_data[20];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_cfa_encap_record_alloc_output (size:128b/16B) */
struct hwrm_cfa_encap_record_alloc_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/******************************
* hwrm_cfa_encap_record_free *
/* This value is an opaque id into CFA data structures. */
uint32_t encap_record_id;
uint8_t unused_0[4];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_cfa_encap_record_free_output (size:128b/16B) */
struct hwrm_cfa_encap_record_free_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/********************************
* hwrm_cfa_ntuple_filter_alloc *
* the pri_hint.
*/
uint64_t ntuple_filter_id_hint;
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_cfa_ntuple_filter_alloc_output (size:192b/24B) */
struct hwrm_cfa_ntuple_filter_alloc_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_cfa_ntuple_filter_alloc_cmd_err (size:64b/8B) */
struct hwrm_cfa_ntuple_filter_alloc_cmd_err {
#define HWRM_CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_LAST \
HWRM_CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_RX_MASK_VLAN_CONFLICT_ERR
uint8_t unused_0[7];
-} __attribute__((packed));
+} __rte_packed;
/*******************************
* hwrm_cfa_ntuple_filter_free *
uint64_t resp_addr;
/* This value is an opaque id into CFA data structures. */
uint64_t ntuple_filter_id;
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_cfa_ntuple_filter_free_output (size:128b/16B) */
struct hwrm_cfa_ntuple_filter_free_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/******************************
* hwrm_cfa_ntuple_filter_cfg *
#define HWRM_CFA_NTUPLE_FILTER_CFG_INPUT_NEW_METER_INSTANCE_ID_LAST \
HWRM_CFA_NTUPLE_FILTER_CFG_INPUT_NEW_METER_INSTANCE_ID_INVALID
uint8_t unused_1[6];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_cfa_ntuple_filter_cfg_output (size:128b/16B) */
struct hwrm_cfa_ntuple_filter_cfg_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/**************************
* hwrm_cfa_em_flow_alloc *
/* Logical ID of the encapsulation record. */
uint32_t encap_record_id;
uint8_t unused_2[4];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_cfa_em_flow_alloc_output (size:192b/24B) */
struct hwrm_cfa_em_flow_alloc_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/*************************
* hwrm_cfa_em_flow_free *
uint64_t resp_addr;
/* This value is an opaque id into CFA data structures. */
uint64_t em_filter_id;
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_cfa_em_flow_free_output (size:128b/16B) */
struct hwrm_cfa_em_flow_free_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/************************
* hwrm_cfa_meter_qcaps *
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_cfa_meter_qcaps_output (size:320b/40B) */
struct hwrm_cfa_meter_qcaps_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/********************************
* hwrm_cfa_meter_profile_alloc *
(UINT32_C(0x7) << 29)
#define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_LAST \
HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_INVALID
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_cfa_meter_profile_alloc_output (size:128b/16B) */
struct hwrm_cfa_meter_profile_alloc_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/*******************************
* hwrm_cfa_meter_profile_free *
#define HWRM_CFA_METER_PROFILE_FREE_INPUT_METER_PROFILE_ID_LAST \
HWRM_CFA_METER_PROFILE_FREE_INPUT_METER_PROFILE_ID_INVALID
uint8_t unused_1[4];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_cfa_meter_profile_free_output (size:128b/16B) */
struct hwrm_cfa_meter_profile_free_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/******************************
* hwrm_cfa_meter_profile_cfg *
(UINT32_C(0x7) << 29)
#define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_LAST \
HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_INVALID
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_cfa_meter_profile_cfg_output (size:128b/16B) */
struct hwrm_cfa_meter_profile_cfg_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/*********************************
* hwrm_cfa_meter_instance_alloc *
#define HWRM_CFA_METER_INSTANCE_ALLOC_INPUT_METER_PROFILE_ID_LAST \
HWRM_CFA_METER_INSTANCE_ALLOC_INPUT_METER_PROFILE_ID_INVALID
uint8_t unused_1[4];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_cfa_meter_instance_alloc_output (size:128b/16B) */
struct hwrm_cfa_meter_instance_alloc_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/*******************************
* hwrm_cfa_meter_instance_cfg *
*/
uint16_t meter_instance_id;
uint8_t unused_1[2];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_cfa_meter_instance_cfg_output (size:128b/16B) */
struct hwrm_cfa_meter_instance_cfg_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/********************************
* hwrm_cfa_meter_instance_free *
#define HWRM_CFA_METER_INSTANCE_FREE_INPUT_METER_INSTANCE_ID_LAST \
HWRM_CFA_METER_INSTANCE_FREE_INPUT_METER_INSTANCE_ID_INVALID
uint8_t unused_1[4];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_cfa_meter_instance_free_output (size:128b/16B) */
struct hwrm_cfa_meter_instance_free_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/*******************************
* hwrm_cfa_decap_filter_alloc *
* information of the decap filter.
*/
uint16_t l2_ctxt_ref_id;
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_cfa_decap_filter_alloc_output (size:128b/16B) */
struct hwrm_cfa_decap_filter_alloc_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/******************************
* hwrm_cfa_decap_filter_free *
/* This value is an opaque id into CFA data structures. */
uint32_t decap_filter_id;
uint8_t unused_0[4];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_cfa_decap_filter_free_output (size:128b/16B) */
struct hwrm_cfa_decap_filter_free_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/***********************
* hwrm_cfa_flow_alloc *
UINT32_C(0xff)
#define HWRM_CFA_FLOW_ALLOC_INPUT_TUNNEL_TYPE_LAST \
HWRM_CFA_FLOW_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_cfa_flow_alloc_output (size:256b/32B) */
struct hwrm_cfa_flow_alloc_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_cfa_flow_alloc_cmd_err (size:64b/8B) */
struct hwrm_cfa_flow_alloc_cmd_err {
#define HWRM_CFA_FLOW_ALLOC_CMD_ERR_CODE_LAST \
HWRM_CFA_FLOW_ALLOC_CMD_ERR_CODE_FLOW_CTXT_DB
uint8_t unused_0[7];
-} __attribute__((packed));
+} __rte_packed;
/**********************
* hwrm_cfa_flow_free *
uint32_t flow_counter_id;
/* This value identifies a set of CFA data structures used for a flow. */
uint64_t ext_flow_handle;
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_cfa_flow_free_output (size:256b/32B) */
struct hwrm_cfa_flow_free_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_cfa_flow_action_data (size:960b/120B) */
struct hwrm_cfa_flow_action_data {
uint8_t unused[7];
/* This value is encap data for the associated encap type. */
uint32_t encap_data[20];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_cfa_flow_tunnel_hdr_data (size:64b/8B) */
struct hwrm_cfa_flow_tunnel_hdr_data {
* Virtual Network Identifier (VNI).
*/
uint32_t tunnel_id;
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_cfa_flow_l4_key_data (size:64b/8B) */
struct hwrm_cfa_flow_l4_key_data {
/* The value of destination port. */
uint16_t l4_dst_port;
uint32_t unused;
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_cfa_flow_l3_key_data (size:512b/64B) */
struct hwrm_cfa_flow_l3_key_data {
/* NAT IPv4/IPv6 address. */
uint32_t nat_ip_address[4];
uint32_t unused[2];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_cfa_flow_l2_key_data (size:448b/56B) */
struct hwrm_cfa_flow_l2_key_data {
/* Inner VLAN TCI. */
uint16_t ivlan_tci;
uint8_t unused[8];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_cfa_flow_key_data (size:4160b/520B) */
struct hwrm_cfa_flow_key_data {
uint32_t l4_key_data[2];
/* Flow associated L4 header mask info. */
uint32_t l4_key_mask[2];
-} __attribute__((packed));
+} __rte_packed;
/**********************
* hwrm_cfa_flow_info *
uint8_t unused_0[6];
/* This value identifies a set of CFA data structures used for a flow. */
uint64_t ext_flow_handle;
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_cfa_flow_info_output (size:5632b/704B) */
struct hwrm_cfa_flow_info_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/***********************
* hwrm_cfa_flow_flush *
uint16_t num_flows;
/* Pointer to the PBL, or PDL depending on number of levels */
uint64_t page_dir;
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_cfa_flow_flush_output (size:128b/16B) */
struct hwrm_cfa_flow_flush_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/***********************
* hwrm_cfa_flow_stats *
uint32_t flow_id_8;
/* Flow ID of a flow. */
uint32_t flow_id_9;
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_cfa_flow_stats_output (size:1408b/176B) */
struct hwrm_cfa_flow_stats_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/***********************************
* hwrm_cfa_flow_aging_timer_reset *
uint32_t flow_timer;
/* This value identifies a set of CFA data structures used for a flow. */
uint64_t ext_flow_handle;
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_cfa_flow_aging_timer_reset_output (size:128b/16B) */
struct hwrm_cfa_flow_aging_timer_reset_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/***************************
* hwrm_cfa_flow_aging_cfg *
#define HWRM_CFA_FLOW_AGING_CFG_INPUT_EEM_CTX_MEM_TYPE_LAST \
HWRM_CFA_FLOW_AGING_CFG_INPUT_EEM_CTX_MEM_TYPE_EJECTION_DATA
uint8_t unused_1[4];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_cfa_flow_aging_cfg_output (size:128b/16B) */
struct hwrm_cfa_flow_aging_cfg_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/****************************
* hwrm_cfa_flow_aging_qcfg *
#define HWRM_CFA_FLOW_AGING_QCFG_INPUT_FLAGS_PATH_LAST \
HWRM_CFA_FLOW_AGING_QCFG_INPUT_FLAGS_PATH_RX
uint8_t unused_0[7];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_cfa_flow_aging_qcfg_output (size:320b/40B) */
struct hwrm_cfa_flow_aging_qcfg_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/*****************************
* hwrm_cfa_flow_aging_qcaps *
#define HWRM_CFA_FLOW_AGING_QCAPS_INPUT_FLAGS_PATH_LAST \
HWRM_CFA_FLOW_AGING_QCAPS_INPUT_FLAGS_PATH_RX
uint8_t unused_0[7];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_cfa_flow_aging_qcaps_output (size:256b/32B) */
struct hwrm_cfa_flow_aging_qcaps_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/**********************************
* hwrm_cfa_tcp_flag_process_qcfg *
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_cfa_tcp_flag_process_qcfg_output (size:192b/24B) */
struct hwrm_cfa_tcp_flag_process_qcfg_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/**********************
* hwrm_cfa_pair_info *
uint8_t pair_vfid;
/* Pair name (32 byte string). */
char pair_name[32];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_cfa_pair_info_output (size:576b/72B) */
struct hwrm_cfa_pair_info_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/***************************************
* hwrm_cfa_redirect_query_tunnel_type *
/* The source function id. */
uint16_t src_fid;
uint8_t unused_0[6];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_cfa_redirect_query_tunnel_type_output (size:128b/16B) */
struct hwrm_cfa_redirect_query_tunnel_type_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/*************************
* hwrm_cfa_ctx_mem_rgtr *
uint32_t unused_0;
/* Pointer to the PBL, or PDL depending on number of levels */
uint64_t page_dir;
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_cfa_ctx_mem_rgtr_output (size:128b/16B) */
struct hwrm_cfa_ctx_mem_rgtr_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/***************************
* hwrm_cfa_ctx_mem_unrgtr *
*/
uint16_t ctx_id;
uint8_t unused_0[6];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_cfa_ctx_mem_unrgtr_output (size:128b/16B) */
struct hwrm_cfa_ctx_mem_unrgtr_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/*************************
* hwrm_cfa_ctx_mem_qctx *
*/
uint16_t ctx_id;
uint8_t unused_0[6];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_cfa_ctx_mem_qctx_output (size:256b/32B) */
struct hwrm_cfa_ctx_mem_qctx_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/**************************
* hwrm_cfa_ctx_mem_qcaps *
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_cfa_ctx_mem_qcaps_output (size:128b/16B) */
struct hwrm_cfa_ctx_mem_qcaps_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/**********************
* hwrm_cfa_eem_qcaps *
#define HWRM_CFA_EEM_QCAPS_INPUT_FLAGS_PREFERRED_OFFLOAD \
UINT32_C(0x4)
uint32_t unused_0;
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_cfa_eem_qcaps_output (size:320b/40B) */
struct hwrm_cfa_eem_qcaps_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/********************
* hwrm_cfa_eem_cfg *
uint16_t fid_ctx_id;
uint16_t unused_2;
uint32_t unused_3;
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_cfa_eem_cfg_output (size:128b/16B) */
struct hwrm_cfa_eem_cfg_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/*********************
* hwrm_cfa_eem_qcfg *
/* When set to 1, indicates the configuration is the RX flow. */
#define HWRM_CFA_EEM_QCFG_INPUT_FLAGS_PATH_RX UINT32_C(0x2)
uint32_t unused_0;
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_cfa_eem_qcfg_output (size:256b/32B) */
struct hwrm_cfa_eem_qcfg_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/*******************
* hwrm_cfa_eem_op *
#define HWRM_CFA_EEM_OP_INPUT_OP_EEM_CLEANUP UINT32_C(0x3)
#define HWRM_CFA_EEM_OP_INPUT_OP_LAST \
HWRM_CFA_EEM_OP_INPUT_OP_EEM_CLEANUP
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_cfa_eem_op_output (size:128b/16B) */
struct hwrm_cfa_eem_op_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/********************************
* hwrm_cfa_adv_flow_mgnt_qcaps *
*/
uint64_t resp_addr;
uint32_t unused_0[4];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_cfa_adv_flow_mgnt_qcaps_output (size:128b/16B) */
struct hwrm_cfa_adv_flow_mgnt_qcaps_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/******************
* hwrm_cfa_tflib *
uint8_t unused0[4];
/* TFLIB request data. */
uint32_t tf_req[26];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_cfa_tflib_output (size:5632b/704B) */
struct hwrm_cfa_tflib_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/******************************
* hwrm_tunnel_dst_port_query *
#define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_LAST \
HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_VXLAN_GPE_V6
uint8_t unused_0[7];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_tunnel_dst_port_query_output (size:128b/16B) */
struct hwrm_tunnel_dst_port_query_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/******************************
* hwrm_tunnel_dst_port_alloc *
*/
uint16_t tunnel_dst_port_val;
uint8_t unused_1[4];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_tunnel_dst_port_alloc_output (size:128b/16B) */
struct hwrm_tunnel_dst_port_alloc_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/*****************************
* hwrm_tunnel_dst_port_free *
*/
uint16_t tunnel_dst_port_id;
uint8_t unused_1[4];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_tunnel_dst_port_free_output (size:128b/16B) */
struct hwrm_tunnel_dst_port_free_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/* Periodic statistics context DMA to host. */
/* ctx_hw_stats (size:1280b/160B) */
uint64_t tpa_events;
/* Number of TPA aborts */
uint64_t tpa_aborts;
-} __attribute__((packed));
+} __rte_packed;
/* Periodic statistics context DMA to host. */
/* ctx_hw_stats_ext (size:1344b/168B) */
uint64_t rx_tpa_bytes;
/* Number of TPA errors */
uint64_t rx_tpa_errors;
-} __attribute__((packed));
+} __rte_packed;
/* Periodic Engine statistics context DMA to host. */
/* ctx_eng_stats (size:512b/64B) */
* the unit is count of clock cycles
*/
uint64_t cdd_engine_usage;
-} __attribute__((packed));
+} __rte_packed;
/***********************
* hwrm_stat_ctx_alloc *
* for the periodic DMA updates.
*/
uint16_t stats_dma_length;
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_stat_ctx_alloc_output (size:128b/16B) */
struct hwrm_stat_ctx_alloc_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/**********************
* hwrm_stat_ctx_free *
/* ID of the statistics context that is being freed. */
uint32_t stat_ctx_id;
uint8_t unused_0[4];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_stat_ctx_free_output (size:128b/16B) */
struct hwrm_stat_ctx_free_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/***********************
* hwrm_stat_ctx_query *
/* ID of the statistics context that is being queried. */
uint32_t stat_ctx_id;
uint8_t unused_0[4];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_stat_ctx_query_output (size:1408b/176B) */
struct hwrm_stat_ctx_query_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/***************************
* hwrm_stat_ctx_eng_query *
/* ID of the statistics context that is being queried. */
uint32_t stat_ctx_id;
uint8_t unused_0[4];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_stat_ctx_eng_query_output (size:640b/80B) */
struct hwrm_stat_ctx_eng_query_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/***************************
* hwrm_stat_ctx_clr_stats *
/* ID of the statistics context whose statistics are being cleared. */
uint32_t stat_ctx_id;
uint8_t unused_0[4];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_stat_ctx_clr_stats_output (size:128b/16B) */
struct hwrm_stat_ctx_clr_stats_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/********************
* hwrm_pcie_qstats *
* PCIe statistics will be stored
*/
uint64_t pcie_stat_host_addr;
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_pcie_qstats_output (size:128b/16B) */
struct hwrm_pcie_qstats_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/* PCIe Statistics Formats */
/* pcie_ctx_hw_stats (size:768b/96B) */
* to Recovery
*/
uint64_t pcie_recovery_histogram;
-} __attribute__((packed));
+} __rte_packed;
/**********************
* hwrm_exec_fwd_resp *
*/
uint16_t encap_resp_target_id;
uint8_t unused_0[6];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_exec_fwd_resp_output (size:128b/16B) */
struct hwrm_exec_fwd_resp_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/************************
* hwrm_reject_fwd_resp *
*/
uint16_t encap_resp_target_id;
uint8_t unused_0[6];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_reject_fwd_resp_output (size:128b/16B) */
struct hwrm_reject_fwd_resp_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/*****************
* hwrm_fwd_resp *
uint64_t encap_resp_addr;
/* This is an encapsulated response. */
uint32_t encap_resp[24];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_fwd_resp_output (size:128b/16B) */
struct hwrm_fwd_resp_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/*****************************
* hwrm_fwd_async_event_cmpl *
uint8_t unused_0[6];
/* This is an encapsulated asynchronous event completion. */
uint32_t encap_async_event_cmpl[4];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_fwd_async_event_cmpl_output (size:128b/16B) */
struct hwrm_fwd_async_event_cmpl_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/**************************
* hwrm_nvm_raw_write_blk *
uint32_t dest_addr;
/* Length of data to be written, in bytes. */
uint32_t len;
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_nvm_raw_write_blk_output (size:128b/16B) */
struct hwrm_nvm_raw_write_blk_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/*****************
* hwrm_nvm_read *
/* The length of the data to be read, in bytes. */
uint32_t len;
uint8_t unused_1[4];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_nvm_read_output (size:128b/16B) */
struct hwrm_nvm_read_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/*********************
* hwrm_nvm_raw_dump *
uint32_t offset;
/* Total length of NVRAM contents to be read, in bytes. */
uint32_t len;
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_nvm_raw_dump_output (size:128b/16B) */
struct hwrm_nvm_raw_dump_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/****************************
* hwrm_nvm_get_dir_entries *
* This is the host address where the directory will be written.
*/
uint64_t host_dest_addr;
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_nvm_get_dir_entries_output (size:128b/16B) */
struct hwrm_nvm_get_dir_entries_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/*************************
* hwrm_nvm_get_dir_info *
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_nvm_get_dir_info_output (size:192b/24B) */
struct hwrm_nvm_get_dir_info_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/******************
* hwrm_nvm_write *
*/
uint32_t dir_item_length;
uint32_t unused_0;
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_nvm_write_output (size:128b/16B) */
struct hwrm_nvm_write_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_nvm_write_cmd_err (size:64b/8B) */
struct hwrm_nvm_write_cmd_err {
#define HWRM_NVM_WRITE_CMD_ERR_CODE_LAST \
HWRM_NVM_WRITE_CMD_ERR_CODE_NO_SPACE
uint8_t unused_0[7];
-} __attribute__((packed));
+} __rte_packed;
/*******************
* hwrm_nvm_modify *
*/
uint32_t len;
uint8_t unused_1[4];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_nvm_modify_output (size:128b/16B) */
struct hwrm_nvm_modify_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/***************************
* hwrm_nvm_find_dir_entry *
#define HWRM_NVM_FIND_DIR_ENTRY_INPUT_OPT_ORDINAL_LAST \
HWRM_NVM_FIND_DIR_ENTRY_INPUT_OPT_ORDINAL_GT
uint8_t unused_0[3];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_nvm_find_dir_entry_output (size:256b/32B) */
struct hwrm_nvm_find_dir_entry_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/****************************
* hwrm_nvm_erase_dir_entry *
/* Directory Entry Index */
uint16_t dir_idx;
uint8_t unused_0[6];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_nvm_erase_dir_entry_output (size:128b/16B) */
struct hwrm_nvm_erase_dir_entry_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/*************************
* hwrm_nvm_get_dev_info *
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_nvm_get_dev_info_output (size:256b/32B) */
struct hwrm_nvm_get_dev_info_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/**************************
* hwrm_nvm_mod_dir_entry *
* value of the content in the directory entry.
*/
uint32_t checksum;
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_nvm_mod_dir_entry_output (size:128b/16B) */
struct hwrm_nvm_mod_dir_entry_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/**************************
* hwrm_nvm_verify_update *
*/
uint16_t dir_ext;
uint8_t unused_0[2];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_nvm_verify_update_output (size:128b/16B) */
struct hwrm_nvm_verify_update_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/***************************
* hwrm_nvm_install_update *
#define HWRM_NVM_INSTALL_UPDATE_INPUT_FLAGS_ALLOWED_TO_DEFRAG \
UINT32_C(0x4)
uint8_t unused_0[2];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_nvm_install_update_output (size:192b/24B) */
struct hwrm_nvm_install_update_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_nvm_install_update_cmd_err (size:64b/8B) */
struct hwrm_nvm_install_update_cmd_err {
#define HWRM_NVM_INSTALL_UPDATE_CMD_ERR_CODE_LAST \
HWRM_NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE
uint8_t unused_0[7];
-} __attribute__((packed));
+} __rte_packed;
/******************
* hwrm_nvm_flush *
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_nvm_flush_output (size:128b/16B) */
struct hwrm_nvm_flush_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_nvm_flush_cmd_err (size:64b/8B) */
struct hwrm_nvm_flush_cmd_err {
#define HWRM_NVM_FLUSH_CMD_ERR_CODE_LAST \
HWRM_NVM_FLUSH_CMD_ERR_CODE_FAIL
uint8_t unused_0[7];
-} __attribute__((packed));
+} __rte_packed;
/*************************
* hwrm_nvm_get_variable *
#define HWRM_NVM_GET_VARIABLE_INPUT_FLAGS_FACTORY_DFLT \
UINT32_C(0x1)
uint8_t unused_0;
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_nvm_get_variable_output (size:128b/16B) */
struct hwrm_nvm_get_variable_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_nvm_get_variable_cmd_err (size:64b/8B) */
struct hwrm_nvm_get_variable_cmd_err {
#define HWRM_NVM_GET_VARIABLE_CMD_ERR_CODE_LAST \
HWRM_NVM_GET_VARIABLE_CMD_ERR_CODE_LEN_TOO_SHORT
uint8_t unused_0[7];
-} __attribute__((packed));
+} __rte_packed;
/*************************
* hwrm_nvm_set_variable *
#define HWRM_NVM_SET_VARIABLE_INPUT_FLAGS_FACTORY_DEFAULT \
UINT32_C(0x80)
uint8_t unused_0;
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_nvm_set_variable_output (size:128b/16B) */
struct hwrm_nvm_set_variable_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_nvm_set_variable_cmd_err (size:64b/8B) */
struct hwrm_nvm_set_variable_cmd_err {
#define HWRM_NVM_SET_VARIABLE_CMD_ERR_CODE_LAST \
HWRM_NVM_SET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR
uint8_t unused_0[7];
-} __attribute__((packed));
+} __rte_packed;
/****************************
* hwrm_nvm_validate_option *
/* index for the 4th dimension */
uint16_t index_3;
uint8_t unused_0[2];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_nvm_validate_option_output (size:128b/16B) */
struct hwrm_nvm_validate_option_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_nvm_validate_option_cmd_err (size:64b/8B) */
struct hwrm_nvm_validate_option_cmd_err {
#define HWRM_NVM_VALIDATE_OPTION_CMD_ERR_CODE_LAST \
HWRM_NVM_VALIDATE_OPTION_CMD_ERR_CODE_UNKNOWN
uint8_t unused_0[7];
-} __attribute__((packed));
+} __rte_packed;
/*****************
* hwrm_fw_reset *
*/
#define HWRM_FW_RESET_INPUT_FLAGS_RESET_GRACEFUL UINT32_C(0x1)
uint8_t unused_0[4];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_fw_reset_output (size:128b/16B) */
struct hwrm_fw_reset_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
/**********************
* hwrm_port_ts_query *
/* Port ID of port that is being queried. */
uint16_t port_id;
uint8_t unused_0[2];
-} __attribute__((packed));
+} __rte_packed;
/* hwrm_port_ts_query_output (size:192b/24B) */
struct hwrm_port_ts_query_output {
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} __attribute__((packed));
+} __rte_packed;
#endif /* _HSI_STRUCT_DEF_DPDK_H_ */
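The __rte_packed marker substituted throughout this header comes from DPDK's rte_common.h, where, for the GCC/clang builds targeted at the time of this change, it expands to the same packed attribute it replaces, so the HWRM structure layouts stay byte-identical while the compiler-specific syntax is hidden behind the portability macro. The fragment below is a minimal sketch of how such a packed structure is declared and size-checked; the structure name and fields are illustrative only and are not part of the HWRM definitions above.

/* Illustrative sketch, not part of the patch: a packed request-style
 * header declared with __rte_packed. Assumes a DPDK build environment
 * where rte_common.h provides the macro.
 */
#include <stdint.h>
#include <rte_common.h>   /* provides __rte_packed */

struct example_packed_hdr {
	uint16_t req_type;   /* command identifier */
	uint16_t cmpl_ring;  /* completion ring to use for this command */
	uint16_t seq_id;     /* sequence number of the request */
	uint16_t target_id;  /* target function of the request */
	uint64_t host_addr;  /* host DMA address, 8B, naturally aligned */
} __rte_packed;

/* With packing applied no padding is inserted: 4 * 2B + 8B = 16B. */
_Static_assert(sizeof(struct example_packed_hdr) == 16,
	       "packed header must be exactly 16 bytes");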