uint64_t next_valid;
uint64_t key[4];
/* Cache line 1 */
- uint8_t data[0];
+ uint8_t data[];
};
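
The change applied throughout this patch: GNU-style zero-length arrays ([0]) become C99 flexible array members ([]). Both contribute zero bytes to sizeof, but only the flexible form is standard C, and it lets bounds checkers such as -fsanitize=bounds distinguish a genuinely variable tail from an array of zero elements. A minimal allocation sketch (hypothetical names, outside the patch):

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct blob {
	size_t len;     /* number of valid bytes in data[] */
	uint8_t data[]; /* flexible array member: must be last */
};

/* Allocate a blob carrying n payload bytes. sizeof(*b) covers only
 * the fixed header; the flexible tail is sized explicitly. */
static struct blob *blob_alloc(const uint8_t *src, size_t n)
{
	struct blob *b = malloc(sizeof(*b) + n);

	if (b != NULL) {
		b->len = n;
		memcpy(b->data, src, n);
	}
	return b;
}
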
#if RTE_TABLE_HASH_LRU_STRATEGY == 3
struct netcfg_info {
uint8_t num_ethports;
/**< Number of ports */
- struct fm_eth_port_cfg port_cfg[0];
+ struct fm_eth_port_cfg port_cfg[];
/**< Variable-length array with num_ethports entries */
};
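
As in netcfg_info above, a count in the fixed header says how many tail elements follow. A sketch of the matching iteration pattern (standalone stand-in types, not part of the patch):

#include <stdint.h>
#include <stdio.h>

struct port_cfg { int index; };       /* stand-in element type */

struct netcfg {                       /* mirrors the shape of netcfg_info */
	uint8_t num_ethports;         /* number of valid tail elements */
	struct port_cfg port_cfg[];   /* one entry per port */
};

static void walk_ports(const struct netcfg *cfg)
{
	for (uint8_t i = 0; i < cfg->num_ethports; i++)
		printf("port %d\n", cfg->port_cfg[i].index);
}
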
struct netcfg_interface {
uint8_t numof_netcfg_interface;
uint8_t numof_fman_enabled_macless;
- struct interface_info interface_info[0];
+ struct interface_info interface_info[];
};
/* pcd_file: FMC netpcd XML ("policy") file, which contains PCD information.
* Ring data starts here + RingDataStartOffset
* !!! DO NOT place any fields below this !!!
*/
- uint8_t data[0];
+ uint8_t data[];
} __rte_packed;
struct vmbus_gpa_range {
uint32_t len;
uint32_t ofs;
- uint64_t page[0];
+ uint64_t page[];
} __rte_packed;
/* This is actually vmbus_gpa_range.gpa_page[1] */
/* IOV Pointer */
struct roc_se_iov_ptr {
int buf_cnt;
- struct roc_se_buf_ptr bufs[0];
+ struct roc_se_buf_ptr bufs[];
};
struct roc_se_fc_params {
struct dpaax_iova_table {
unsigned int count; /**< No. of blocks of contiguous physical pages */
- struct dpaax_iovat_element entries[0];
+ struct dpaax_iovat_element entries[];
};
/* Pointer to the table, which is common for DPAA/DPAA2 and only a single
struct mlx5_rdma_write_wqe {
struct mlx5_wqe_cseg ctr;
struct mlx5_wqe_rseg rseg;
- struct mlx5_wqe_dseg dseg[0];
+ struct mlx5_wqe_dseg dseg[];
} __rte_packed;
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
struct mlx5_ifc_qpc_extension_and_pas_list_bits {
struct mlx5_ifc_qpc_extension_bits qpc_data_extension;
- u8 pas[0][0x40];
+ u8 pas[][0x40];
};
u8 reserved_at_a0[0x20];
struct mlx5_ifc_qpc_bits qpc;
u8 reserved_at_800[0x80];
- u8 pas[0][0x40];
+ u8 pas[][0x40];
};
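
pas[][0x40] shows that the element type of a flexible array member may itself be an array, as long as that element type is complete. (In the mlx5_ifc convention these u8 arrays describe bit-level layout and are consumed through accessor macros, so the sketch below is about the C shape only; all names are hypothetical.)

#include <stdint.h>
#include <stdlib.h>

struct row_list {
	uint32_t nrows;
	uint8_t rows[][64];   /* flexible array of fixed 64-byte rows */
};

static struct row_list *rows_alloc(uint32_t n)
{
	/* Each element is a complete type, so its size is known: 64. */
	struct row_list *l = malloc(sizeof(*l) + n * sizeof(l->rows[0]));

	if (l != NULL)
		l->nrows = n;
	return l;
}
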
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif
u8 reserved_at_8[0x18];
u8 syndrome[0x20];
u8 reserved_at_40[0x40];
- u8 register_data[0][0x20];
+ u8 register_data[][0x20];
};
struct mlx5_ifc_access_register_in_bits {
u8 reserved_at_40[0x10];
u8 register_id[0x10];
u8 argument[0x20];
- u8 register_data[0][0x20];
+ u8 register_data[][0x20];
};
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif
/**< PMD type */
uint32_t max_nb_queue_pairs;
/**< Max number of queue pairs supported by device */
- __extension__ uint8_t priv[0];
+ __extension__ uint8_t priv[];
};
/** Common queue pair data shared by all IPsec multi-buffer PMDs */
/* Multi buffer manager */
const struct rte_memzone *mb_mgr_mz;
/* Shared memzone for storing mb_mgr */
- __extension__ uint8_t additional_data[0];
+ __extension__ uint8_t additional_data[];
/**< Storage for PMD-specific additional data */
};
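
The __extension__ keyword suppresses -Wpedantic diagnostics for GNU constructs; the zero-length array required it, while a flexible array member is valid C99, so the marker is now mostly belt-and-braces. A side-by-side sketch, assuming GCC or Clang:

#include <stdint.h>

/* Old: the zero-length array is a GNU extension, so -Wpedantic
 * complains unless the declaration is wrapped in __extension__. */
struct old_style {
	int n;
	__extension__ uint8_t tail[0];
};

/* New: a flexible array member is standard since C99 and needs
 * no marker in C99-and-later modes. */
struct new_style {
	int n;
	uint8_t tail[];
};
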
struct vring_avail {
uint16_t flags;
uint16_t idx;
- uint16_t ring[0];
+ uint16_t ring[];
};
/* id is a 16-bit index; uint32_t is used here for ids for padding reasons. */
struct vring_used {
uint16_t flags;
volatile uint16_t idx;
- struct vring_used_elem ring[0];
+ struct vring_used_elem ring[];
};
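
The conversion changes no layout arithmetic: both [0] and [] contribute zero bytes to sizeof, so the usual virtio ring-size computation over these structures still holds. A sketch using the two structures above (helper name hypothetical; the optional event-index word and inter-ring alignment padding are elided):

#include <stddef.h>
#include <stdint.h>

static size_t
ring_bytes(unsigned int num)
{
	/* Fixed headers plus num ring entries each. */
	size_t avail = sizeof(struct vring_avail) + num * sizeof(uint16_t);
	size_t used  = sizeof(struct vring_used) +
		       num * sizeof(struct vring_used_elem);

	return avail + used;
}
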
struct vring {
uint16_t *notify_addr;
- struct vq_desc_extra vq_descx[0];
+ struct vq_desc_extra vq_descx[];
};
struct offload_port_info ports;
struct offload_ka_info kas;
struct offload_rr_info rrs;
- u8 buf[0];
+ u8 buf[];
} __rte_packed;
struct smbus_request {
unsigned int clipt_start; /* start index of CLIP table */
unsigned int clipt_size; /* size of CLIP table */
rte_rwlock_t lock; /* table rw lock */
- struct clip_entry cl_list[0]; /* MUST BE LAST */
+ struct clip_entry cl_list[]; /* MUST BE LAST */
};
struct clip_tbl *t4_init_clip_tbl(unsigned int clipt_start,
unsigned int l2t_start; /* start index of our piece of the L2T */
unsigned int l2t_size; /* number of entries in l2tab */
rte_rwlock_t lock; /* table rw lock */
- struct l2t_entry l2tab[0]; /* MUST BE LAST */
+ struct l2t_entry l2tab[]; /* MUST BE LAST */
};
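
The "MUST BE LAST" comments are load-bearing: C allows at most one flexible array member per struct, it must be the final member, there must be at least one other named member, and such a struct cannot be an array element. A sketch of the constraints (hypothetical types):

struct tbl {
	unsigned int size;
	int entries[];   /* flexible member: MUST BE LAST */
};

/* All of the following are invalid in standard C (compilers vary
 * in how loudly they object), because each would place storage
 * after, or instead of, the flexible tail:
 *
 *   struct tbl two[2];                        (array of such structs)
 *   struct bad  { struct tbl t; int after; }; (not the last member)
 *   struct bad2 { int entries[]; };           (no fixed member first)
 */
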
#define L2T_LPBK true
* free_idx alone cannot determine
* whether the table is full
*/
- struct mps_tcam_entry entry[0];
+ struct mps_tcam_entry entry[];
};
struct mpstcam_table *t4_init_mpstcam(struct adapter *adap);
unsigned int smt_size;
unsigned int smt_start;
rte_rwlock_t lock;
- struct smt_entry smtab[0];
+ struct smt_entry smtab[];
};
struct smt_data *t4_init_smt(u32 smt_start_idx, u32 smt_size);
struct filter_tlv {
uint32_t type;
uint32_t length;
- uint32_t val[0];
+ uint32_t val[];
};
/* Data for CMD_ADD_FILTER is 2 TLV and filter + action structs */
struct hinic_sq_task task;
/* SQ SGE section start address, 1 to 127 SGEs */
- struct hinic_sq_bufdesc buf_descs[0];
+ struct hinic_sq_bufdesc buf_descs[];
};
struct hinic_txq_stats {
int is_split;
unsigned int fec_modes_supported;
- } ports[0];
+ } ports[];
};
struct nfp_eth_table *nfp_eth_read_ports(struct nfp_cpp *cpp);
struct vring_avail {
uint16_t flags;
uint16_t idx;
- uint16_t ring[0];
+ uint16_t ring[];
};
/* id is a 16-bit index; uint32_t is used here for ids for padding reasons. */
struct vring_used {
uint16_t flags;
uint16_t idx;
- struct vring_used_elem ring[0];
+ struct vring_used_elem ring[];
};
/* For support of packed virtqueues in Virtio 1.1 the format of descriptors
struct vhost_memory_kernel {
uint32_t nregions;
uint32_t padding;
- struct vhost_memory_region regions[0];
+ struct vhost_memory_region regions[];
};
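
vhost_memory_kernel mirrors the kernel's struct vhost_memory, whose tail is sized by nregions; userspace allocates header plus tail in one block and hands the whole thing to the ioctl. A minimal sketch, assuming a Linux vhost fd (region setup elided, error handling minimal):

#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/vhost.h>

static int set_mem_table(int vhost_fd, uint32_t nregions)
{
	struct vhost_memory *mem;
	int ret;

	mem = calloc(1, sizeof(*mem) +
			nregions * sizeof(struct vhost_memory_region));
	if (mem == NULL)
		return -1;
	mem->nregions = nregions;
	/* ... fill mem->regions[0 .. nregions - 1] ... */
	ret = ioctl(vhost_fd, VHOST_SET_MEM_TABLE, mem);
	free(mem);
	return ret;
}
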
/* vhost kernel ioctls */
struct vhost_vdpa_config {
uint32_t off;
uint32_t len;
- uint8_t buf[0];
+ uint8_t buf[];
};
struct vhost_msg {
uint16_t *notify_addr;
struct rte_mbuf **sw_ring; /**< RX software ring. */
- struct vq_desc_extra vq_descx[0];
+ struct vq_desc_extra vq_descx[];
};
/* If multiqueue is provided by host, then we support it. */
struct mlx5_rxp_response {
struct mlx5_rxp_response_desc header;
- struct mlx5_rxp_match_tuple matches[0];
+ struct mlx5_rxp_match_tuple matches[];
};
#define MLX5_RXP_MAX_MATCHES 254
struct mlx5_rxp_ctl_rules_pgm {
struct mlx5_rxp_ctl_hdr hdr;
uint32_t count;
- struct mlx5_rxp_rof_entry rules[0];
+ struct mlx5_rxp_rof_entry rules[];
} __rte_packed;
/* RXP programming mode setting. */
uint32_t len;
uint32_t head;
uint32_t tail;
- struct rte_mbuf *m_table[0];
+ struct rte_mbuf *m_table[];
};
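
One subtlety that survives the conversion: if the fixed part of the struct carries trailing padding, the flexible tail may legally begin inside that padding, so offsetof() gives a tighter (and always safe) allocation size than sizeof(). A sketch with a stand-in for the mbuf table above (names hypothetical):

#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>

struct pkt_table {
	uint32_t len;
	uint32_t head;
	uint32_t tail;
	void *m_table[];   /* stand-in for struct rte_mbuf * entries */
};

static struct pkt_table *pkt_table_alloc(uint32_t slots)
{
	size_t sz = offsetof(struct pkt_table, m_table) +
		    slots * sizeof(void *);
	struct pkt_table *t = calloc(1, sz);

	if (t != NULL)
		t->len = slots;
	return t;
}
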
struct rx_queue {
struct follow_up_msg {
struct ptp_header hdr;
struct tstamp precise_origin_tstamp;
- uint8_t suffix[0];
+ uint8_t suffix[];
} __rte_packed;
struct delay_req_msg {
struct ptp_header hdr;
struct tstamp rx_tstamp;
struct port_id req_port_id;
- uint8_t suffix[0];
+ uint8_t suffix[];
} __rte_packed;
struct ptp_message {
uint8_t padding[3];
void *event_mdata;
/**< Event metadata (aka *union rte_event_crypto_metadata*) */
- uint8_t sess_private_data[0];
+ uint8_t sess_private_data[];
};
#ifdef __cplusplus
__extension__ struct {
void *data;
uint16_t refcnt;
- } sess_data[0];
+ } sess_data[];
/**< Driver specific session material, variable size */
};
enum rte_event_timer_state state;
/**< State of the event timer. */
- uint8_t user_meta[0];
+ uint8_t user_meta[];
/**< Memory to store user specific metadata.
* The event timer adapter implementation should not modify this area.
*/
struct ip_frag_pkt *last; /* last used entry. */
struct ip_pkt_list lru; /* LRU list for table entries. */
struct ip_frag_tbl_stat stat; /* statistics counters. */
- __extension__ struct ip_frag_pkt pkt[0]; /* hash table. */
+ __extension__ struct ip_frag_pkt pkt[]; /* hash table. */
};
#endif /* _IP_REASSEMBLY_H_ */
struct replay_sqn {
rte_rwlock_t rwl;
uint64_t sqn;
- __extension__ uint64_t window[0];
+ __extension__ uint64_t window[];
};
/* IPsec SA supported algorithms */
uint8_t depth;
uint8_t flag;
uint64_t nh;
- __extension__ uint64_t ext[0];
+ __extension__ uint64_t ext[];
};
struct rte_rib {
uint8_t ip[RTE_RIB6_IPV6_ADDR_SIZE];
uint8_t depth;
uint8_t flag;
- __extension__ uint64_t ext[0];
+ __extension__ uint64_t ext[];
};
struct rte_rib6 {
uint32_t sig[TABLE_KEYS_PER_BUCKET];
uint8_t key_timeout_id[TABLE_KEYS_PER_BUCKET];
uint8_t pad[TABLE_BUCKET_PAD_SIZE];
- uint8_t key[0];
+ uint8_t key[];
};
struct table_params {
uint8_t key_mask0[RTE_CACHE_LINE_SIZE];
/* Table buckets. */
- uint8_t buckets[0];
+ uint8_t buckets[];
} __rte_cache_aligned;
/* The timeout (in cycles) is stored in the table as a 32-bit value by truncating its least
uint64_t key[4][2];
/* Cache line 2 */
- uint8_t data[0];
+ uint8_t data[];
};
#else
struct rte_bucket_4_16 {
uint64_t key[4][2];
/* Cache line 2 */
- uint8_t data[0];
+ uint8_t data[];
};
#endif
uint64_t key[4][4];
/* Cache line 3 */
- uint8_t data[0];
+ uint8_t data[];
};
#else
struct rte_bucket_4_32 {
uint64_t key[4][4];
/* Cache line 3 */
- uint8_t data[0];
+ uint8_t data[];
};
#endif
uint64_t key[4];
/* Cache line 1 */
- uint8_t data[0];
+ uint8_t data[];
};
#else
struct rte_bucket_4_8 {
uint64_t key[4];
/* Cache line 1 */
- uint8_t data[0];
+ uint8_t data[];
};
#endif
uint16_t desc_num;
uint16_t last_inflight_io;
uint16_t used_idx;
- struct rte_vhost_inflight_desc_split desc[0];
+ struct rte_vhost_inflight_desc_split desc[];
};
struct rte_vhost_inflight_desc_packed {
uint8_t used_wrap_counter;
uint8_t old_used_wrap_counter;
uint8_t padding[7];
- struct rte_vhost_inflight_desc_packed desc[0];
+ struct rte_vhost_inflight_desc_packed desc[];
};
struct rte_vhost_resubmit_desc {