* address (6 bits for address shift + 4 bits for the DQRR size).
*/
struct qm_dqrr_entry shadow_dqrr[QM_DQRR_SIZE]
- __attribute__((aligned(1024)));
+ __rte_aligned(1024);
#endif
};
};
u64 opaque;
};
-} __attribute__((aligned(8)));
+} __rte_aligned(8);
static inline u64 bm_buffer_get64(const struct bm_buffer *buf)
{
return buf->addr;
u32 cmd;
u32 status;
};
-} __attribute__((aligned(8)));
+} __rte_aligned(8);
#define QM_FD_DD_NULL 0x00
#define QM_FD_PID_MASK 0x3f
static inline u64 qm_fd_addr_get64(const struct qm_fd *fd)
#define noinline __attribute__((noinline))
#endif
#define L1_CACHE_BYTES 64
-#define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
+#define ____cacheline_aligned __rte_aligned(L1_CACHE_BYTES)
#define __stringify_1(x) #x
#define __stringify(x) __stringify_1(x)
*/
struct sec_sd_t {
uint32_t rsvd[MAX_DESC_SIZE_WORDS];
-} __attribute__((packed, aligned(64)));
+} __attribute__((packed)) __rte_aligned(64);
/* Structure encompassing a job descriptor which processes
* a single packet from a context. The job descriptor references
uint32_t in_ext_length;
struct load_command_s load_dpovrd;
uint32_t dpovrd;
-} __attribute__((packed, aligned(64)));
+} __attribute__((packed)) __rte_aligned(64);
#endif
typedef uint64_t large_int_ptr;
#define MAX_PKE_PARAMS 8
#define QAT_PKE_MAX_LN_SIZE 512
-#define _PKE_ALIGN_ __attribute__((__aligned__(8)))
+#define _PKE_ALIGN_ __rte_aligned(8)
#define QAT_ASYM_MAX_PARAMS 8
#define QAT_ASYM_MODINV_NUM_IN_PARAMS 2
uint32_t pad3;
uint16_t vlan_tci; /**< VLAN Tag Control Identifier (CPU order). */
uint32_t pad4;
-} __attribute__ ((__aligned__(RTE_CACHE_LINE_SIZE), __packed__));
+} __attribute__ ((__packed__)) __rte_cache_aligned;
/**{ AVP device features */
struct slow_protocol_frame {
struct rte_ether_hdr eth_hdr;
struct slow_protocol slow_protocol;
-} __attribute__((__packed__)) __attribute__((aligned(2)));
+} __attribute__((__packed__)) __rte_aligned(2);
struct port_params {
uint16_t system_priority;
/**< Priority of this (unused in current implementation) */
uint16_t port_number;
/**< Port number. It corresponds to slave port id. */
-} __attribute__((__packed__)) __attribute__((aligned(2)));
+} __attribute__((__packed__)) __rte_aligned(2);
struct lacpdu_actor_partner_params {
uint8_t tlv_type_info;
struct port_params port_params;
uint8_t state;
uint8_t reserved_3[3];
-} __attribute__((__packed__)) __attribute__((aligned(2)));
+} __attribute__((__packed__)) __rte_aligned(2);
/** LACPDU structure (5.4.2 in 802.1AX documentation). */
struct lacpdu {
uint8_t tlv_type_terminator;
uint8_t terminator_length;
uint8_t reserved_50[50];
-} __attribute__((__packed__)) __attribute__((aligned(2)));
+} __attribute__((__packed__)) __rte_aligned(2);
/** LACPDU frame: Contains ethernet header and LACPDU. */
struct lacpdu_header {
struct rte_ether_hdr eth_hdr;
struct lacpdu lacpdu;
-} __attribute__((__packed__)) __attribute__((aligned(2)));
+} __attribute__((__packed__)) __rte_aligned(2);
struct marker {
uint8_t subtype;
uint8_t tlv_type_terminator;
uint8_t terminator_length;
uint8_t reserved_90[90];
-} __attribute__((__packed__)) __attribute__((aligned(2)));
+} __attribute__((__packed__)) __rte_aligned(2);
struct marker_header {
struct rte_ether_hdr eth_hdr;
struct marker marker;
-} __attribute__((__packed__)) __attribute__((aligned(2)));
+} __attribute__((__packed__)) __rte_aligned(2);
struct rte_eth_bond_8023ad_conf {
uint32_t fast_periodic_ms;
struct hns3_fd_key_cfg *key_cfg;
uint8_t *cur_key_x;
uint8_t *cur_key_y;
- uint8_t key_x[MAX_KEY_BYTES] __attribute__((aligned(4)));
- uint8_t key_y[MAX_KEY_BYTES] __attribute__((aligned(4)));
+ uint8_t key_x[MAX_KEY_BYTES] __rte_aligned(4);
+ uint8_t key_y[MAX_KEY_BYTES] __rte_aligned(4);
uint8_t vf_id = rule->vf_id;
uint8_t meta_data_region;
uint8_t tuple_size;
/* A.1 load cqes. */
p3 = (unsigned int)((vector unsigned short)p)[3];
cqes[3] = (vector unsigned char)(vector unsigned long){
- *(__attribute__((__aligned__(8))) unsigned long *)
+ *(__rte_aligned(8) unsigned long *)
&cq[pos + p3].sop_drop_qpn, 0LL};
rte_compiler_barrier();
p2 = (unsigned int)((vector unsigned short)p)[2];
cqes[2] = (vector unsigned char)(vector unsigned long){
- *(__attribute__((__aligned__(8))) unsigned long *)
+ *(__rte_aligned(8) unsigned long *)
&cq[pos + p2].sop_drop_qpn, 0LL};
rte_compiler_barrier();
/* A.1 load a block having op_own. */
p1 = (unsigned int)((vector unsigned short)p)[1];
cqes[1] = (vector unsigned char)(vector unsigned long){
- *(__attribute__((__aligned__(8))) unsigned long *)
+ *(__rte_aligned(8) unsigned long *)
&cq[pos + p1].sop_drop_qpn, 0LL};
rte_compiler_barrier();
cqes[0] = (vector unsigned char)(vector unsigned long){
- *(__attribute__((__aligned__(8))) unsigned long *)
+ *(__rte_aligned(8) unsigned long *)
&cq[pos].sop_drop_qpn, 0LL};
rte_compiler_barrier();
vec_sel((vector unsigned short)cqes[2],
(vector unsigned short)cqe_tmp1, cqe_sel_mask1);
cqe_tmp2 = (vector unsigned char)(vector unsigned long){
- *(__attribute__((__aligned__(8))) unsigned long *)
+ *(__rte_aligned(8) unsigned long *)
&cq[pos + p3].rsvd3[9], 0LL};
cqe_tmp1 = (vector unsigned char)(vector unsigned long){
- *(__attribute__((__aligned__(8))) unsigned long *)
+ *(__rte_aligned(8) unsigned long *)
&cq[pos + p2].rsvd3[9], 0LL};
cqes[3] = (vector unsigned char)
vec_sel((vector unsigned short)cqes[3],
vec_sel((vector unsigned short)cqes[0],
(vector unsigned short)cqe_tmp1, cqe_sel_mask1);
cqe_tmp2 = (vector unsigned char)(vector unsigned long){
- *(__attribute__((__aligned__(8))) unsigned long *)
+ *(__rte_aligned(8) unsigned long *)
&cq[pos + p1].rsvd3[9], 0LL};
cqe_tmp1 = (vector unsigned char)(vector unsigned long){
- *(__attribute__((__aligned__(8))) unsigned long *)
+ *(__rte_aligned(8) unsigned long *)
&cq[pos].rsvd3[9], 0LL};
cqes[1] = (vector unsigned char)
vec_sel((vector unsigned short)cqes[1],
int qidx;
int tx_qcidx;
__le64 dma;
-} __attribute__ ((__aligned__(64)));
+} __rte_aligned(64);
/* RX and freelist descriptor format */
#define PCIE_DESC_RX_DD (1 << 7)
int qidx;
int fl_qcidx;
int rx_qcidx;
-} __attribute__ ((__aligned__(64)));
+} __rte_aligned(64);
struct nfp_net_hw {
/* Info from the firmware */
__u32 kern_version;
__u32 prog_flags;
};
-} __attribute__((aligned(8)));
+} __rte_aligned(8);
#ifndef __NR_bpf
# if defined(__i386__)
struct virtio_tx_region {
struct virtio_net_hdr_mrg_rxbuf tx_hdr;
struct vring_desc tx_indir[VIRTIO_MAX_TX_INDIRECT]
- __attribute__((__aligned__(16)));
+ __rte_aligned(16);
};
static inline int
unsigned rx;
unsigned tx;
unsigned drop;
-} __attribute__((aligned(RTE_CACHE_LINE_SIZE / 2)));
+} __rte_aligned(RTE_CACHE_LINE_SIZE / 2);
static int proc_id = -1;
static unsigned num_procs = 0;
int32_t priority[RTE_ACL_MAX_CATEGORIES]; /* running priorities. */
uint32_t count; /* num of remaining tries */
/* true for allocated struct */
-} __attribute__((aligned(XMM_SIZE)));
+} __rte_aligned(XMM_SIZE);
/*
* One parms structure for each slot in the search engine.
rte_xmm_t xmm_index_mask;
rte_xmm_t xmm_ones_16;
rte_xmm_t range_base;
-} altivec_acl_const __attribute__((aligned(RTE_CACHE_LINE_SIZE))) = {
+} altivec_acl_const __rte_cache_aligned = {
{
.u32 = {0x00000000, 0x04040404, 0x08080808, 0x0c0c0c0c}
},
rte_xmm_t xmm_shuffle_input;
rte_xmm_t xmm_index_mask;
rte_xmm_t range_base;
-} neon_acl_const __attribute__((aligned(RTE_CACHE_LINE_SIZE))) = {
+} neon_acl_const __rte_cache_aligned = {
{
.u32 = {0x00000000, 0x04040404, 0x08080808, 0x0c0c0c0c}
},
uint32_t u32[XMM_SIZE / sizeof(uint32_t)];
uint64_t u64[XMM_SIZE / sizeof(uint64_t)];
double pd[XMM_SIZE / sizeof(double)];
-} __attribute__((aligned(16))) rte_xmm_t;
+} __rte_aligned(16) rte_xmm_t;
#ifdef RTE_ARCH_ARM
/* NEON intrinsic vqtbl1q_u8() is not supported in ARMv7-A(AArch32) */
#endif
#ifdef RTE_ARCH_STRICT_ALIGN
-typedef uint64_t unaligned_uint64_t __attribute__ ((aligned(1)));
-typedef uint32_t unaligned_uint32_t __attribute__ ((aligned(1)));
-typedef uint16_t unaligned_uint16_t __attribute__ ((aligned(1)));
+typedef uint64_t unaligned_uint64_t __rte_aligned(1);
+typedef uint32_t unaligned_uint32_t __rte_aligned(1);
+typedef uint16_t unaligned_uint16_t __rte_aligned(1);
#else
typedef uint64_t unaligned_uint64_t;
typedef uint32_t unaligned_uint32_t;
uint32_t u32[XMM_SIZE / sizeof(uint32_t)];
uint64_t u64[XMM_SIZE / sizeof(uint64_t)];
double pd[XMM_SIZE / sizeof(double)];
-} __attribute__((aligned(16))) rte_xmm_t;
+} __rte_aligned(16) rte_xmm_t;
#ifdef __cplusplus
}
uint16_t link_duplex : 1; /**< ETH_LINK_[HALF/FULL]_DUPLEX */
uint16_t link_autoneg : 1; /**< ETH_LINK_[AUTONEG/FIXED] */
uint16_t link_status : 1; /**< ETH_LINK_[DOWN/UP] */
-} __attribute__((aligned(8))); /**< aligned for atomic64 read/write */
+} __rte_aligned(8); /**< aligned for atomic64 read/write */
/* Utility constants */
#define ETH_LINK_HALF_DUPLEX 0 /**< Half-duplex connection (see link_duplex). */
struct rte_ipv4_tuple v4;
struct rte_ipv6_tuple v6;
#ifdef RTE_ARCH_X86
-} __attribute__((aligned(XMM_SIZE)));
+} __rte_aligned(XMM_SIZE);
#else
};
#endif
uint32_t arp_sip; /**< sender IP address */
struct rte_ether_addr arp_tha; /**< target hardware address */
uint32_t arp_tip; /**< target IP address */
-} __attribute__((__packed__)) __attribute__((aligned(2)));
+} __attribute__((__packed__)) __rte_aligned(2);
/**
* ARP header.
#define RTE_ARP_OP_INVREPLY 9 /* response identifying peer */
struct rte_arp_ipv4 arp_data;
-} __attribute__((__packed__)) __attribute__((aligned(2)));
+} __attribute__((__packed__)) __rte_aligned(2);
/**
* @warning
*/
struct rte_ether_addr {
uint8_t addr_bytes[RTE_ETHER_ADDR_LEN]; /**< Addr bytes in tx order */
-} __attribute__((aligned(2)));
+} __rte_aligned(2);
#define RTE_ETHER_LOCAL_ADMIN_ADDR 0x02 /**< Locally assigned Eth. address. */
#define RTE_ETHER_GROUP_ADDR 0x01 /**< Multicast or broadcast Eth. address. */
struct rte_ether_addr d_addr; /**< Destination address. */
struct rte_ether_addr s_addr; /**< Source address. */
uint16_t ether_type; /**< Frame type. */
-} __attribute__((aligned(2)));
+} __rte_aligned(2);
/**
* Ethernet VLAN Header.
struct rte_ether_hdr ether;
uint32_t mpls[RTE_TABLE_ACTION_MPLS_LABELS_MAX];
uint32_t mpls_count;
-} __attribute__((__packed__)) __attribute__((aligned(2)));
+} __attribute__((__packed__)) __rte_aligned(2);
#define PPP_PROTOCOL_IP 0x0021
struct rte_ipv4_hdr ipv4;
struct rte_udp_hdr udp;
struct rte_vxlan_hdr vxlan;
-} __attribute__((__packed__)) __attribute__((aligned(2)));
+} __attribute__((__packed__)) __rte_aligned(2);
struct encap_vxlan_ipv4_vlan_data {
struct rte_ether_hdr ether;
struct rte_ipv4_hdr ipv4;
struct rte_udp_hdr udp;
struct rte_vxlan_hdr vxlan;
-} __attribute__((__packed__)) __attribute__((aligned(2)));
+} __attribute__((__packed__)) __rte_aligned(2);
struct encap_vxlan_ipv6_data {
struct rte_ether_hdr ether;
struct rte_ipv6_hdr ipv6;
struct rte_udp_hdr udp;
struct rte_vxlan_hdr vxlan;
-} __attribute__((__packed__)) __attribute__((aligned(2)));
+} __attribute__((__packed__)) __rte_aligned(2);
struct encap_vxlan_ipv6_vlan_data {
struct rte_ether_hdr ether;
struct rte_ipv6_hdr ipv6;
struct rte_udp_hdr udp;
struct rte_vxlan_hdr vxlan;
-} __attribute__((__packed__)) __attribute__((aligned(2)));
+} __attribute__((__packed__)) __rte_aligned(2);
struct encap_qinq_pppoe_data {
struct rte_ether_hdr ether;
struct rte_vlan_hdr svlan;
struct rte_vlan_hdr cvlan;
struct pppoe_ppp_hdr pppoe_ppp;
-} __attribute__((__packed__)) __attribute__((aligned(2)));
+} __attribute__((__packed__)) __rte_aligned(2);
static size_t
encap_data_size(struct rte_table_action_encap_config *encap)
#include <stdint.h>
#include <sys/types.h>
-#define __rte_aligned_16 __attribute__((__aligned__(16)))
+#define __rte_aligned_16 __rte_aligned(16)
#if 0
static inline uint32_t