/* Completion queue descriptor: 16B
*
* All completion queues have this basic layout. The
- * type_specfic area is unique for each completion
+ * type_specific area is unique for each completion
* queue type.
*/
struct cq_desc {
- __le16 completed_index;
- __le16 q_number;
- u8 type_specfic[11];
- u8 type_color;
+ uint16_t completed_index;
+ uint16_t q_number;
+ uint8_t type_specific[11];
+ uint8_t type_color;
};
#define CQ_DESC_TYPE_BITS 4
#define CQ_DESC_COMP_NDX_BITS 12
#define CQ_DESC_COMP_NDX_MASK ((1 << CQ_DESC_COMP_NDX_BITS) - 1)
-static inline void cq_color_enc(struct cq_desc *desc, const u8 color)
+static inline void cq_color_enc(struct cq_desc *desc, const uint8_t color)
{
if (color)
desc->type_color |= (1 << CQ_DESC_COLOR_SHIFT);
}
static inline void cq_desc_enc(struct cq_desc *desc,
- const u8 type, const u8 color, const u16 q_number,
- const u16 completed_index)
+ const uint8_t type, const uint8_t color, const uint16_t q_number,
+ const uint16_t completed_index)
{
desc->type_color = (type & CQ_DESC_TYPE_MASK) |
((color & CQ_DESC_COLOR_MASK) << CQ_DESC_COLOR_SHIFT);
}
static inline void cq_desc_dec(const struct cq_desc *desc_arg,
- u8 *type, u8 *color, u16 *q_number, u16 *completed_index)
+ uint8_t *type, uint8_t *color, uint16_t *q_number,
+ uint16_t *completed_index)
{
const struct cq_desc *desc = desc_arg;
- const u8 type_color = desc->type_color;
+ const uint8_t type_color = desc->type_color;
*color = (type_color >> CQ_DESC_COLOR_SHIFT) & CQ_DESC_COLOR_MASK;
* result in reading stale values.
*/
- rmb();
+ rte_rmb();
*type = type_color & CQ_DESC_TYPE_MASK;
*q_number = rte_le_to_cpu_16(desc->q_number) & CQ_DESC_Q_NUM_MASK;
CQ_DESC_COMP_NDX_MASK;
}
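For illustration, a minimal polling sketch built on the two decoders above; the ring pointer, to_clean index, and last_color fields are hypothetical stand-ins for the driver's CQ bookkeeping, not names from this patch. Hardware writes each pass over the ring with an alternating color, so an entry is new only while its color differs from the color of the previous pass:

static inline int cq_service_one(struct cq_desc *ring, unsigned int count,
	unsigned int *to_clean, uint8_t *last_color)
{
	uint8_t type, color;
	uint16_t q_number, completed_index;

	cq_color_dec(&ring[*to_clean], &color);
	if (color == *last_color)
		return 0;	/* no new completion yet */

	cq_desc_dec(&ring[*to_clean], &type, &color,
		&q_number, &completed_index);
	/* a real service routine would dispatch on type/q_number here */

	/* on wrap, flip the color expected from the next pass */
	if (++(*to_clean) == count) {
		*to_clean = 0;
		*last_color = !*last_color;
	}
	return 1;
}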
-static inline void cq_color_dec(const struct cq_desc *desc_arg, u8 *color)
+static inline void cq_color_dec(const struct cq_desc *desc_arg, uint8_t *color)
{
volatile const struct cq_desc *desc = desc_arg;
/* Ethernet completion queue descriptor: 16B */
struct cq_enet_wq_desc {
- __le16 completed_index;
- __le16 q_number;
- u8 reserved[11];
- u8 type_color;
+ uint16_t completed_index;
+ uint16_t q_number;
+ uint8_t reserved[11];
+ uint8_t type_color;
};
static inline void cq_enet_wq_desc_enc(struct cq_enet_wq_desc *desc,
- u8 type, u8 color, u16 q_number, u16 completed_index)
+ uint8_t type, uint8_t color, uint16_t q_number,
+ uint16_t completed_index)
{
cq_desc_enc((struct cq_desc *)desc, type,
color, q_number, completed_index);
}
static inline void cq_enet_wq_desc_dec(struct cq_enet_wq_desc *desc,
- u8 *type, u8 *color, u16 *q_number, u16 *completed_index)
+ uint8_t *type, uint8_t *color, uint16_t *q_number,
+ uint16_t *completed_index)
{
cq_desc_dec((struct cq_desc *)desc, type,
color, q_number, completed_index);
/* Completion queue descriptor: Ethernet receive queue, 16B */
struct cq_enet_rq_desc {
- __le16 completed_index_flags;
- __le16 q_number_rss_type_flags;
- __le32 rss_hash;
- __le16 bytes_written_flags;
- __le16 vlan;
- __le16 checksum_fcoe;
- u8 flags;
- u8 type_color;
+ uint16_t completed_index_flags;
+ uint16_t q_number_rss_type_flags;
+ uint32_t rss_hash;
+ uint16_t bytes_written_flags;
+ uint16_t vlan;
+ uint16_t checksum_fcoe;
+ uint8_t flags;
+ uint8_t type_color;
};
/* Completion queue descriptor: Ethernet receive queue, 16B */
struct cq_enet_rq_clsf_desc {
- __le16 completed_index_flags;
- __le16 q_number_rss_type_flags;
- __le16 filter_id;
- __le16 lif;
- __le16 bytes_written_flags;
- __le16 vlan;
- __le16 checksum_fcoe;
- u8 flags;
- u8 type_color;
+ uint16_t completed_index_flags;
+ uint16_t q_number_rss_type_flags;
+ uint16_t filter_id;
+ uint16_t lif;
+ uint16_t bytes_written_flags;
+ uint16_t vlan;
+ uint16_t checksum_fcoe;
+ uint8_t flags;
+ uint8_t type_color;
};
#define CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT (0x1 << 12)
#define CQ_ENET_RQ_DESC_FLAGS_FCS_OK (0x1 << 7)
static inline void cq_enet_rq_desc_enc(struct cq_enet_rq_desc *desc,
- u8 type, u8 color, u16 q_number, u16 completed_index,
- u8 ingress_port, u8 fcoe, u8 eop, u8 sop, u8 rss_type, u8 csum_not_calc,
- u32 rss_hash, u16 bytes_written, u8 packet_error, u8 vlan_stripped,
- u16 vlan, u16 checksum, u8 fcoe_sof, u8 fcoe_fc_crc_ok,
- u8 fcoe_enc_error, u8 fcoe_eof, u8 tcp_udp_csum_ok, u8 udp, u8 tcp,
- u8 ipv4_csum_ok, u8 ipv6, u8 ipv4, u8 ipv4_fragment, u8 fcs_ok)
+ uint8_t type, uint8_t color, uint16_t q_number,
+ uint16_t completed_index, uint8_t ingress_port, uint8_t fcoe,
+ uint8_t eop, uint8_t sop, uint8_t rss_type, uint8_t csum_not_calc,
+ uint32_t rss_hash, uint16_t bytes_written, uint8_t packet_error,
+ uint8_t vlan_stripped, uint16_t vlan, uint16_t checksum,
+ uint8_t fcoe_sof, uint8_t fcoe_fc_crc_ok, uint8_t fcoe_enc_error,
+ uint8_t fcoe_eof, uint8_t tcp_udp_csum_ok, uint8_t udp, uint8_t tcp,
+ uint8_t ipv4_csum_ok, uint8_t ipv6, uint8_t ipv4, uint8_t ipv4_fragment,
+ uint8_t fcs_ok)
{
cq_desc_enc((struct cq_desc *)desc, type,
color, q_number, completed_index);
}
static inline void cq_enet_rq_desc_dec(struct cq_enet_rq_desc *desc,
- u8 *type, u8 *color, u16 *q_number, u16 *completed_index,
- u8 *ingress_port, u8 *fcoe, u8 *eop, u8 *sop, u8 *rss_type,
- u8 *csum_not_calc, u32 *rss_hash, u16 *bytes_written, u8 *packet_error,
- u8 *vlan_stripped, u16 *vlan_tci, u16 *checksum, u8 *fcoe_sof,
- u8 *fcoe_fc_crc_ok, u8 *fcoe_enc_error, u8 *fcoe_eof,
- u8 *tcp_udp_csum_ok, u8 *udp, u8 *tcp, u8 *ipv4_csum_ok,
- u8 *ipv6, u8 *ipv4, u8 *ipv4_fragment, u8 *fcs_ok)
+ uint8_t *type, uint8_t *color, uint16_t *q_number,
+ uint16_t *completed_index, uint8_t *ingress_port, uint8_t *fcoe,
+ uint8_t *eop, uint8_t *sop, uint8_t *rss_type, uint8_t *csum_not_calc,
+ uint32_t *rss_hash, uint16_t *bytes_written, uint8_t *packet_error,
+ uint8_t *vlan_stripped, uint16_t *vlan_tci, uint16_t *checksum,
+ uint8_t *fcoe_sof, uint8_t *fcoe_fc_crc_ok, uint8_t *fcoe_enc_error,
+ uint8_t *fcoe_eof, uint8_t *tcp_udp_csum_ok, uint8_t *udp, uint8_t *tcp,
+ uint8_t *ipv4_csum_ok, uint8_t *ipv6, uint8_t *ipv4,
+ uint8_t *ipv4_fragment, uint8_t *fcs_ok)
{
- u16 completed_index_flags;
- u16 q_number_rss_type_flags;
- u16 bytes_written_flags;
+ uint16_t completed_index_flags;
+ uint16_t q_number_rss_type_flags;
+ uint16_t bytes_written_flags;
cq_desc_dec((struct cq_desc *)desc, type,
color, q_number, completed_index);
*sop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_SOP) ?
1 : 0;
- *rss_type = (u8)((q_number_rss_type_flags >> CQ_DESC_Q_NUM_BITS) &
+ *rss_type = (uint8_t)((q_number_rss_type_flags >> CQ_DESC_Q_NUM_BITS) &
CQ_ENET_RQ_DESC_RSS_TYPE_MASK);
*csum_not_calc = (q_number_rss_type_flags &
CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ? 1 : 0;
*vlan_tci = rte_le_to_cpu_16(desc->vlan);
if (*fcoe) {
- *fcoe_sof = (u8)(rte_le_to_cpu_16(desc->checksum_fcoe) &
+ *fcoe_sof = (uint8_t)(rte_le_to_cpu_16(desc->checksum_fcoe) &
CQ_ENET_RQ_DESC_FCOE_SOF_MASK);
*fcoe_fc_crc_ok = (desc->flags &
CQ_ENET_RQ_DESC_FCOE_FC_CRC_OK) ? 1 : 0;
*fcoe_enc_error = (desc->flags &
CQ_ENET_RQ_DESC_FCOE_ENC_ERROR) ? 1 : 0;
- *fcoe_eof = (u8)((rte_le_to_cpu_16(desc->checksum_fcoe) >>
+ *fcoe_eof = (uint8_t)((rte_le_to_cpu_16(desc->checksum_fcoe) >>
CQ_ENET_RQ_DESC_FCOE_EOF_SHIFT) &
CQ_ENET_RQ_DESC_FCOE_EOF_MASK);
*checksum = 0;
/* Ethernet receive queue descriptor: 16B */
struct rq_enet_desc {
- __le64 address;
- __le16 length_type;
- u8 reserved[6];
+ uint64_t address;
+ uint16_t length_type;
+ uint8_t reserved[6];
};
enum rq_enet_type_types {
#define RQ_ENET_TYPE_MASK ((1 << RQ_ENET_TYPE_BITS) - 1)
static inline void rq_enet_desc_enc(volatile struct rq_enet_desc *desc,
- u64 address, u8 type, u16 length)
+ uint64_t address, uint8_t type, uint16_t length)
{
desc->address = rte_cpu_to_le_64(address);
desc->length_type = rte_cpu_to_le_16((length & RQ_ENET_LEN_MASK) |
}
static inline void rq_enet_desc_dec(struct rq_enet_desc *desc,
- u64 *address, u8 *type, u16 *length)
+ uint64_t *address, uint8_t *type, uint16_t *length)
{
*address = rte_le_to_cpu_64(desc->address);
*length = rte_le_to_cpu_16(desc->length_type) & RQ_ENET_LEN_MASK;
- *type = (u8)((rte_le_to_cpu_16(desc->length_type) >> RQ_ENET_LEN_BITS) &
- RQ_ENET_TYPE_MASK);
+ *type = (uint8_t)((rte_le_to_cpu_16(desc->length_type) >>
+ RQ_ENET_LEN_BITS) & RQ_ENET_TYPE_MASK);
}
#endif /* _RQ_ENET_DESC_H_ */
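A hedged round trip of the pack/unpack pair above (the buffer address and the RQ_ENET_TYPE_ONLY_SOP value are illustrative; length must fit in RQ_ENET_LEN_BITS and type in RQ_ENET_TYPE_BITS for the masks to preserve them):

static void rq_enet_desc_roundtrip_example(void)
{
	struct rq_enet_desc d;
	uint64_t addr;
	uint8_t type;
	uint16_t len;

	rq_enet_desc_enc(&d, 0x12345000ULL, RQ_ENET_TYPE_ONLY_SOP, 2048);
	rq_enet_desc_dec(&d, &addr, &type, &len);
	/* now addr == 0x12345000, type == RQ_ENET_TYPE_ONLY_SOP and
	 * len == 2048: length lives in the low RQ_ENET_LEN_BITS of
	 * length_type and type in the bits above it, so the round
	 * trip is lossless.
	 */
}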
unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail,
unsigned int cq_tail_color, unsigned int interrupt_enable,
unsigned int cq_entry_enable, unsigned int cq_message_enable,
- unsigned int interrupt_offset, u64 cq_message_addr)
+ unsigned int interrupt_offset, uint64_t cq_message_addr)
{
- u64 paddr;
+ uint64_t paddr;
- paddr = (u64)cq->ring.base_addr | VNIC_PADDR_TARGET;
+ paddr = (uint64_t)cq->ring.base_addr | VNIC_PADDR_TARGET;
writeq(paddr, &cq->ctrl->ring_base);
iowrite32(cq->ring.desc_count, &cq->ctrl->ring_size);
iowrite32(flow_control_enable, &cq->ctrl->flow_control_enable);
/* Completion queue control */
struct vnic_cq_ctrl {
- u64 ring_base; /* 0x00 */
- u32 ring_size; /* 0x08 */
- u32 pad0;
- u32 flow_control_enable; /* 0x10 */
- u32 pad1;
- u32 color_enable; /* 0x18 */
- u32 pad2;
- u32 cq_head; /* 0x20 */
- u32 pad3;
- u32 cq_tail; /* 0x28 */
- u32 pad4;
- u32 cq_tail_color; /* 0x30 */
- u32 pad5;
- u32 interrupt_enable; /* 0x38 */
- u32 pad6;
- u32 cq_entry_enable; /* 0x40 */
- u32 pad7;
- u32 cq_message_enable; /* 0x48 */
- u32 pad8;
- u32 interrupt_offset; /* 0x50 */
- u32 pad9;
- u64 cq_message_addr; /* 0x58 */
- u32 pad10;
+ uint64_t ring_base; /* 0x00 */
+ uint32_t ring_size; /* 0x08 */
+ uint32_t pad0;
+ uint32_t flow_control_enable; /* 0x10 */
+ uint32_t pad1;
+ uint32_t color_enable; /* 0x18 */
+ uint32_t pad2;
+ uint32_t cq_head; /* 0x20 */
+ uint32_t pad3;
+ uint32_t cq_tail; /* 0x28 */
+ uint32_t pad4;
+ uint32_t cq_tail_color; /* 0x30 */
+ uint32_t pad5;
+ uint32_t interrupt_enable; /* 0x38 */
+ uint32_t pad6;
+ uint32_t cq_entry_enable; /* 0x40 */
+ uint32_t pad7;
+ uint32_t cq_message_enable; /* 0x48 */
+ uint32_t pad8;
+ uint32_t interrupt_offset; /* 0x50 */
+ uint32_t pad9;
+ uint64_t cq_message_addr; /* 0x58 */
+ uint32_t pad10;
};
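Every 32-bit register above sits in an 8-byte slot (hence the pad members), which keeps the struct in step with the commented BAR offsets. A compile-time sanity check, as a sketch assuming natural member alignment:

#include <stddef.h>

_Static_assert(offsetof(struct vnic_cq_ctrl, flow_control_enable) == 0x10,
	"flow_control_enable offset");
_Static_assert(offsetof(struct vnic_cq_ctrl, cq_message_addr) == 0x58,
	"cq_message_addr offset");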
#ifdef ENIC_AIC
unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail,
unsigned int cq_tail_color, unsigned int interrupt_enable,
unsigned int cq_entry_enable, unsigned int message_enable,
- unsigned int interrupt_offset, u64 message_addr);
+ unsigned int interrupt_offset, uint64_t message_addr);
void vnic_cq_clean(struct vnic_cq *cq);
int vnic_cq_mem_size(struct vnic_cq *cq, unsigned int desc_count,
unsigned int desc_size);
};
struct vnic_intr_coal_timer_info {
- u32 mul;
- u32 div;
- u32 max_usec;
+ uint32_t mul;
+ uint32_t div;
+ uint32_t max_usec;
};
struct vnic_dev {
struct vnic_devcmd_notify *notify;
struct vnic_devcmd_notify notify_copy;
dma_addr_t notify_pa;
- u32 notify_sz;
+ uint32_t notify_sz;
dma_addr_t linkstatus_pa;
struct vnic_stats *stats;
dma_addr_t stats_pa;
struct fm_info *flowman_info;
dma_addr_t flowman_info_pa;
enum vnic_proxy_type proxy;
- u32 proxy_index;
- u64 args[VNIC_DEVCMD_NARGS];
+ uint32_t proxy_index;
+ uint64_t args[VNIC_DEVCMD_NARGS];
int in_reset;
struct vnic_intr_coal_timer_info intr_coal_timer_info;
void *(*alloc_consistent)(void *priv, size_t size,
- dma_addr_t *dma_handle, u8 *name);
+ dma_addr_t *dma_handle, uint8_t *name);
void (*free_consistent)(void *priv,
size_t size, void *vaddr,
dma_addr_t dma_handle);
void vnic_register_cbacks(struct vnic_dev *vdev,
void *(*alloc_consistent)(void *priv, size_t size,
- dma_addr_t *dma_handle, u8 *name),
+ dma_addr_t *dma_handle, uint8_t *name),
void (*free_consistent)(void *priv,
size_t size, void *vaddr,
dma_addr_t dma_handle))
struct vnic_resource_header __iomem *rh;
struct mgmt_barmap_hdr __iomem *mrh;
struct vnic_resource __iomem *r;
- u8 type;
+ uint8_t type;
if (num_bars == 0)
return -EINVAL;
while ((type = ioread8(&r->type)) != RES_TYPE_EOL) {
- u8 bar_num = ioread8(&r->bar);
- u32 bar_offset = ioread32(&r->bar_offset);
- u32 count = ioread32(&r->count);
- u32 len;
+ uint8_t bar_num = ioread8(&r->bar);
+ uint32_t bar_offset = ioread32(&r->bar_offset);
+ uint32_t count = ioread32(&r->count);
+ uint32_t len;
r++;
vnic_dev_desc_ring_size(ring, desc_count, desc_size);
alloc_addr = vdev->alloc_consistent(vdev->priv,
ring->size_unaligned,
- &alloc_pa, (u8 *)z_name);
+ &alloc_pa, (uint8_t *)z_name);
if (!alloc_addr) {
pr_err("Failed to allocate ring (size=%d), aborting\n",
(int)ring->size);
ring->base_addr = VNIC_ALIGN(ring->base_addr_unaligned,
ring->base_align);
- ring->descs = (u8 *)ring->descs_unaligned +
+ ring->descs = (uint8_t *)ring->descs_unaligned +
(ring->base_addr - ring->base_addr_unaligned);
vnic_dev_clear_desc_ring(ring);
struct vnic_devcmd __iomem *devcmd = vdev->devcmd;
unsigned int i;
int delay;
- u32 status;
+ uint32_t status;
int err;
status = ioread32(&devcmd->status);
if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) {
for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
writeq(vdev->args[i], &devcmd->args[i]);
- wmb(); /* complete all writes initiated till now */
+ rte_wmb(); /* complete all writes initiated till now */
}
iowrite32(cmd, &devcmd->cmd);
for (delay = 0; delay < wait; delay++) {
- udelay(100);
+ usleep(100);
status = ioread32(&devcmd->status);
if (status == 0xFFFFFFFF) {
}
if (_CMD_DIR(cmd) & _CMD_DIR_READ) {
- rmb();/* finish all reads initiated till now */
+ rte_rmb(); /* finish all reads initiated till now */
for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
vdev->args[i] = readq(&devcmd->args[i]);
}
static int vnic_dev_cmd_proxy(struct vnic_dev *vdev,
enum vnic_devcmd_cmd proxy_cmd, enum vnic_devcmd_cmd cmd,
- u64 *args, int nargs, int wait)
+ uint64_t *args, int nargs, int wait)
{
- u32 status;
+ uint32_t status;
int err;
/*
if (err)
return err;
- status = (u32)vdev->args[0];
+ status = (uint32_t)vdev->args[0];
if (status & STAT_ERROR) {
err = (int)vdev->args[1];
if (err != ERR_ECMDUNKNOWN ||
}
static int vnic_dev_cmd_no_proxy(struct vnic_dev *vdev,
- enum vnic_devcmd_cmd cmd, u64 *args, int nargs, int wait)
+ enum vnic_devcmd_cmd cmd, uint64_t *args, int nargs, int wait)
{
int err;
}
int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
- u64 *a0, u64 *a1, int wait)
+ uint64_t *a0, uint64_t *a1, int wait)
{
- u64 args[2];
+ uint64_t args[2];
int err;
args[0] = *a0;
}
int vnic_dev_cmd_args(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
- u64 *args, int nargs, int wait)
+ uint64_t *args, int nargs, int wait)
{
switch (vdev->proxy) {
case PROXY_BY_INDEX:
struct vnic_devcmd_fw_info **fw_info)
{
char name[RTE_MEMZONE_NAMESIZE];
- u64 a0, a1 = 0;
+ uint64_t a0, a1 = 0;
int wait = 1000;
int err = 0;
- static u32 instance;
+ static uint32_t instance;
if (!vdev->fw_info) {
snprintf((char *)name, sizeof(name), "vnic_fw_info-%u",
instance++);
vdev->fw_info = vdev->alloc_consistent(vdev->priv,
sizeof(struct vnic_devcmd_fw_info),
- &vdev->fw_info_pa, (u8 *)name);
+ &vdev->fw_info_pa, (uint8_t *)name);
if (!vdev->fw_info)
return -ENOMEM;
a0 = vdev->fw_info_pa;
return err;
}
-static int vnic_dev_advanced_filters_cap(struct vnic_dev *vdev, u64 *args,
+static int vnic_dev_advanced_filters_cap(struct vnic_dev *vdev, uint64_t *args,
int nargs)
{
memset(args, 0, nargs * sizeof(*args));
int vnic_dev_capable_adv_filters(struct vnic_dev *vdev)
{
- u64 a0 = CMD_ADD_ADV_FILTER, a1 = 0;
+ uint64_t a0 = CMD_ADD_ADV_FILTER, a1 = 0;
int wait = 1000;
int err;
err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait);
if (err)
return 0;
- return (a1 >= (u32)FILTER_DPDK_1);
+ return (a1 >= (uint32_t)FILTER_DPDK_1);
}
-int vnic_dev_flowman_cmd(struct vnic_dev *vdev, u64 *args, int nargs)
+int vnic_dev_flowman_cmd(struct vnic_dev *vdev, uint64_t *args, int nargs)
{
int wait = 1000;
return vnic_dev_cmd_args(vdev, CMD_FLOW_MANAGER_OP, args, nargs, wait);
}
-static int vnic_dev_flowman_enable(struct vnic_dev *vdev, u32 *mode,
- u8 *filter_actions)
+static int vnic_dev_flowman_enable(struct vnic_dev *vdev, uint32_t *mode,
+ uint8_t *filter_actions)
{
char name[RTE_MEMZONE_NAMESIZE];
- u64 args[3];
- u64 ops;
- static u32 instance;
+ uint64_t args[3];
+ uint64_t ops;
+ static uint32_t instance;
/* flowman devcmd available? */
if (!vnic_dev_capable(vdev, CMD_FLOW_MANAGER_OP))
instance++);
vdev->flowman_info = vdev->alloc_consistent(vdev->priv,
sizeof(struct fm_info),
- &vdev->flowman_info_pa, (u8 *)name);
+ &vdev->flowman_info_pa, (uint8_t *)name);
if (!vdev->flowman_info)
return 0;
}
* all other filter types are not available.
- * Retrun true in filter_tags if supported
+ * Return true in filter_tags if supported
*/
-int vnic_dev_capable_filter_mode(struct vnic_dev *vdev, u32 *mode,
- u8 *filter_actions)
+int vnic_dev_capable_filter_mode(struct vnic_dev *vdev, uint32_t *mode,
+ uint8_t *filter_actions)
{
- u64 args[4];
+ uint64_t args[4];
int err;
- u32 max_level = 0;
+ uint32_t max_level = 0;
/* If flowman is available, use it as it is the most capable API */
if (vnic_dev_flowman_enable(vdev, mode, filter_actions))
}
max_level = args[1];
parse_max_level:
- if (max_level >= (u32)FILTER_USNIC_IP)
+ if (max_level >= (uint32_t)FILTER_USNIC_IP)
*mode = FILTER_USNIC_IP;
else
*mode = FILTER_IPV4_5TUPLE;
void vnic_dev_capable_udp_rss_weak(struct vnic_dev *vdev, bool *cfg_chk,
bool *weak)
{
- u64 a0 = CMD_NIC_CFG, a1 = 0;
+ uint64_t a0 = CMD_NIC_CFG, a1 = 0;
int wait = 1000;
int err;
int vnic_dev_capable(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd)
{
- u64 a0 = (u32)cmd, a1 = 0;
+ uint64_t a0 = (uint32_t)cmd, a1 = 0;
int wait = 1000;
int err;
int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, size_t size,
void *value)
{
- u64 a0, a1;
+ uint64_t a0, a1;
int wait = 1000;
int err;
switch (size) {
case 1:
- *(u8 *)value = (u8)a0;
+ *(uint8_t *)value = (uint8_t)a0;
break;
case 2:
- *(u16 *)value = (u16)a0;
+ *(uint16_t *)value = (uint16_t)a0;
break;
case 4:
- *(u32 *)value = (u32)a0;
+ *(uint32_t *)value = (uint32_t)a0;
break;
case 8:
- *(u64 *)value = a0;
+ *(uint64_t *)value = a0;
break;
default:
BUG();
int vnic_dev_stats_clear(struct vnic_dev *vdev)
{
- u64 a0 = 0, a1 = 0;
+ uint64_t a0 = 0, a1 = 0;
int wait = 1000;
return vnic_dev_cmd(vdev, CMD_STATS_CLEAR, &a0, &a1, wait);
int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats)
{
- u64 a0, a1;
+ uint64_t a0, a1;
int wait = 1000;
if (!vdev->stats)
int vnic_dev_close(struct vnic_dev *vdev)
{
- u64 a0 = 0, a1 = 0;
+ uint64_t a0 = 0, a1 = 0;
int wait = 1000;
return vnic_dev_cmd(vdev, CMD_CLOSE, &a0, &a1, wait);
int vnic_dev_enable_wait(struct vnic_dev *vdev)
{
- u64 a0 = 0, a1 = 0;
+ uint64_t a0 = 0, a1 = 0;
int wait = 1000;
if (vnic_dev_capable(vdev, CMD_ENABLE_WAIT))
int vnic_dev_disable(struct vnic_dev *vdev)
{
- u64 a0 = 0, a1 = 0;
+ uint64_t a0 = 0, a1 = 0;
int wait = 1000;
return vnic_dev_cmd(vdev, CMD_DISABLE, &a0, &a1, wait);
int vnic_dev_open(struct vnic_dev *vdev, int arg)
{
- u64 a0 = (u32)arg, a1 = 0;
+ uint64_t a0 = (uint32_t)arg, a1 = 0;
int wait = 1000;
return vnic_dev_cmd(vdev, CMD_OPEN, &a0, &a1, wait);
int vnic_dev_open_done(struct vnic_dev *vdev, int *done)
{
- u64 a0 = 0, a1 = 0;
+ uint64_t a0 = 0, a1 = 0;
int wait = 1000;
int err;
return 0;
}
-int vnic_dev_get_mac_addr(struct vnic_dev *vdev, u8 *mac_addr)
+int vnic_dev_get_mac_addr(struct vnic_dev *vdev, uint8_t *mac_addr)
{
- u64 a0 = 0, a1 = 0;
+ uint64_t a0 = 0, a1 = 0;
int wait = 1000;
int err, i;
return err;
for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
- mac_addr[i] = ((u8 *)&a0)[i];
+ mac_addr[i] = ((uint8_t *)&a0)[i];
return 0;
}
int vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
int broadcast, int promisc, int allmulti)
{
- u64 a0, a1 = 0;
+ uint64_t a0, a1 = 0;
int wait = 1000;
int err;
return err;
}
-int vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr)
+int vnic_dev_add_addr(struct vnic_dev *vdev, uint8_t *addr)
{
- u64 a0 = 0, a1 = 0;
+ uint64_t a0 = 0, a1 = 0;
int wait = 1000;
int err;
int i;
for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
- ((u8 *)&a0)[i] = addr[i];
+ ((uint8_t *)&a0)[i] = addr[i];
err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
if (err)
return err;
}
-int vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr)
+int vnic_dev_del_addr(struct vnic_dev *vdev, uint8_t *addr)
{
- u64 a0 = 0, a1 = 0;
+ uint64_t a0 = 0, a1 = 0;
int wait = 1000;
int err;
int i;
for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
- ((u8 *)&a0)[i] = addr[i];
+ ((uint8_t *)&a0)[i] = addr[i];
err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait);
if (err)
}
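Both address helpers above place the six MAC bytes into the low-order bytes of a0; for example (address purely illustrative):

/* addr = 02:00:11:22:33:44 yields, on a little-endian host,
 *	a0 == 0x0000443322110002
 * since byte i of addr is stored into byte i of a0's representation.
 */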
int vnic_dev_set_ig_vlan_rewrite_mode(struct vnic_dev *vdev,
- u8 ig_vlan_rewrite_mode)
+ uint8_t ig_vlan_rewrite_mode)
{
- u64 a0 = ig_vlan_rewrite_mode, a1 = 0;
+ uint64_t a0 = ig_vlan_rewrite_mode, a1 = 0;
int wait = 1000;
if (vnic_dev_capable(vdev, CMD_IG_VLAN_REWRITE_MODE))
}
int vnic_dev_notify_setcmd(struct vnic_dev *vdev,
- void *notify_addr, dma_addr_t notify_pa, u16 intr)
+ void *notify_addr, dma_addr_t notify_pa, uint16_t intr)
{
- u64 a0, a1;
+ uint64_t a0, a1;
int wait = 1000;
int r;
vdev->notify_pa = notify_pa;
}
- a0 = (u64)notify_pa;
- a1 = ((u64)intr << 32) & 0x0000ffff00000000ULL;
+ a0 = (uint64_t)notify_pa;
+ a1 = ((uint64_t)intr << 32) & 0x0000ffff00000000ULL;
a1 += sizeof(struct vnic_devcmd_notify);
r = vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
if (!vnic_dev_in_reset(vdev))
- vdev->notify_sz = (r == 0) ? (u32)a1 : 0;
+ vdev->notify_sz = (r == 0) ? (uint32_t)a1 : 0;
return r;
}
-int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
+int vnic_dev_notify_set(struct vnic_dev *vdev, uint16_t intr)
{
void *notify_addr = NULL;
dma_addr_t notify_pa = 0;
char name[RTE_MEMZONE_NAMESIZE];
- static u32 instance;
+ static uint32_t instance;
if (vdev->notify || vdev->notify_pa) {
return vnic_dev_notify_setcmd(vdev, vdev->notify,
"vnic_notify-%u", instance++);
notify_addr = vdev->alloc_consistent(vdev->priv,
sizeof(struct vnic_devcmd_notify),
- ¬ify_pa, (u8 *)name);
+ ¬ify_pa, (uint8_t *)name);
if (!notify_addr)
return -ENOMEM;
}
int vnic_dev_notify_unsetcmd(struct vnic_dev *vdev)
{
- u64 a0, a1;
+ uint64_t a0, a1;
int wait = 1000;
int err;
static int vnic_dev_notify_ready(struct vnic_dev *vdev)
{
- u32 *words;
+ uint32_t *words;
unsigned int nwords = vdev->notify_sz / 4;
unsigned int i;
- u32 csum;
+ uint32_t csum;
if (!vdev->notify || !vdev->notify_sz)
return 0;
do {
csum = 0;
rte_memcpy(&vdev->notify_copy, vdev->notify, vdev->notify_sz);
- words = (u32 *)&vdev->notify_copy;
+ words = (uint32_t *)&vdev->notify_copy;
for (i = 1; i < nwords; i++)
csum += words[i];
} while (csum != words[0]);
int vnic_dev_init(struct vnic_dev *vdev, int arg)
{
- u64 a0 = (u32)arg, a1 = 0;
+ uint64_t a0 = (uint32_t)arg, a1 = 0;
int wait = 1000;
int r = 0;
return vdev->notify_copy.link_state;
}
-u32 vnic_dev_port_speed(struct vnic_dev *vdev)
+uint32_t vnic_dev_port_speed(struct vnic_dev *vdev)
{
if (!vnic_dev_notify_ready(vdev))
return 0;
return vdev->notify_copy.port_speed;
}
-u32 vnic_dev_intr_coal_timer_usec_to_hw(struct vnic_dev *vdev, u32 usec)
+uint32_t vnic_dev_intr_coal_timer_usec_to_hw(struct vnic_dev *vdev,
+ uint32_t usec)
{
return (usec * vdev->intr_coal_timer_info.mul) /
vdev->intr_coal_timer_info.div;
}
-u32 vnic_dev_intr_coal_timer_hw_to_usec(struct vnic_dev *vdev, u32 hw_cycles)
+uint32_t vnic_dev_intr_coal_timer_hw_to_usec(struct vnic_dev *vdev,
+ uint32_t hw_cycles)
{
return (hw_cycles * vdev->intr_coal_timer_info.div) /
vdev->intr_coal_timer_info.mul;
}
-u32 vnic_dev_get_intr_coal_timer_max(struct vnic_dev *vdev)
+uint32_t vnic_dev_get_intr_coal_timer_max(struct vnic_dev *vdev)
{
return vdev->intr_coal_timer_info.max_usec;
}
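A worked example of the two conversions above, with hypothetical calibration values (the real multiplier, divisor, and maximum come from the CMD_INTR_COAL_CONVERT devcmd described later in this patch):

/* Suppose firmware reported mul = 2 and div = 5.  Then:
 *	usec_to_hw(100) == 100 * 2 / 5 == 40 hw cycles
 *	hw_to_usec(40)  == 40 * 5 / 2 == 100 usec
 * Integer division truncates, so values that do not divide evenly
 * will not round-trip exactly.
 */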
int vnic_dev_alloc_stats_mem(struct vnic_dev *vdev)
{
char name[RTE_MEMZONE_NAMESIZE];
- static u32 instance;
+ static uint32_t instance;
snprintf((char *)name, sizeof(name), "vnic_stats-%u", instance++);
vdev->stats = vdev->alloc_consistent(vdev->priv,
sizeof(struct vnic_stats),
- &vdev->stats_pa, (u8 *)name);
+ &vdev->stats_pa, (uint8_t *)name);
return vdev->stats == NULL ? -ENOMEM : 0;
}
* @data: filter data
* @action: action data
*/
-int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry,
+int vnic_dev_classifier(struct vnic_dev *vdev, uint8_t cmd, uint16_t *entry,
struct filter_v2 *data, struct filter_action_v2 *action_v2)
{
- u64 a0 = 0, a1 = 0;
+ uint64_t a0 = 0, a1 = 0;
int wait = 1000;
dma_addr_t tlv_pa;
int ret = -EINVAL;
struct filter_tlv *tlv, *tlv_va;
- u64 tlv_size;
- u32 filter_size, action_size;
+ uint64_t tlv_size;
+ uint32_t filter_size, action_size;
static unsigned int unique_id;
char z_name[RTE_MEMZONE_NAMESIZE];
enum vnic_devcmd_cmd dev_cmd;
snprintf((char *)z_name, sizeof(z_name),
"vnic_clsf_%u", unique_id++);
tlv_va = vdev->alloc_consistent(vdev->priv,
- tlv_size, &tlv_pa, (u8 *)z_name);
+ tlv_size, &tlv_pa, (uint8_t *)z_name);
if (!tlv_va)
return -ENOMEM;
tlv = tlv_va;
tlv->length = action_size;
memcpy(&tlv->val, (void *)action_v2, action_size);
ret = vnic_dev_cmd(vdev, dev_cmd, &a0, &a1, wait);
- *entry = (u16)a0;
+ *entry = (uint16_t)a0;
vdev->free_consistent(vdev->priv, tlv_size, tlv_va, tlv_pa);
} else if (cmd == CLSF_DEL) {
a0 = *entry;
return ret;
}
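The classifier above marshals the filter and its action as two TLVs in a single DMA buffer handed to firmware; a conceptual sketch of that layout (field details are assumptions drawn from the surrounding code, not a wire-format spec):

/*
 *	tlv_pa -> +--------------------------------+
 *	          | filter_tlv { type, length,     |
 *	          |   val = struct filter_v2 }     |
 *	          +--------------------------------+
 *	          | filter_tlv { type, length,     |
 *	          |   val = filter_action_v2 }     |
 *	          +--------------------------------+
 * For an add, a0 = tlv_pa and a1 = tlv_size; the firmware-assigned
 * filter id returns in a0 and is stored through *entry.
 */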
-int vnic_dev_overlay_offload_ctrl(struct vnic_dev *vdev, u8 overlay, u8 config)
+int vnic_dev_overlay_offload_ctrl(struct vnic_dev *vdev, uint8_t overlay,
+ uint8_t config)
{
- u64 a0 = overlay;
- u64 a1 = config;
+ uint64_t a0 = overlay;
+ uint64_t a1 = config;
int wait = 1000;
return vnic_dev_cmd(vdev, CMD_OVERLAY_OFFLOAD_CTRL, &a0, &a1, wait);
}
-int vnic_dev_overlay_offload_cfg(struct vnic_dev *vdev, u8 overlay,
- u16 vxlan_udp_port_number)
+int vnic_dev_overlay_offload_cfg(struct vnic_dev *vdev, uint8_t overlay,
+ uint16_t vxlan_udp_port_number)
{
- u64 a1 = vxlan_udp_port_number;
- u64 a0 = overlay;
+ uint64_t a1 = vxlan_udp_port_number;
+ uint64_t a0 = overlay;
int wait = 1000;
return vnic_dev_cmd(vdev, CMD_OVERLAY_OFFLOAD_CFG, &a0, &a1, wait);
int vnic_dev_capable_vxlan(struct vnic_dev *vdev)
{
- u64 a0 = VIC_FEATURE_VXLAN;
- u64 a1 = 0;
+ uint64_t a0 = VIC_FEATURE_VXLAN;
+ uint64_t a1 = 0;
int wait = 1000;
int ret;
int vnic_dev_capable_geneve(struct vnic_dev *vdev)
{
- u64 a0 = VIC_FEATURE_GENEVE;
- u64 a1 = 0;
+ uint64_t a0 = VIC_FEATURE_GENEVE;
+ uint64_t a1 = 0;
int wait = 1000;
int ret;
#endif
#ifndef readq
-static inline u64 readq(void __iomem *reg)
+static inline uint64_t readq(void __iomem *reg)
{
- return ((u64)readl((char *)reg + 0x4UL) << 32) |
- (u64)readl(reg);
+ return ((uint64_t)readl((char *)reg + 0x4UL) << 32) |
+ (uint64_t)readl(reg);
}
-static inline void writeq(u64 val, void __iomem *reg)
+static inline void writeq(uint64_t val, void __iomem *reg)
{
writel(val & 0xffffffff, reg);
- writel((u32)(val >> 32), (char *)reg + 0x4UL);
+ writel((uint32_t)(val >> 32), (char *)reg + 0x4UL);
}
#endif
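For platforms without native 64-bit MMIO, the fallback above decomposes each access into two 32-bit operations, low word first on the write path; for example:

/* writeq(0x1122334455667788ULL, reg) performs, in order:
 *	writel(0x55667788, reg);                 low word at +0x0
 *	writel(0x11223344, (char *)reg + 0x4);   high word at +0x4
 * The pair is not atomic, so it is safe only while the register is
 * quiescent or the device latches the value on the high-word write.
 */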
enum vnic_res_type type);
void vnic_register_cbacks(struct vnic_dev *vdev,
void *(*alloc_consistent)(void *priv, size_t size,
- dma_addr_t *dma_handle, u8 *name),
+ dma_addr_t *dma_handle, uint8_t *name),
void (*free_consistent)(void *priv,
size_t size, void *vaddr,
dma_addr_t dma_handle));
void vnic_dev_free_desc_ring(struct vnic_dev *vdev,
struct vnic_dev_ring *ring);
int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
- u64 *a0, u64 *a1, int wait);
+ uint64_t *a0, uint64_t *a1, int wait);
int vnic_dev_cmd_args(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
- u64 *args, int nargs, int wait);
-void vnic_dev_cmd_proxy_by_index_start(struct vnic_dev *vdev, u16 index);
-void vnic_dev_cmd_proxy_by_bdf_start(struct vnic_dev *vdev, u16 bdf);
+ uint64_t *args, int nargs, int wait);
+void vnic_dev_cmd_proxy_by_index_start(struct vnic_dev *vdev, uint16_t index);
+void vnic_dev_cmd_proxy_by_bdf_start(struct vnic_dev *vdev, uint16_t bdf);
void vnic_dev_cmd_proxy_end(struct vnic_dev *vdev);
int vnic_dev_fw_info(struct vnic_dev *vdev,
struct vnic_devcmd_fw_info **fw_info);
int vnic_dev_capable_adv_filters(struct vnic_dev *vdev);
int vnic_dev_capable(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd);
-int vnic_dev_capable_filter_mode(struct vnic_dev *vdev, u32 *mode,
- u8 *filter_actions);
+int vnic_dev_capable_filter_mode(struct vnic_dev *vdev, uint32_t *mode,
+ uint8_t *filter_actions);
void vnic_dev_capable_udp_rss_weak(struct vnic_dev *vdev, bool *cfg_chk,
bool *weak);
-int vnic_dev_asic_info(struct vnic_dev *vdev, u16 *asic_type, u16 *asic_rev);
+int vnic_dev_asic_info(struct vnic_dev *vdev, uint16_t *asic_type,
+ uint16_t *asic_rev);
int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, size_t size,
void *value);
int vnic_dev_stats_clear(struct vnic_dev *vdev);
int broadcast, int promisc, int allmulti);
int vnic_dev_packet_filter_all(struct vnic_dev *vdev, int directed,
int multicast, int broadcast, int promisc, int allmulti);
-int vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr);
-int vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr);
-int vnic_dev_get_mac_addr(struct vnic_dev *vdev, u8 *mac_addr);
-int vnic_dev_raise_intr(struct vnic_dev *vdev, u16 intr);
-int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr);
+int vnic_dev_add_addr(struct vnic_dev *vdev, uint8_t *addr);
+int vnic_dev_del_addr(struct vnic_dev *vdev, uint8_t *addr);
+int vnic_dev_get_mac_addr(struct vnic_dev *vdev, uint8_t *mac_addr);
+int vnic_dev_raise_intr(struct vnic_dev *vdev, uint16_t intr);
+int vnic_dev_notify_set(struct vnic_dev *vdev, uint16_t intr);
void vnic_dev_set_reset_flag(struct vnic_dev *vdev, int state);
int vnic_dev_notify_unset(struct vnic_dev *vdev);
int vnic_dev_notify_setcmd(struct vnic_dev *vdev,
- void *notify_addr, dma_addr_t notify_pa, u16 intr);
+ void *notify_addr, dma_addr_t notify_pa, uint16_t intr);
int vnic_dev_notify_unsetcmd(struct vnic_dev *vdev);
int vnic_dev_link_status(struct vnic_dev *vdev);
-u32 vnic_dev_port_speed(struct vnic_dev *vdev);
-u32 vnic_dev_msg_lvl(struct vnic_dev *vdev);
-u32 vnic_dev_mtu(struct vnic_dev *vdev);
-u32 vnic_dev_link_down_cnt(struct vnic_dev *vdev);
-u32 vnic_dev_notify_status(struct vnic_dev *vdev);
-u32 vnic_dev_uif(struct vnic_dev *vdev);
+uint32_t vnic_dev_port_speed(struct vnic_dev *vdev);
+uint32_t vnic_dev_msg_lvl(struct vnic_dev *vdev);
+uint32_t vnic_dev_mtu(struct vnic_dev *vdev);
+uint32_t vnic_dev_link_down_cnt(struct vnic_dev *vdev);
+uint32_t vnic_dev_notify_status(struct vnic_dev *vdev);
+uint32_t vnic_dev_uif(struct vnic_dev *vdev);
int vnic_dev_close(struct vnic_dev *vdev);
int vnic_dev_enable(struct vnic_dev *vdev);
int vnic_dev_enable_wait(struct vnic_dev *vdev);
int vnic_dev_open_done(struct vnic_dev *vdev, int *done);
int vnic_dev_init(struct vnic_dev *vdev, int arg);
int vnic_dev_init_done(struct vnic_dev *vdev, int *done, int *err);
-int vnic_dev_init_prov(struct vnic_dev *vdev, u8 *buf, u32 len);
+int vnic_dev_init_prov(struct vnic_dev *vdev, uint8_t *buf, uint32_t len);
int vnic_dev_deinit(struct vnic_dev *vdev);
void vnic_dev_intr_coal_timer_info_default(struct vnic_dev *vdev);
int vnic_dev_intr_coal_timer_info(struct vnic_dev *vdev);
void vnic_dev_set_intr_mode(struct vnic_dev *vdev,
enum vnic_dev_intr_mode intr_mode);
enum vnic_dev_intr_mode vnic_dev_get_intr_mode(struct vnic_dev *vdev);
-u32 vnic_dev_intr_coal_timer_usec_to_hw(struct vnic_dev *vdev, u32 usec);
-u32 vnic_dev_intr_coal_timer_hw_to_usec(struct vnic_dev *vdev, u32 hw_cycles);
-u32 vnic_dev_get_intr_coal_timer_max(struct vnic_dev *vdev);
+uint32_t vnic_dev_intr_coal_timer_usec_to_hw(struct vnic_dev *vdev,
+ uint32_t usec);
+uint32_t vnic_dev_intr_coal_timer_hw_to_usec(struct vnic_dev *vdev,
+ uint32_t hw_cycles);
+uint32_t vnic_dev_get_intr_coal_timer_max(struct vnic_dev *vdev);
void vnic_dev_unregister(struct vnic_dev *vdev);
int vnic_dev_set_ig_vlan_rewrite_mode(struct vnic_dev *vdev,
- u8 ig_vlan_rewrite_mode);
+ uint8_t ig_vlan_rewrite_mode);
struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
void *priv, struct rte_pci_device *pdev, struct vnic_dev_bar *bar,
unsigned int num_bars);
int vnic_dev_alloc_stats_mem(struct vnic_dev *vdev);
int vnic_dev_cmd_init(struct vnic_dev *vdev, int fallback);
int vnic_dev_get_size(void);
-int vnic_dev_int13(struct vnic_dev *vdev, u64 arg, u32 op);
-int vnic_dev_perbi(struct vnic_dev *vdev, u64 arg, u32 op);
-u32 vnic_dev_perbi_rebuild_cnt(struct vnic_dev *vdev);
-int vnic_dev_init_prov2(struct vnic_dev *vdev, u8 *buf, u32 len);
+int vnic_dev_int13(struct vnic_dev *vdev, uint64_t arg, uint32_t op);
+int vnic_dev_perbi(struct vnic_dev *vdev, uint64_t arg, uint32_t op);
+uint32_t vnic_dev_perbi_rebuild_cnt(struct vnic_dev *vdev);
+int vnic_dev_init_prov2(struct vnic_dev *vdev, uint8_t *buf, uint32_t len);
int vnic_dev_enable2(struct vnic_dev *vdev, int active);
int vnic_dev_enable2_done(struct vnic_dev *vdev, int *status);
int vnic_dev_deinit_done(struct vnic_dev *vdev, int *status);
-int vnic_dev_set_mac_addr(struct vnic_dev *vdev, u8 *mac_addr);
-int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry,
+int vnic_dev_set_mac_addr(struct vnic_dev *vdev, uint8_t *mac_addr);
+int vnic_dev_classifier(struct vnic_dev *vdev, uint8_t cmd, uint16_t *entry,
struct filter_v2 *data, struct filter_action_v2 *action_v2);
-int vnic_dev_flowman_cmd(struct vnic_dev *vdev, u64 *args, int nargs);
+int vnic_dev_flowman_cmd(struct vnic_dev *vdev, uint64_t *args, int nargs);
int vnic_dev_overlay_offload_ctrl(struct vnic_dev *vdev,
- u8 overlay, u8 config);
-int vnic_dev_overlay_offload_cfg(struct vnic_dev *vdev, u8 overlay,
- u16 vxlan_udp_port_number);
+ uint8_t overlay, uint8_t config);
+int vnic_dev_overlay_offload_cfg(struct vnic_dev *vdev, uint8_t overlay,
+ uint16_t vxlan_udp_port_number);
int vnic_dev_capable_vxlan(struct vnic_dev *vdev);
int vnic_dev_capable_geneve(struct vnic_dev *vdev);
#endif /* _VNIC_DEV_H_ */
/*
* mcpu fw info in mem:
* in:
- * (u64)a0=paddr to struct vnic_devcmd_fw_info
+ * (uint64_t)a0=paddr to struct vnic_devcmd_fw_info
* action:
* Fills in struct vnic_devcmd_fw_info (128 bytes)
* note:
/*
* mcpu fw info in mem:
* in:
- * (u64)a0=paddr to struct vnic_devcmd_fw_info
- * (u16)a1=size of the structure
+ * (uint64_t)a0=paddr to struct vnic_devcmd_fw_info
+ * (uint16_t)a1=size of the structure
* out:
- * (u16)a1=0 for in:a1 = 0,
+ * (uint16_t)a1=0 for in:a1 = 0,
* data size actually written for other values.
* action:
* Fills in first 128 bytes of vnic_devcmd_fw_info for in:a1 = 0,
CMD_MCPU_FW_INFO = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 1),
/* dev-specific block member:
- * in: (u16)a0=offset,(u8)a1=size
+ * in: (uint16_t)a0=offset,(uint8_t)a1=size
* out: a0=value
*/
CMD_DEV_SPEC = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 2),
/* stats clear */
CMD_STATS_CLEAR = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 3),
- /* stats dump in mem: (u64)a0=paddr to stats area,
- * (u16)a1=sizeof stats area */
+ /* stats dump in mem: (uint64_t)a0=paddr to stats area,
+ * (uint16_t)a1=sizeof stats area
+ */
CMD_STATS_DUMP = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 4),
- /* set Rx packet filter: (u32)a0=filters (see CMD_PFILTER_*) */
+ /* set Rx packet filter: (uint32_t)a0=filters (see CMD_PFILTER_*) */
CMD_PACKET_FILTER = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 7),
- /* set Rx packet filter for all: (u32)a0=filters (see CMD_PFILTER_*) */
+ /* set Rx packet filter for all: (uint32_t)a0=filters
+ * (see CMD_PFILTER_*)
+ */
CMD_PACKET_FILTER_ALL = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 7),
/* hang detection notification */
CMD_ADDR_DEL = _CMDCNW(_CMD_DIR_WRITE,
_CMD_VTYPE_ENET | _CMD_VTYPE_FC, 13),
- /* add VLAN id in (u16)a0 */
+ /* add VLAN id in (uint16_t)a0 */
CMD_VLAN_ADD = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 14),
- /* del VLAN id in (u16)a0 */
+ /* del VLAN id in (uint16_t)a0 */
CMD_VLAN_DEL = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 15),
/*
- * nic_cfg in (u32)a0
+ * nic_cfg in (uint32_t)a0
*
* Capability query:
- * out: (u64) a0= 1 if a1 is valid
- * (u64) a1= (NIC_CFG bits supported) | (flags << 32)
+ * out: (uint64_t) a0= 1 if a1 is valid
+ * (uint64_t) a1= (NIC_CFG bits supported) | (flags << 32)
* (flags are CMD_NIC_CFG_CAPF_xxx)
*/
CMD_NIC_CFG = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 16),
/*
* nic_cfg_chk (same as nic_cfg, but may return error)
- * in (u32)a0
+ * in (uint32_t)a0
*
* Capability query:
- * out: (u64) a0= 1 if a1 is valid
- * (u64) a1= (NIC_CFG bits supported) | (flags << 32)
+ * out: (uint64_t) a0= 1 if a1 is valid
+ * (uint64_t) a1= (NIC_CFG bits supported) | (flags << 32)
* (flags are CMD_NIC_CFG_CAPF_xxx)
*/
CMD_NIC_CFG_CHK = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 16),
- /* union vnic_rss_key in mem: (u64)a0=paddr, (u16)a1=len */
+ /* union vnic_rss_key in mem: (uint64_t)a0=paddr, (uint16_t)a1=len */
CMD_RSS_KEY = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 17),
- /* union vnic_rss_cpu in mem: (u64)a0=paddr, (u16)a1=len */
+ /* union vnic_rss_cpu in mem: (uint64_t)a0=paddr, (uint16_t)a1=len */
CMD_RSS_CPU = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 18),
/* initiate softreset */
/* set struct vnic_devcmd_notify buffer in mem:
* in:
- * (u64)a0=paddr to notify (set paddr=0 to unset)
- * (u32)a1 & 0x00000000ffffffff=sizeof(struct vnic_devcmd_notify)
- * (u16)a1 & 0x0000ffff00000000=intr num (-1 for no intr)
+ * (uint64_t)a0=paddr to notify (set paddr=0 to unset)
+ * (uint32_t)a1 & 0x00000000ffffffff=sizeof(struct vnic_devcmd_notify)
+ * (uint16_t)a1 & 0x0000ffff00000000=intr num (-1 for no intr)
* out:
- * (u32)a1 = effective size
+ * (uint32_t)a1 = effective size
*/
CMD_NOTIFY = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 21),
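/* For reference, vnic_dev_notify_setcmd earlier in this patch packs
 * these arguments exactly as described:
 *	a0 = notify_pa;
 *	a1 = ((uint64_t)intr << 32) & 0x0000ffff00000000ULL;
 *	a1 += sizeof(struct vnic_devcmd_notify);
 * i.e. intr number in bits 47:32 and size in bits 31:0, with the
 * effective size coming back in the low 32 bits of a1.
 */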
- /* UNDI API: (u64)a0=paddr to s_PXENV_UNDI_ struct,
- * (u8)a1=PXENV_UNDI_xxx */
+ /* UNDI API: (uint64_t)a0=paddr to s_PXENV_UNDI_ struct,
+ * (uint8_t)a1=PXENV_UNDI_xxx
+ */
CMD_UNDI = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 22),
- /* initiate open sequence (u32)a0=flags (see CMD_OPENF_*) */
+ /* initiate open sequence (uint32_t)a0=flags (see CMD_OPENF_*) */
CMD_OPEN = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 23),
/* open status:
/* close vnic */
CMD_CLOSE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 25),
- /* initialize virtual link: (u32)a0=flags (see CMD_INITF_*) */
+ /* initialize virtual link: (uint32_t)a0=flags (see CMD_INITF_*) */
/***** Replaced by CMD_INIT *****/
CMD_INIT_v1 = _CMDCNW(_CMD_DIR_READ, _CMD_VTYPE_ALL, 26),
/* variant of CMD_INIT, with provisioning info
- * (u64)a0=paddr of vnic_devcmd_provinfo
- * (u32)a1=sizeof provision info */
+ * (uint64_t)a0=paddr of vnic_devcmd_provinfo
+ * (uint32_t)a1=sizeof provision info
+ */
CMD_INIT_PROV_INFO = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 27),
/* enable virtual link */
CMD_DISABLE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 29),
/* stats dump sum of all vnic stats on same uplink in mem:
- * (u64)a0=paddr
- * (u16)a1=sizeof stats area */
+ * (uint64_t)a0=paddr
+ * (uint16_t)a1=sizeof stats area
+ */
CMD_STATS_DUMP_ALL = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 30),
/* init status:
* out: a0=0 init complete, a0=1 init in progress
- * if a0=0, a1=errno */
+ * if a0=0, a1=errno
+ */
CMD_INIT_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 31),
- /* INT13 API: (u64)a0=paddr to vnic_int13_params struct
- * (u32)a1=INT13_CMD_xxx */
+ /* INT13 API: (uint64_t)a0=paddr to vnic_int13_params struct
+ * (uint32_t)a1=INT13_CMD_xxx
+ */
CMD_INT13 = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_FC, 32),
- /* logical uplink enable/disable: (u64)a0: 0/1=disable/enable */
+ /* logical uplink enable/disable: (uint64_t)a0: 0/1=disable/enable */
CMD_LOGICAL_UPLINK = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 33),
/* undo initialize of virtual link */
CMD_DEINIT = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 34),
- /* initialize virtual link: (u32)a0=flags (see CMD_INITF_*) */
+ /* initialize virtual link: (uint32_t)a0=flags (see CMD_INITF_*) */
CMD_INIT = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 35),
/* check fw capability of a cmd:
- * in: (u32)a0=cmd
- * out: (u32)a0=errno, 0:valid cmd, a1=supported VNIC_STF_* bits */
+ * in: (uint32_t)a0=cmd
+ * out: (uint32_t)a0=errno, 0:valid cmd, a1=supported VNIC_STF_* bits
+ */
CMD_CAPABILITY = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 36),
/* persistent binding info
- * in: (u64)a0=paddr of arg
- * (u32)a1=CMD_PERBI_XXX */
+ * in: (uint64_t)a0=paddr of arg
+ * (uint32_t)a1=CMD_PERBI_XXX
+ */
CMD_PERBI = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_FC, 37),
/* Interrupt Assert Register functionality
- * in: (u16)a0=interrupt number to assert
+ * in: (uint16_t)a0=interrupt number to assert
*/
CMD_IAR = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 38),
CMD_HANG_RESET = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 39),
/* hangreset status:
- * out: a0=0 reset complete, a0=1 reset in progress */
+ * out: a0=0 reset complete, a0=1 reset in progress
+ */
CMD_HANG_RESET_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 40),
/*
* Set hw ingress packet vlan rewrite mode:
- * in: (u32)a0=new vlan rewrite mode
- * out: (u32)a0=old vlan rewrite mode */
+ * in: (uint32_t)a0=new vlan rewrite mode
+ * out: (uint32_t)a0=old vlan rewrite mode
+ */
CMD_IG_VLAN_REWRITE_MODE = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ENET, 41),
/*
- * in: (u16)a0=bdf of target vnic
- * (u32)a1=cmd to proxy
+ * in: (uint16_t)a0=bdf of target vnic
+ * (uint32_t)a1=cmd to proxy
* a2-a15=args to cmd in a1
- * out: (u32)a0=status of proxied cmd
- * a1-a15=out args of proxied cmd */
+ * out: (uint32_t)a0=status of proxied cmd
+ * a1-a15=out args of proxied cmd
+ */
CMD_PROXY_BY_BDF = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 42),
/*
/*
* For HPP toggle:
* adapter-info-get
- * in: (u64)a0=phsical address of buffer passed in from caller.
- * (u16)a1=size of buffer specified in a0.
- * out: (u64)a0=phsical address of buffer passed in from caller.
- * (u16)a1=actual bytes from VIF-CONFIG-INFO TLV, or
- * 0 if no VIF-CONFIG-INFO TLV was ever received. */
+ * in: (uint64_t)a0=physical address of buffer passed in from caller.
+ * (uint16_t)a1=size of buffer specified in a0.
+ * out: (uint64_t)a0=physical address of buffer passed in from caller.
+ * (uint16_t)a1=actual bytes from VIF-CONFIG-INFO TLV, or
+ * 0 if no VIF-CONFIG-INFO TLV was ever received.
+ */
CMD_CONFIG_INFO_GET = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 44),
/*
- * INT13 API: (u64)a0=paddr to vnic_int13_params struct
- * (u32)a1=INT13_CMD_xxx
+ * INT13 API: (uint64_t)a0=paddr to vnic_int13_params struct
+ * (uint32_t)a1=INT13_CMD_xxx
*/
CMD_INT13_ALL = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 45),
/*
* Set default vlan:
- * in: (u16)a0=new default vlan
- * (u16)a1=zero for overriding vlan with param a0,
+ * in: (uint16_t)a0=new default vlan
+ * (uint16_t)a1=zero for overriding vlan with param a0,
* non-zero for resetting vlan to the default
- * out: (u16)a0=old default vlan
+ * out: (uint16_t)a0=old default vlan
*/
CMD_SET_DEFAULT_VLAN = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 46),
/* init_prov_info2:
* Variant of CMD_INIT_PROV_INFO, where it will not try to enable
* the vnic until CMD_ENABLE2 is issued.
- * (u64)a0=paddr of vnic_devcmd_provinfo
- * (u32)a1=sizeof provision info */
+ * (uint64_t)a0=paddr of vnic_devcmd_provinfo
+ * (uint32_t)a1=sizeof provision info
+ */
CMD_INIT_PROV_INFO2 = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 47),
/* enable2:
- * (u32)a0=0 ==> standby
+ * (uint32_t)a0=0 ==> standby
* =CMD_ENABLE2_ACTIVE ==> active
*/
CMD_ENABLE2 = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 48),
* intr_timer_usec = intr_timer_cycles * divisor / multiplier
*
* in: none
- * out: (u32)a0 = multiplier
- * (u32)a1 = divisor
- * (u32)a2 = maximum timer value in usec
+ * out: (uint32_t)a0 = multiplier
+ * (uint32_t)a1 = divisor
+ * (uint32_t)a2 = maximum timer value in usec
*/
CMD_INTR_COAL_CONVERT = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 50),
/*
* ISCSI DUMP API:
- * in: (u64)a0=paddr of the param or param itself
- * (u32)a1=ISCSI_CMD_xxx
+ * in: (uint64_t)a0=paddr of the param or param itself
+ * (uint32_t)a1=ISCSI_CMD_xxx
*/
CMD_ISCSI_DUMP_REQ = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 51),
/*
* ISCSI DUMP STATUS API:
- * in: (u32)a0=cmd tag
- * in: (u32)a1=ISCSI_CMD_xxx
- * out: (u32)a0=cmd status
+ * in: (uint32_t)a0=cmd tag
+ * in: (uint32_t)a1=ISCSI_CMD_xxx
+ * out: (uint32_t)a0=cmd status
*/
CMD_ISCSI_DUMP_STATUS = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 52),
* indexes are statically bound at the time of initialization.
* Based on the direction of migration, the resources of either MQ or
* the VF shall be attached to the LIF.
- * in: (u32)a0=Direction of Migration
+ * in: (uint32_t)a0=Direction of Migration
* 0=> Migrate to VF
* 1=> Migrate to MQ
- * (u32)a1=VF index (MQ index)
+ * (uint32_t)a1=VF index (MQ index)
*/
CMD_MIGRATE_SUBVNIC = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 53),
/*
* Register / Deregister the notification block for MQ subvnics
* in:
- * (u64)a0=paddr to notify (set paddr=0 to unset)
- * (u32)a1 & 0x00000000ffffffff=sizeof(struct vnic_devcmd_notify)
- * (u16)a1 & 0x0000ffff00000000=intr num (-1 for no intr)
+ * (uint64_t)a0=paddr to notify (set paddr=0 to unset)
+ * (uint32_t)a1 & 0x00000000ffffffff=sizeof(struct vnic_devcmd_notify)
+ * (uint16_t)a1 & 0x0000ffff00000000=intr num (-1 for no intr)
* out:
- * (u32)a1 = effective size
+ * (uint32_t)a1 = effective size
*/
CMD_SUBVNIC_NOTIFY = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 54),
CMD_SET_MAC_ADDR = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 55),
/* Update the provisioning info of the given VIF
- * (u64)a0=paddr of vnic_devcmd_provinfo
- * (u32)a1=sizeof provision info */
+ * (uint64_t)a0=paddr of vnic_devcmd_provinfo
+ * (uint32_t)a1=sizeof provision info
+ */
CMD_PROV_INFO_UPDATE = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 56),
/*
* Initialization for the devcmd2 interface.
- * in: (u64) a0=host result buffer physical address
- * in: (u16) a1=number of entries in result buffer
+ * in: (uint64_t) a0=host result buffer physical address
+ * in: (uint16_t) a1=number of entries in result buffer
*/
CMD_INITIALIZE_DEVCMD2 = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 57),
/*
* Add a filter.
- * in: (u64) a0= filter address
- * (u32) a1= size of filter
- * out: (u32) a0=filter identifier
+ * in: (uint64_t) a0= filter address
+ * (uint32_t) a1= size of filter
+ * out: (uint32_t) a0=filter identifier
*
* Capability query:
- * out: (u64) a0= 1 if capability query supported
- * (u64) a1= MAX filter type supported
+ * out: (uint64_t) a0= 1 if capability query supported
+ * (uint64_t) a1= MAX filter type supported
*/
CMD_ADD_FILTER = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ENET, 58),
/*
* Delete a filter.
- * in: (u32) a0=filter identifier
+ * in: (uint32_t) a0=filter identifier
*/
CMD_DEL_FILTER = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 59),
/*
* Enable a Queue Pair in User space NIC
- * in: (u32) a0=Queue Pair number
- * (u32) a1= command
+ * in: (uint32_t) a0=Queue Pair number
+ * (uint32_t) a1= command
*/
CMD_QP_ENABLE = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 60),
/*
* Disable a Queue Pair in User space NIC
- * in: (u32) a0=Queue Pair number
- * (u32) a1= command
+ * in: (uint32_t) a0=Queue Pair number
+ * (uint32_t) a1= command
*/
CMD_QP_DISABLE = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 61),
/*
* Stats dump Queue Pair in User space NIC
- * in: (u32) a0=Queue Pair number
- * (u64) a1=host buffer addr for status dump
- * (u32) a2=length of the buffer
+ * in: (uint32_t) a0=Queue Pair number
+ * (uint64_t) a1=host buffer addr for status dump
+ * (uint32_t) a2=length of the buffer
*/
CMD_QP_STATS_DUMP = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 62),
/*
* Clear stats for Queue Pair in User space NIC
- * in: (u32) a0=Queue Pair number
+ * in: (uint32_t) a0=Queue Pair number
*/
CMD_QP_STATS_CLEAR = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 63),
/*
- * UEFI BOOT API: (u64)a0= UEFI FLS_CMD_xxx
+ * UEFI BOOT API: (uint64_t)a0= UEFI FLS_CMD_xxx
- * (ui64)a1= paddr for the info buffer
+ * (uint64_t)a1= paddr for the info buffer
*/
CMD_FC_REQ = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_FC, 64),
/*
* Return the iSCSI config details required by the EFI Option ROM
- * in: (u32) a0=0 Get Boot Info for PXE eNIC as per pxe_boot_config_t
+ * in: (uint32_t) a0=0 Get Boot Info for PXE eNIC as per
+ * pxe_boot_config_t
* a0=1 Get Boot info for iSCSI enic as per
* iscsi_boot_efi_cfg_t
- * in: (u64) a1=Host address where iSCSI config info is returned
+ * in: (uint64_t) a1=Host address where iSCSI config info is returned
*/
CMD_VNIC_BOOT_CONFIG_INFO = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 65),
/*
* Create a Queue Pair (RoCE)
- * in: (u32) a0 = Queue Pair number
- * (u32) a1 = Remote QP
- * (u32) a2 = RDMA-RQ
- * (u16) a3 = RQ Res Group
- * (u16) a4 = SQ Res Group
- * (u32) a5 = Protection Domain
- * (u64) a6 = Remote MAC
- * (u32) a7 = start PSN
- * (u16) a8 = MSS
- * (u32) a9 = protocol version
+ * in: (uint32_t) a0 = Queue Pair number
+ * (uint32_t) a1 = Remote QP
+ * (uint32_t) a2 = RDMA-RQ
+ * (uint16_t) a3 = RQ Res Group
+ * (uint16_t) a4 = SQ Res Group
+ * (uint32_t) a5 = Protection Domain
+ * (uint64_t) a6 = Remote MAC
+ * (uint32_t) a7 = start PSN
+ * (uint16_t) a8 = MSS
+ * (uint32_t) a9 = protocol version
*/
CMD_RDMA_QP_CREATE = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 66),
/*
* Delete a Queue Pair (RoCE)
- * in: (u32) a0 = Queue Pair number
+ * in: (uint32_t) a0 = Queue Pair number
*/
CMD_RDMA_QP_DELETE = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 67),
/*
* Retrieve a Queue Pair's status information (RoCE)
- * in: (u32) a0 = Queue Pair number
- * (u64) a1 = host buffer addr for QP status struct
- * (u32) a2 = length of the buffer
+ * in: (uint32_t) a0 = Queue Pair number
+ * (uint64_t) a1 = host buffer addr for QP status struct
+ * (uint32_t) a2 = length of the buffer
*/
CMD_RDMA_QP_STATUS = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ENET, 68),
/*
* Use this devcmd for agreeing on the highest common version supported
* by both driver and fw for features who need such a facility.
- * in: (u64) a0 = feature (driver requests for the supported versions
- * on this feature)
- * out: (u64) a0 = bitmap of all supported versions for that feature
+ * in: (uint64_t) a0 = feature (driver requests for the supported
+ * versions on this feature)
+ * out: (uint64_t) a0 = bitmap of all supported versions for that
+ * feature
*/
CMD_GET_SUPP_FEATURE_VER = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ENET, 69),
/*
* Initialize the RDMA notification work queue
- * in: (u64) a0 = host buffer address
- * in: (u16) a1 = number of entries in buffer
- * in: (u16) a2 = resource group number
- * in: (u16) a3 = CQ number to post completion
+ * in: (uint64_t) a0 = host buffer address
+ * in: (uint16_t) a1 = number of entries in buffer
+ * in: (uint16_t) a2 = resource group number
+ * in: (uint16_t) a3 = CQ number to post completion
*/
CMD_RDMA_INIT_INFO_BUF = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 70),
/*
* De-init the RDMA notification work queue
- * in: (u64) a0=resource group number
+ * in: (uint64_t) a0=resource group number
*/
CMD_RDMA_DEINIT_INFO_BUF = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 71),
/*
* Control (Enable/Disable) overlay offloads on the given vnic
- * in: (u8) a0 = OVERLAY_FEATURE_NVGRE : NVGRE
+ * in: (uint8_t) a0 = OVERLAY_FEATURE_NVGRE : NVGRE
* a0 = OVERLAY_FEATURE_VXLAN : VxLAN
* a0 = OVERLAY_FEATURE_GENEVE : Geneve
- * in: (u8) a1 = OVERLAY_OFFLOAD_ENABLE : Enable or
+ * in: (uint8_t) a1 = OVERLAY_OFFLOAD_ENABLE : Enable or
* a1 = OVERLAY_OFFLOAD_DISABLE : Disable or
* a1 = OVERLAY_OFFLOAD_ENABLE_V2 : Enable with version 2
*/
/*
* Configuration of overlay offloads feature on a given vNIC
- * in: (u8) a0 = OVERLAY_CFG_VXLAN_PORT_UPDATE : VxLAN
+ * in: (uint8_t) a0 = OVERLAY_CFG_VXLAN_PORT_UPDATE : VxLAN
* OVERLAY_CFG_GENEVE_PORT_UPDATE : Geneve
- * in: (u16) a1 = unsigned short int port information
+ * in: (uint16_t) a1 = unsigned short int port information
*/
CMD_OVERLAY_OFFLOAD_CFG = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 73),
/*
* Return the configured name for the device
- * in: (u64) a0=Host address where the name is copied
- * (u32) a1=Size of the buffer
+ * in: (uint64_t) a0=Host address where the name is copied
+ * (uint32_t) a1=Size of the buffer
*/
CMD_GET_CONFIG_NAME = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 74),
/*
* Enable group interrupt for the VF
- * in: (u32) a0 = GRPINTR_ENABLE : enable
+ * in: (uint32_t) a0 = GRPINTR_ENABLE : enable
* a0 = GRPINTR_DISABLE : disable
* a0 = GRPINTR_UPD_VECT: update group vector addr
- * in: (u32) a1 = interrupt group count
- * in: (u64) a2 = Start of host buffer address for DMAing group
+ * in: (uint32_t) a1 = interrupt group count
+ * in: (uint64_t) a2 = Start of host buffer address for DMAing group
* vector bitmap
- * in: (u64) a3 = Stride between group vectors
+ * in: (uint64_t) a3 = Stride between group vectors
*/
CMD_CONFIG_GRPINTR = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 75),
/*
- * Set cq arrary base and size in a list of consective wqs and
+ * Set cq array base and size in a list of consecutive wqs and
* rqs for a device
- * in: (u16) a0 = the wq relative index in the device.
+ * in: (uint16_t) a0 = the wq relative index in the device.
* -1 indicates skipping wq configuration
- * in: (u16) a1 = the wcq relative index in the device
- * in: (u16) a2 = the rq relative index in the device
+ * in: (uint16_t) a1 = the wcq relative index in the device
+ * in: (uint16_t) a2 = the rq relative index in the device
* -1 indicates skipping rq configuration
- * in: (u16) a3 = the rcq relative index in the device
+ * in: (uint16_t) a3 = the rcq relative index in the device
*/
CMD_CONFIG_CQ_ARRAY = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 76),
/*
* Add an advanced filter.
- * in: (u64) a0= filter address
- * (u32) a1= size of filter
- * out: (u32) a0=filter identifier
+ * in: (uint64_t) a0= filter address
+ * (uint32_t) a1= size of filter
+ * out: (uint32_t) a0=filter identifier
*
* Capability query:
- * in: (u64) a1= supported filter capability exchange modes
- * out: (u64) a0= 1 if capability query supported
- * if (u64) a1 = 0: a1 = MAX filter type supported
- * if (u64) a1 & FILTER_CAP_MODE_V1_FLAG:
+ * in: (uint64_t) a1= supported filter capability exchange modes
+ * out: (uint64_t) a0= 1 if capability query supported
+ * if (uint64_t) a1 = 0: a1 = MAX filter type supported
+ * if (uint64_t) a1 & FILTER_CAP_MODE_V1_FLAG:
* a1 = bitmask of supported filters
* a2 = FILTER_CAP_MODE_V1
* a3 = bitmask of supported actions
/*
* Perform a Flow Manager Operation (see flowman_api.h)
- * in: (u32) a0 = sub-command
- * (u64) a1..15 = (sub-command specific)
+ * in: (uint32_t) a0 = sub-command
+ * (uint64_t) a1..15 = (sub-command specific)
*
* All arguments that have not been assigned a meaning should be
* initialized to 0 to allow for better driver forward compatibility.
char fw_build[32];
char hw_version[32];
char hw_serial_number[32];
- u16 asic_type;
- u16 asic_rev;
+ uint16_t asic_type;
+ uint16_t asic_rev;
};
enum fwinfo_asic_type {
};
struct vnic_devcmd_notify {
- u32 csum; /* checksum over following words */
-
- u32 link_state; /* link up == 1 */
- u32 port_speed; /* effective port speed (rate limit) */
- u32 mtu; /* MTU */
- u32 msglvl; /* requested driver msg lvl */
- u32 uif; /* uplink interface */
- u32 status; /* status bits (see VNIC_STF_*) */
- u32 error; /* error code (see ERR_*) for first ERR */
- u32 link_down_cnt; /* running count of link down transitions */
- u32 perbi_rebuild_cnt; /* running count of perbi rebuilds */
+ uint32_t csum; /* checksum over following words */
+
+ uint32_t link_state; /* link up == 1 */
+ uint32_t port_speed; /* effective port speed (rate limit) */
+ uint32_t mtu; /* MTU */
+ uint32_t msglvl; /* requested driver msg lvl */
+ uint32_t uif; /* uplink interface */
+ uint32_t status; /* status bits (see VNIC_STF_*) */
+ uint32_t error; /* error code (see ERR_*) for 1st ERR */
+ uint32_t link_down_cnt; /* running count of link down
+ * transitions
+ */
+ uint32_t perbi_rebuild_cnt; /* running count of perbi rebuilds */
};
#define VNIC_STF_FATAL_ERR 0x0001 /* fatal fw error */
#define VNIC_STF_STD_PAUSE 0x0002 /* standard link-level pause on */
0)
struct vnic_devcmd_provinfo {
- u8 oui[3];
- u8 type;
- u8 data[0];
+ uint8_t oui[3];
+ uint8_t type;
+ uint8_t data[0];
};
/*
FILTER_FIELD_USNIC_ID)
struct filter_usnic_id {
- u32 flags;
- u16 vlan;
- u16 ethtype;
- u8 proto_version;
- u32 usnic_id;
+ uint32_t flags;
+ uint16_t vlan;
+ uint16_t ethtype;
+ uint8_t proto_version;
+ uint32_t usnic_id;
} __attribute__((packed));
#define FILTER_FIELD_5TUP_PROTO FILTER_FIELD_VALID(1)
};
struct filter_ipv4_5tuple {
- u32 flags;
- u32 protocol;
- u32 src_addr;
- u32 dst_addr;
- u16 src_port;
- u16 dst_port;
+ uint32_t flags;
+ uint32_t protocol;
+ uint32_t src_addr;
+ uint32_t dst_addr;
+ uint16_t src_port;
+ uint16_t dst_port;
} __attribute__((packed));
#define FILTER_FIELD_VMQ_VLAN FILTER_FIELD_VALID(1)
#define FILTER_FIELDS_NVGRE FILTER_FIELD_VMQ_MAC
struct filter_mac_vlan {
- u32 flags;
- u16 vlan;
- u8 mac_addr[6];
+ uint32_t flags;
+ uint16_t vlan;
+ uint8_t mac_addr[6];
} __attribute__((packed));
#define FILTER_FIELD_VLAN_IP_3TUP_VLAN FILTER_FIELD_VALID(1)
FILTER_FIELD_VLAN_IP_3TUP_DST_PT)
struct filter_vlan_ip_3tuple {
- u32 flags;
- u16 vlan;
- u16 l3_protocol;
+ uint32_t flags;
+ uint16_t vlan;
+ uint16_t l3_protocol;
union {
- u32 dst_addr_v4;
- u8 dst_addr_v6[16];
+ uint32_t dst_addr_v4;
+ uint8_t dst_addr_v6[16];
} u;
- u32 l4_protocol;
- u16 dst_port;
+ uint32_t l4_protocol;
+ uint16_t dst_port;
} __attribute__((packed));
#define FILTER_GENERIC_1_BYTES 64
* position is only 16 bits, reserving positions > 64k to be used by firmware
*/
struct filter_generic_1 {
- u16 position; /* lower position comes first */
- u32 mask_flags;
- u32 val_flags;
- u16 mask_vlan;
- u16 val_vlan;
+ uint16_t position; /* lower position comes first */
+ uint32_t mask_flags;
+ uint32_t val_flags;
+ uint16_t mask_vlan;
+ uint16_t val_vlan;
struct {
- u8 mask[FILTER_GENERIC_1_KEY_LEN]; /* 0 bit means "don't care"*/
- u8 val[FILTER_GENERIC_1_KEY_LEN];
+ uint8_t mask[FILTER_GENERIC_1_KEY_LEN]; /* 0 bit means
+ * " don't care"
+ */
+ uint8_t val[FILTER_GENERIC_1_KEY_LEN];
} __attribute__((packed)) layer[FILTER_GENERIC_1_NUM_LAYERS];
} __attribute__((packed));
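The mask/val pair implements a classic masked compare: a clear mask bit means "don't care". A sketch of the matching rule implied by the comment, for illustration only (the firmware implementation is not shown in this patch):

static int layer_matches(const uint8_t *pkt,
			 const struct filter_generic_1 *f, int l)
{
	int i;

	for (i = 0; i < FILTER_GENERIC_1_KEY_LEN; i++) {
		if ((pkt[i] & f->layer[l].mask[i]) !=
		    (f->layer[l].val[i] & f->layer[l].mask[i]))
			return 0;	/* masked bytes differ */
	}
	return 1;			/* all masked bytes match */
}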
};
struct filter_action {
- u32 type;
+ uint32_t type;
union {
- u32 rq_idx;
+ uint32_t rq_idx;
} u;
} __attribute__((packed));
| FILTER_ACTION_DROP_FLAG \
| FILTER_ACTION_FILTER_ID_FLAG)
-/* Version 2 of filter action must be a strict extension of struct filter_action
- * where the first fields exactly match in size and meaning.
+/* Version 2 of filter action must be a strict extension of struct
+ * filter_action where the first fields exactly match in size and meaning.
*/
struct filter_action_v2 {
- u32 type;
- u32 rq_idx;
- u32 flags; /* use FILTER_ACTION_XXX_FLAG defines */
- u16 filter_id;
+ uint32_t type;
+ uint32_t rq_idx;
+ uint32_t flags; /* use FILTER_ACTION_XXX_FLAG defines */
+ uint16_t filter_id;
uint8_t reserved[32]; /* for future expansion */
} __attribute__((packed));
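The strict-extension requirement can be checked at build time. These asserts are illustrative, not part of the driver; offsetof needs <stddef.h>:

_Static_assert(offsetof(struct filter_action_v2, type) ==
	       offsetof(struct filter_action, type),
	       "v2 'type' must not move");
_Static_assert(offsetof(struct filter_action_v2, rq_idx) ==
	       offsetof(struct filter_action, u.rq_idx),
	       "v2 'rq_idx' must not move");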
FILTER_DPDK_1_FLAG)
struct filter {
- u32 type;
+ uint32_t type;
union {
struct filter_usnic_id usnic;
struct filter_ipv4_5tuple ipv4;
* growth.
*/
struct filter_v2 {
- u32 type;
+ uint32_t type;
union {
struct filter_usnic_id usnic;
struct filter_ipv4_5tuple ipv4;
/* Make sizeof(vnic_devcmd) a power-of-2 for I/O BAR. */
#define VNIC_DEVCMD_NARGS 15
struct vnic_devcmd {
- u32 status; /* RO */
- u32 cmd; /* RW */
- u64 args[VNIC_DEVCMD_NARGS]; /* RW cmd args (little-endian) */
+ uint32_t status; /* RO */
+ uint32_t cmd; /* RW */
+ uint64_t args[VNIC_DEVCMD_NARGS]; /* RW cmd args (little-endian)*/
};
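The power-of-2 requirement is easy to verify: two 4-byte words plus 15 8-byte args is 128 bytes. An illustrative build-time check (not in the driver):

_Static_assert(sizeof(struct vnic_devcmd) == 128 &&
	       (sizeof(struct vnic_devcmd) &
		(sizeof(struct vnic_devcmd) - 1)) == 0,
	       "vnic_devcmd must stay a power of 2 for the I/O BAR");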
/*
#define VNIC_DEVCMD2_NARGS VNIC_DEVCMD_NARGS
struct vnic_devcmd2 {
- u16 pad;
- u16 flags;
- u32 cmd; /* same command #defines as original */
- u64 args[VNIC_DEVCMD2_NARGS];
+ uint16_t pad;
+ uint16_t flags;
+ uint32_t cmd; /* same command #defines as original */
+ uint64_t args[VNIC_DEVCMD2_NARGS];
};
#define VNIC_DEVCMD2_NRESULTS VNIC_DEVCMD_NARGS
struct devcmd2_result {
- u64 results[VNIC_DEVCMD2_NRESULTS];
- u32 pad;
- u16 completed_index; /* into copy WQ */
- u8 error; /* same error codes as original */
- u8 color; /* 0 or 1 as with completion queues */
+ uint64_t results[VNIC_DEVCMD2_NRESULTS];
+ uint32_t pad;
+ uint16_t completed_index; /* into copy WQ */
+ uint8_t error; /* same error codes as original */
+ uint8_t color; /* 0 or 1 as with completion queues */
};
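The color bit lets the driver detect fresh results without a separate valid flag: firmware flips the bit on each pass over the ring, so a slot is new when its color matches what the driver expects for the current pass. A heavily simplified polling sketch; timeouts and read barriers are omitted, and the comparison direction is an assumption:

static int devcmd2_poll_sketch(volatile struct devcmd2_result *result,
			       uint8_t expected_color)
{
	while ((result->color & 1) != expected_color)
		;	/* a real driver bounds this with a timeout */
	return result->error ? -result->error : 0;	/* same codes */
}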
#define DEVCMD2_RING_SIZE 32
/* Device-specific region: enet configuration */
struct vnic_enet_config {
- u32 flags;
- u32 wq_desc_count;
- u32 rq_desc_count;
- u16 mtu;
- u16 intr_timer_deprecated;
- u8 intr_timer_type;
- u8 intr_mode;
+ uint32_t flags;
+ uint32_t wq_desc_count;
+ uint32_t rq_desc_count;
+ uint16_t mtu;
+ uint16_t intr_timer_deprecated;
+ uint8_t intr_timer_type;
+ uint8_t intr_mode;
char devname[16];
- u32 intr_timer_usec;
- u16 loop_tag;
- u16 vf_rq_count;
- u16 num_arfs;
- u64 mem_paddr;
- u16 rdma_qp_id;
- u16 rdma_qp_count;
- u16 rdma_resgrp;
- u32 rdma_mr_id;
- u32 rdma_mr_count;
- u32 max_pkt_size;
+ uint32_t intr_timer_usec;
+ uint16_t loop_tag;
+ uint16_t vf_rq_count;
+ uint16_t num_arfs;
+ uint64_t mem_paddr;
+ uint16_t rdma_qp_id;
+ uint16_t rdma_qp_count;
+ uint16_t rdma_resgrp;
+ uint32_t rdma_mr_id;
+ uint32_t rdma_mr_count;
+ uint32_t max_pkt_size;
};
#define VENETF_TSO 0x1 /* TSO enabled */
return 0;
}
-void vnic_intr_init(struct vnic_intr *intr, u32 coalescing_timer,
+void vnic_intr_init(struct vnic_intr *intr, uint32_t coalescing_timer,
unsigned int coalescing_type, unsigned int mask_on_assertion)
{
vnic_intr_coalescing_timer_set(intr, coalescing_timer);
}
void vnic_intr_coalescing_timer_set(struct vnic_intr *intr,
- u32 coalescing_timer)
+ uint32_t coalescing_timer)
{
iowrite32(vnic_dev_intr_coal_timer_usec_to_hw(intr->vdev,
coalescing_timer), &intr->ctrl->coalescing_timer);
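vnic_dev_intr_coal_timer_usec_to_hw() converts microseconds to device ticks. A sketch of the conversion, assuming the usual firmware-reported multiplier/divisor calibration pair; the names here are illustrative:

static uint32_t coal_usec_to_hw(uint32_t usec, uint32_t mul, uint32_t div)
{
	/* Scale usec by the calibration reported at init time. */
	return usec * mul / div;
}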
/* Interrupt control */
struct vnic_intr_ctrl {
- u32 coalescing_timer; /* 0x00 */
- u32 pad0;
- u32 coalescing_value; /* 0x08 */
- u32 pad1;
- u32 coalescing_type; /* 0x10 */
- u32 pad2;
- u32 mask_on_assertion; /* 0x18 */
- u32 pad3;
- u32 mask; /* 0x20 */
- u32 pad4;
- u32 int_credits; /* 0x28 */
- u32 pad5;
- u32 int_credit_return; /* 0x30 */
- u32 pad6;
+ uint32_t coalescing_timer; /* 0x00 */
+ uint32_t pad0;
+ uint32_t coalescing_value; /* 0x08 */
+ uint32_t pad1;
+ uint32_t coalescing_type; /* 0x10 */
+ uint32_t pad2;
+ uint32_t mask_on_assertion; /* 0x18 */
+ uint32_t pad3;
+ uint32_t mask; /* 0x20 */
+ uint32_t pad4;
+ uint32_t int_credits; /* 0x28 */
+ uint32_t pad5;
+ uint32_t int_credit_return; /* 0x30 */
+ uint32_t pad6;
};
struct vnic_intr {
#define VNIC_INTR_UNMASK_SHIFT 16
#define VNIC_INTR_RESET_TIMER_SHIFT 17
- u32 int_credit_return = (credits & 0xffff) |
+ uint32_t int_credit_return = (credits & 0xffff) |
(unmask ? (1 << VNIC_INTR_UNMASK_SHIFT) : 0) |
(reset_timer ? (1 << VNIC_INTR_RESET_TIMER_SHIFT) : 0);
vnic_intr_return_credits(intr, credits, unmask, reset_timer);
}
-static inline u32 vnic_intr_legacy_pba(u32 __iomem *legacy_pba)
+static inline uint32_t vnic_intr_legacy_pba(uint32_t __iomem *legacy_pba)
{
/* read PBA without clearing */
return ioread32(legacy_pba);
void vnic_intr_free(struct vnic_intr *intr);
int vnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr,
unsigned int index);
-void vnic_intr_init(struct vnic_intr *intr, u32 coalescing_timer,
+void vnic_intr_init(struct vnic_intr *intr, uint32_t coalescing_timer,
unsigned int coalescing_type, unsigned int mask_on_assertion);
void vnic_intr_coalescing_timer_set(struct vnic_intr *intr,
- u32 coalescing_timer);
+ uint32_t coalescing_timer);
void vnic_intr_clean(struct vnic_intr *intr);
#endif /* _VNIC_INTR_H_ */
#define NIC_CFG_RSS_HASH_TYPE_RSVD2 (1 << 6)
#define NIC_CFG_RSS_HASH_TYPE_UDP_IPV6 (1 << 7)
-static inline void vnic_set_nic_cfg(u32 *nic_cfg,
- u8 rss_default_cpu, u8 rss_hash_type,
- u8 rss_hash_bits, u8 rss_base_cpu,
- u8 rss_enable, u8 tso_ipid_split_en,
- u8 ig_vlan_strip_en)
+static inline void vnic_set_nic_cfg(uint32_t *nic_cfg,
+ uint8_t rss_default_cpu, uint8_t rss_hash_type,
+ uint8_t rss_hash_bits, uint8_t rss_base_cpu,
+ uint8_t rss_enable, uint8_t tso_ipid_split_en,
+ uint8_t ig_vlan_strip_en)
{
*nic_cfg = (rss_default_cpu & NIC_CFG_RSS_DEFAULT_CPU_MASK_FIELD) |
((rss_hash_type & NIC_CFG_RSS_HASH_TYPE_MASK_FIELD)
};
struct vnic_resource_header {
- u32 magic;
- u32 version;
+ uint32_t magic;
+ uint32_t version;
};
struct mgmt_barmap_hdr {
- u32 magic; /* magic number */
- u32 version; /* header format version */
- u16 lif; /* loopback lif for mgmt frames */
- u16 pci_slot; /* installed pci slot */
+ uint32_t magic; /* magic number */
+ uint32_t version; /* header format version */
+ uint16_t lif; /* loopback lif for mgmt frames */
+ uint16_t pci_slot; /* installed pci slot */
char serial[16]; /* card serial number */
};
struct vnic_resource {
- u8 type;
- u8 bar;
- u8 pad[2];
- u32 bar_offset;
- u32 count;
+ uint8_t type;
+ uint8_t bar;
+ uint8_t pad[2];
+ uint32_t bar_offset;
+ uint32_t count;
};
#endif /* _VNIC_RESOURCE_H_ */
unsigned int error_interrupt_enable,
unsigned int error_interrupt_offset)
{
- u64 paddr;
+ uint64_t paddr;
unsigned int count = rq->ring.desc_count;
- paddr = (u64)rq->ring.base_addr | VNIC_PADDR_TARGET;
+ paddr = (uint64_t)rq->ring.base_addr | VNIC_PADDR_TARGET;
writeq(paddr, &rq->ctrl->ring_base);
iowrite32(count, &rq->ctrl->ring_size);
iowrite32(cq_index, &rq->ctrl->cq_index);
unsigned int error_interrupt_enable,
unsigned int error_interrupt_offset)
{
- u32 fetch_index = 0;
+ uint32_t fetch_index = 0;
/* Use current fetch_index as the ring starting point */
fetch_index = ioread32(&rq->ctrl->fetch_index);
for (wait = 0; wait < 1000; wait++) {
if (!(ioread32(&rq->ctrl->running)))
return 0;
- udelay(10);
+ usleep(10);
}
pr_err("Failed to disable RQ[%d]\n", rq->index);
void (*buf_clean)(struct rte_mbuf **buf))
{
struct rte_mbuf **buf;
- u32 fetch_index, i;
+ uint32_t fetch_index, i;
unsigned int count = rq->ring.desc_count;
buf = &rq->mbuf_ring[0];
/* Receive queue control */
struct vnic_rq_ctrl {
- u64 ring_base; /* 0x00 */
- u32 ring_size; /* 0x08 */
- u32 pad0;
- u32 posted_index; /* 0x10 */
- u32 pad1;
- u32 cq_index; /* 0x18 */
- u32 pad2;
- u32 enable; /* 0x20 */
- u32 pad3;
- u32 running; /* 0x28 */
- u32 pad4;
- u32 fetch_index; /* 0x30 */
- u32 pad5;
- u32 error_interrupt_enable; /* 0x38 */
- u32 pad6;
- u32 error_interrupt_offset; /* 0x40 */
- u32 pad7;
- u32 error_status; /* 0x48 */
- u32 pad8;
- u32 tcp_sn; /* 0x50 */
- u32 pad9;
- u32 unused; /* 0x58 */
- u32 pad10;
- u32 dca_select; /* 0x60 */
- u32 pad11;
- u32 dca_value; /* 0x68 */
- u32 pad12;
- u32 data_ring; /* 0x70 */
- u32 pad13;
- u32 header_split; /* 0x78 */
- u32 pad14;
+ uint64_t ring_base; /* 0x00 */
+ uint32_t ring_size; /* 0x08 */
+ uint32_t pad0;
+ uint32_t posted_index; /* 0x10 */
+ uint32_t pad1;
+ uint32_t cq_index; /* 0x18 */
+ uint32_t pad2;
+ uint32_t enable; /* 0x20 */
+ uint32_t pad3;
+ uint32_t running; /* 0x28 */
+ uint32_t pad4;
+ uint32_t fetch_index; /* 0x30 */
+ uint32_t pad5;
+ uint32_t error_interrupt_enable; /* 0x38 */
+ uint32_t pad6;
+ uint32_t error_interrupt_offset; /* 0x40 */
+ uint32_t pad7;
+ uint32_t error_status; /* 0x48 */
+ uint32_t pad8;
+ uint32_t tcp_sn; /* 0x50 */
+ uint32_t pad9;
+ uint32_t unused; /* 0x58 */
+ uint32_t pad10;
+ uint32_t dca_select; /* 0x60 */
+ uint32_t pad11;
+ uint32_t dca_value; /* 0x68 */
+ uint32_t pad12;
+ uint32_t data_ring; /* 0x70 */
+ uint32_t pad13;
+ uint32_t header_split; /* 0x78 */
+ uint32_t pad14;
};
struct vnic_rq {
/* RSS key array */
union vnic_rss_key {
struct {
- u8 b[10];
- u8 b_pad[6];
+ uint8_t b[10];
+ uint8_t b_pad[6];
} key[4];
- u64 raw[8];
+ uint64_t raw[8];
};
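The union packs a 40-byte RSS key as four 10-byte groups, each padded out to 16 bytes. A sketch of scattering a flat user key into that layout; enic_set_rsskey() elsewhere in the driver does the equivalent, the loop here is illustrative:

static void rss_key_fill(union vnic_rss_key *k, const uint8_t *user_key)
{
	unsigned int i;

	for (i = 0; i < 40; i++)	/* 4 groups x 10 key bytes */
		k->key[i / 10].b[i % 10] = user_key[i];
}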
/* RSS cpu array */
union vnic_rss_cpu {
struct {
- u8 b[4];
- u8 b_pad[4];
+ uint8_t b[4];
+ uint8_t b_pad[4];
} cpu[32];
- u64 raw[32];
+ uint64_t raw[32];
};
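The indirection table uses the same padding trick with four entries per 8-byte group, so logical entry i lives at cpu[i / 4].b[i % 4]. Illustrative setter:

static void rss_cpu_set(union vnic_rss_cpu *c, unsigned int i,
			uint8_t rq_index)
{
	c->cpu[i / 4].b[i % 4] = rq_index;
}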
#endif /* _VNIC_RSS_H_ */
/* Tx statistics */
struct vnic_tx_stats {
- u64 tx_frames_ok;
- u64 tx_unicast_frames_ok;
- u64 tx_multicast_frames_ok;
- u64 tx_broadcast_frames_ok;
- u64 tx_bytes_ok;
- u64 tx_unicast_bytes_ok;
- u64 tx_multicast_bytes_ok;
- u64 tx_broadcast_bytes_ok;
- u64 tx_drops;
- u64 tx_errors;
- u64 tx_tso;
- u64 rsvd[16];
+ uint64_t tx_frames_ok;
+ uint64_t tx_unicast_frames_ok;
+ uint64_t tx_multicast_frames_ok;
+ uint64_t tx_broadcast_frames_ok;
+ uint64_t tx_bytes_ok;
+ uint64_t tx_unicast_bytes_ok;
+ uint64_t tx_multicast_bytes_ok;
+ uint64_t tx_broadcast_bytes_ok;
+ uint64_t tx_drops;
+ uint64_t tx_errors;
+ uint64_t tx_tso;
+ uint64_t rsvd[16];
};
/* Rx statistics */
struct vnic_rx_stats {
- u64 rx_frames_ok;
- u64 rx_frames_total;
- u64 rx_unicast_frames_ok;
- u64 rx_multicast_frames_ok;
- u64 rx_broadcast_frames_ok;
- u64 rx_bytes_ok;
- u64 rx_unicast_bytes_ok;
- u64 rx_multicast_bytes_ok;
- u64 rx_broadcast_bytes_ok;
- u64 rx_drop;
- u64 rx_no_bufs;
- u64 rx_errors;
- u64 rx_rss;
- u64 rx_crc_errors;
- u64 rx_frames_64;
- u64 rx_frames_127;
- u64 rx_frames_255;
- u64 rx_frames_511;
- u64 rx_frames_1023;
- u64 rx_frames_1518;
- u64 rx_frames_to_max;
- u64 rsvd[16];
+ uint64_t rx_frames_ok;
+ uint64_t rx_frames_total;
+ uint64_t rx_unicast_frames_ok;
+ uint64_t rx_multicast_frames_ok;
+ uint64_t rx_broadcast_frames_ok;
+ uint64_t rx_bytes_ok;
+ uint64_t rx_unicast_bytes_ok;
+ uint64_t rx_multicast_bytes_ok;
+ uint64_t rx_broadcast_bytes_ok;
+ uint64_t rx_drop;
+ uint64_t rx_no_bufs;
+ uint64_t rx_errors;
+ uint64_t rx_rss;
+ uint64_t rx_crc_errors;
+ uint64_t rx_frames_64;
+ uint64_t rx_frames_127;
+ uint64_t rx_frames_255;
+ uint64_t rx_frames_511;
+ uint64_t rx_frames_1023;
+ uint64_t rx_frames_1518;
+ uint64_t rx_frames_to_max;
+ uint64_t rsvd[16];
};
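Both statistics blocks are DMA-filled by firmware in a single dump. A sketch of the devcmd call, following the a0 = buffer IOVA, a1 = size convention used by enic_set_rss_key() later in this patch; CMD_STATS_DUMP is assumed to be the dump command:

static int stats_dump_sketch(struct vnic_dev *vdev, dma_addr_t stats_pa)
{
	uint64_t a0 = (uint64_t)stats_pa;	/* IOVA of DMA buffer */
	uint64_t a1 = sizeof(struct vnic_stats);
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_STATS_DUMP, &a0, &a1, wait);
}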
struct vnic_stats {
unsigned int error_interrupt_enable,
unsigned int error_interrupt_offset)
{
- u64 paddr;
+ uint64_t paddr;
unsigned int count = wq->ring.desc_count;
- paddr = (u64)wq->ring.base_addr | VNIC_PADDR_TARGET;
+ paddr = (uint64_t)wq->ring.base_addr | VNIC_PADDR_TARGET;
writeq(paddr, &wq->ctrl->ring_base);
iowrite32(count, &wq->ctrl->ring_size);
iowrite32(fetch_index, &wq->ctrl->fetch_index);
for (wait = 0; wait < 1000; wait++) {
if (!(ioread32(&wq->ctrl->running)))
return 0;
- udelay(10);
+ usleep(10);
}
pr_err("Failed to disable WQ[%d]\n", wq->index);
/* Work queue control */
struct vnic_wq_ctrl {
- u64 ring_base; /* 0x00 */
- u32 ring_size; /* 0x08 */
- u32 pad0;
- u32 posted_index; /* 0x10 */
- u32 pad1;
- u32 cq_index; /* 0x18 */
- u32 pad2;
- u32 enable; /* 0x20 */
- u32 pad3;
- u32 running; /* 0x28 */
- u32 pad4;
- u32 fetch_index; /* 0x30 */
- u32 pad5;
- u32 dca_value; /* 0x38 */
- u32 pad6;
- u32 error_interrupt_enable; /* 0x40 */
- u32 pad7;
- u32 error_interrupt_offset; /* 0x48 */
- u32 pad8;
- u32 error_status; /* 0x50 */
- u32 pad9;
+ uint64_t ring_base; /* 0x00 */
+ uint32_t ring_size; /* 0x08 */
+ uint32_t pad0;
+ uint32_t posted_index; /* 0x10 */
+ uint32_t pad1;
+ uint32_t cq_index; /* 0x18 */
+ uint32_t pad2;
+ uint32_t enable; /* 0x20 */
+ uint32_t pad3;
+ uint32_t running; /* 0x28 */
+ uint32_t pad4;
+ uint32_t fetch_index; /* 0x30 */
+ uint32_t pad5;
+ uint32_t dca_value; /* 0x38 */
+ uint32_t pad6;
+ uint32_t error_interrupt_enable; /* 0x40 */
+ uint32_t pad7;
+ uint32_t error_interrupt_offset; /* 0x48 */
+ uint32_t pad8;
+ uint32_t error_status; /* 0x50 */
+ uint32_t pad9;
};
struct vnic_wq {
return lines >> PI_LOG2_CACHE_LINE_SIZE;
}
-static inline u64 vnic_cached_posted_index(dma_addr_t addr, unsigned int len,
+static inline uint64_t vnic_cached_posted_index(dma_addr_t addr,
+ unsigned int len,
unsigned int index)
{
unsigned int num_cache_lines = num_cache_lines_touched(addr, len);
/* Ethernet work queue descriptor: 16B */
struct wq_enet_desc {
- __le64 address;
- __le16 length;
- __le16 mss_loopback;
- __le16 header_length_flags;
- __le16 vlan_tag;
+ uint64_t address;
+ uint16_t length;
+ uint16_t mss_loopback;
+ uint16_t header_length_flags;
+ uint16_t vlan_tag;
};
#define WQ_ENET_ADDR_BITS 64
#define WQ_ENET_OFFLOAD_MODE_TSO 3
static inline void wq_enet_desc_enc(struct wq_enet_desc *desc,
- u64 address, u16 length, u16 mss, u16 header_length,
- u8 offload_mode, u8 eop, u8 cq_entry, u8 fcoe_encap,
- u8 vlan_tag_insert, u16 vlan_tag, u8 loopback)
+ uint64_t address, uint16_t length, uint16_t mss, uint16_t header_length,
+ uint8_t offload_mode, uint8_t eop, uint8_t cq_entry, uint8_t fcoe_encap,
+ uint8_t vlan_tag_insert, uint16_t vlan_tag, uint8_t loopback)
{
desc->address = rte_cpu_to_le_64(address);
desc->length = rte_cpu_to_le_16(length & WQ_ENET_LEN_MASK);
}
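A usage sketch for the encoder: posting one complete buffer with no offloads. The zero offload_mode encoding is an assumption, and dma_addr/len are taken from the mbuf in a real transmit path:

wq_enet_desc_enc(desc,
	dma_addr, len,
	0 /* mss */, 0 /* header_length */,
	0 /* offload_mode: none (assumed encoding) */,
	1 /* eop */, 1 /* cq_entry */,
	0 /* fcoe_encap */,
	0 /* vlan_tag_insert */, 0 /* vlan_tag */,
	0 /* loopback */);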
static inline void wq_enet_desc_dec(struct wq_enet_desc *desc,
- u64 *address, u16 *length, u16 *mss, u16 *header_length,
- u8 *offload_mode, u8 *eop, u8 *cq_entry, u8 *fcoe_encap,
- u8 *vlan_tag_insert, u16 *vlan_tag, u8 *loopback)
+ uint64_t *address, uint16_t *length, uint16_t *mss,
+ uint16_t *header_length, uint8_t *offload_mode, uint8_t *eop,
+ uint8_t *cq_entry, uint8_t *fcoe_encap, uint8_t *vlan_tag_insert,
+ uint16_t *vlan_tag, uint8_t *loopback)
{
*address = rte_le_to_cpu_64(desc->address);
*length = rte_le_to_cpu_16(desc->length) & WQ_ENET_LEN_MASK;
*mss = (rte_le_to_cpu_16(desc->mss_loopback) >> WQ_ENET_MSS_SHIFT) &
WQ_ENET_MSS_MASK;
- *loopback = (u8)((rte_le_to_cpu_16(desc->mss_loopback) >>
+ *loopback = (uint8_t)((rte_le_to_cpu_16(desc->mss_loopback) >>
WQ_ENET_LOOPBACK_SHIFT) & 1);
*header_length = rte_le_to_cpu_16(desc->header_length_flags) &
WQ_ENET_HDRLEN_MASK;
- *offload_mode = (u8)((rte_le_to_cpu_16(desc->header_length_flags) >>
+ *offload_mode =
+ (uint8_t)((rte_le_to_cpu_16(desc->header_length_flags) >>
WQ_ENET_HDRLEN_BITS) & WQ_ENET_FLAGS_OM_MASK);
- *eop = (u8)((rte_le_to_cpu_16(desc->header_length_flags) >>
+ *eop = (uint8_t)((rte_le_to_cpu_16(desc->header_length_flags) >>
WQ_ENET_FLAGS_EOP_SHIFT) & 1);
- *cq_entry = (u8)((rte_le_to_cpu_16(desc->header_length_flags) >>
+ *cq_entry = (uint8_t)((rte_le_to_cpu_16(desc->header_length_flags) >>
WQ_ENET_FLAGS_CQ_ENTRY_SHIFT) & 1);
- *fcoe_encap = (u8)((rte_le_to_cpu_16(desc->header_length_flags) >>
+ *fcoe_encap = (uint8_t)((rte_le_to_cpu_16(desc->header_length_flags) >>
WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT) & 1);
- *vlan_tag_insert = (u8)((rte_le_to_cpu_16(desc->header_length_flags) >>
+ *vlan_tag_insert =
+ (uint8_t)((rte_le_to_cpu_16(desc->header_length_flags) >>
WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT) & 1);
*vlan_tag = rte_le_to_cpu_16(desc->vlan_tag);
}
struct enic_fdir_node {
struct rte_eth_fdir_filter filter;
- u16 fltr_id;
- u16 rq_index;
+ uint16_t fltr_id;
+ uint16_t rq_index;
};
struct enic_fdir {
struct rte_eth_fdir_stats stats;
struct rte_hash *hash;
struct enic_fdir_node *nodes[ENICPMD_FDIR_MAX];
- u32 modes;
- u32 types_mask;
+ uint32_t modes;
+ uint32_t types_mask;
void (*copy_fltr_fn)(struct filter_v2 *filt,
const struct rte_eth_fdir_input *input,
const struct rte_eth_fdir_masks *masks);
pthread_t err_intr_thread;
int promisc;
int allmulti;
- u8 ig_vlan_strip_en;
+ uint8_t ig_vlan_strip_en;
int link_status;
- u8 hw_ip_checksum;
- u16 max_mtu;
- u8 adv_filters;
- u32 flow_filter_mode;
- u8 filter_actions; /* HW supported actions */
+ uint8_t hw_ip_checksum;
+ uint16_t max_mtu;
+ uint8_t adv_filters;
+ uint32_t flow_filter_mode;
+ uint8_t filter_actions; /* HW supported actions */
bool vxlan;
bool disable_overlay; /* devargs disable_overlay=1 */
uint8_t enable_avx2_rx; /* devargs enable-avx2-rx=1 */
int enic_fm_init(struct enic *enic);
void enic_fm_destroy(struct enic *enic);
void *enic_alloc_consistent(void *priv, size_t size, dma_addr_t *dma_handle,
- u8 *name);
+ uint8_t *name);
void enic_free_consistent(void *priv, size_t size, void *vaddr,
dma_addr_t dma_handle);
uint16_t enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
void enic_fdir_info(struct enic *enic)
{
- enic->fdir.modes = (u32)RTE_FDIR_MODE_PERFECT;
+ enic->fdir.modes = (uint32_t)RTE_FDIR_MODE_PERFECT;
enic->fdir.types_mask = 1 << RTE_ETH_FLOW_NONFRAG_IPV4_UDP |
1 << RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
if (enic->adv_filters) {
struct enic_fdir_node *key;
struct filter_v2 fltr;
int32_t pos;
- u8 do_free = 0;
- u16 old_fltr_id = 0;
- u32 flowtype_supported;
- u16 flex_bytes;
- u16 queue;
+ uint8_t do_free = 0;
+ uint16_t old_fltr_id = 0;
+ uint32_t flowtype_supported;
+ uint16_t flex_bytes;
+ uint16_t queue;
struct filter_action_v2 action;
memset(&fltr, 0, sizeof(fltr));
void enic_clsf_destroy(struct enic *enic)
{
- u32 index;
+ uint32_t index;
struct enic_fdir_node *key;
/* delete classifier entries */
for (index = 0; index < ENICPMD_FDIR_MAX; index++) {
#include <rte_log.h>
#include <rte_io.h>
-#define ENIC_PAGE_ALIGN 4096UL
-#define ENIC_ALIGN ENIC_PAGE_ALIGN
#define ETH_ALEN 6
#define __iomem
-#define rmb() rte_rmb() /* dpdk rte provided rmb */
-#define wmb() rte_wmb() /* dpdk rte provided wmb */
-
-#ifndef offsetof
-#define offsetof(t, m) ((size_t) &((t *)0)->m)
-#endif
-
#define pr_err(y, args...) dev_err(0, y, ##args)
#define pr_warn(y, args...) dev_warning(0, y, ##args)
#define BUG() pr_err("BUG at %s:%d", __func__, __LINE__)
#define VNIC_ALIGN(x, a) __ALIGN_MASK(x, (typeof(x))(a)-1)
#define __ALIGN_MASK(x, mask) (((x)+(mask))&~(mask))
-#define udelay usleep
-#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
-
-#define kzalloc(size, flags) calloc(1, size)
-#define kfree(x) free(x)
extern int enic_pmd_logtype;
"%s " fmt "\n", __func__, ##args)
#define ENICPMD_FUNC_TRACE() ENICPMD_LOG(DEBUG, ">>")
-#define __le16 u16
-#define __le32 u32
-#define __le64 u64
-
-typedef unsigned char u8;
-typedef unsigned short u16;
-typedef unsigned int u32;
-typedef unsigned long long u64;
typedef unsigned long long dma_addr_t;
static inline uint32_t ioread32(volatile void *addr)
/** True if it's OK for this item to be the first item. For some NIC
* versions, it's invalid to start the stack above layer 3.
*/
- const u8 valid_start_item;
+ const uint8_t valid_start_item;
/* Inner packet version of copy_item. */
enic_copy_item_fn *inner_copy_item;
};
};
static int
-mask_exact_match(const u8 *supported, const u8 *supplied,
+mask_exact_match(const uint8_t *supported, const uint8_t *supplied,
unsigned int size)
{
unsigned int i;
}
/* check that the supplied mask exactly matches capability */
- if (!mask_exact_match((const u8 *)&supported_mask,
- (const u8 *)item->mask, sizeof(*mask))) {
+ if (!mask_exact_match((const uint8_t *)&supported_mask,
+ (const uint8_t *)item->mask, sizeof(*mask))) {
ENICPMD_LOG(ERR, "IPv4 exact match mask");
return ENOTSUP;
}
}
/* check that the supplied mask exactly matches capability */
- if (!mask_exact_match((const u8 *)&supported_mask,
- (const u8 *)item->mask, sizeof(*mask))) {
+ if (!mask_exact_match((const uint8_t *)&supported_mask,
+ (const uint8_t *)item->mask, sizeof(*mask))) {
ENICPMD_LOG(ERR, "UDP exact match mask");
return ENOTSUP;
}
}
/* check that the supplied mask exactly matches capability */
- if (!mask_exact_match((const u8 *)&supported_mask,
- (const u8 *)item->mask, sizeof(*mask))) {
+ if (!mask_exact_match((const uint8_t *)&supported_mask,
+ (const uint8_t *)item->mask, sizeof(*mask))) {
ENICPMD_LOG(ERR, "TCP exact match mask");
return ENOTSUP;
}
*/
static int
item_stacking_valid(enum rte_flow_item_type prev_item,
- const struct enic_items *item_info, u8 is_first_item)
+ const struct enic_items *item_info, uint8_t is_first_item)
{
enum rte_flow_item_type const *allowed_items = item_info->prev_items;
{
int ret;
const struct rte_flow_item *item = pattern;
- u8 inner_ofst = 0; /* If encapsulated, ofst into L5 */
+ uint8_t inner_ofst = 0; /* If encapsulated, ofst into L5 */
enum rte_flow_item_type prev_item;
const struct enic_items *item_info;
struct copy_item_args args;
enic_copy_item_fn *copy_fn;
- u8 is_first_item = 1;
+ uint8_t is_first_item = 1;
ENICPMD_FUNC_TRACE();
{
struct rte_flow *flow;
int err;
- u16 entry;
+ uint16_t entry;
ENICPMD_FUNC_TRACE();
enic_flow_del_filter(struct enic *enic, struct rte_flow *flow,
struct rte_flow_error *error)
{
- u16 filter_id;
+ uint16_t filter_id;
int err;
ENICPMD_FUNC_TRACE();
struct fm_exact_match_table *cmd;
struct fm_header_set *hdr;
struct enic_fm_fet *fet;
- u64 args[3];
+ uint64_t args[3];
int ret;
ENICPMD_FUNC_TRACE();
uint64_t *handle)
{
uint32_t bdf;
- u64 args[2];
+ uint64_t args[2];
int rc;
ENICPMD_FUNC_TRACE();
struct enic_fm_counter *ctrs;
struct enic *enic;
int i, rc;
- u64 args[2];
+ uint64_t args[2];
ENICPMD_FUNC_TRACE();
enic = fm->enic;
enic_fm_counter_zero(struct enic_flowman *fm, struct enic_fm_counter *c)
{
struct enic *enic;
- u64 args[3];
+ uint64_t args[3];
int ret;
ENICPMD_FUNC_TRACE();
static int
enic_fm_action_free(struct enic_flowman *fm, uint64_t handle)
{
- u64 args[2];
+ uint64_t args[2];
int rc;
ENICPMD_FUNC_TRACE();
static int
enic_fm_entry_free(struct enic_flowman *fm, uint64_t handle)
{
- u64 args[2];
+ uint64_t args[2];
int rc;
ENICPMD_FUNC_TRACE();
struct rte_flow_error *error)
{
struct fm_tcam_match_entry *ftm;
- u64 args[3];
+ uint64_t args[3];
int ret;
ENICPMD_FUNC_TRACE();
struct rte_flow_error *error)
{
struct fm_exact_match_entry *fem;
- u64 args[3];
+ uint64_t args[3];
int ret;
ENICPMD_FUNC_TRACE();
struct fm_action *fma;
uint64_t action_h;
uint64_t entry_h;
- u64 args[3];
+ uint64_t args[3];
int ret;
ENICPMD_FUNC_TRACE();
struct rte_flow_query_count *query;
struct enic_fm_flow *fm_flow;
struct enic *enic;
- u64 args[3];
+ uint64_t args[3];
int rc;
ENICPMD_FUNC_TRACE();
static int
enic_fm_tbl_free(struct enic_flowman *fm, uint64_t handle)
{
- u64 args[2];
+ uint64_t args[2];
int rc;
args[0] = FM_MATCH_TABLE_FREE;
{
struct fm_tcam_match_table *tcam_tbl;
struct enic *enic;
- u64 args[2];
+ uint64_t args[2];
int rc;
ENICPMD_FUNC_TRACE();
enic_fm_free_all_counters(struct enic_flowman *fm)
{
struct enic *enic;
- u64 args[2];
+ uint64_t args[2];
int rc;
enic = fm->enic;
enic_fm_init(struct enic *enic)
{
struct enic_flowman *fm;
- u8 name[RTE_MEMZONE_NAMESIZE];
+ uint8_t name[RTE_MEMZONE_NAMESIZE];
int rc;
if (enic->flow_filter_mode != FILTER_FLOWMAN)
static void enic_log_q_error(struct enic *enic)
{
unsigned int i;
- u32 error_status;
+ uint32_t error_status;
for (i = 0; i < enic->wq_count; i++) {
error_status = vnic_wq_error_status(&enic->wq[i]);
0 /* cq_entry_enable */,
1 /* cq_message_enable */,
0 /* interrupt offset */,
- (u64)enic->wq[index].cqmsg_rz->iova);
+ (uint64_t)enic->wq[index].cqmsg_rz->iova);
}
for (index = 0; index < enic->intr_count; index++) {
void *
enic_alloc_consistent(void *priv, size_t size,
- dma_addr_t *dma_handle, u8 *name)
+ dma_addr_t *dma_handle, uint8_t *name)
{
void *vaddr;
const struct rte_memzone *rz;
struct enic_memzone_entry *mze;
rz = rte_memzone_reserve_aligned((const char *)name, size,
- SOCKET_ID_ANY, RTE_MEMZONE_IOVA_CONTIG, ENIC_ALIGN);
+ SOCKET_ID_ANY, RTE_MEMZONE_IOVA_CONTIG, ENIC_PAGE_SIZE);
if (!rz) {
pr_err("%s : Failed to allocate memory requested for %s\n",
__func__, name);
wq->cqmsg_rz = rte_memzone_reserve_aligned((const char *)name,
sizeof(uint32_t), SOCKET_ID_ANY,
- RTE_MEMZONE_IOVA_CONTIG, ENIC_ALIGN);
+ RTE_MEMZONE_IOVA_CONTIG, ENIC_PAGE_SIZE);
if (!wq->cqmsg_rz)
return -ENOMEM;
dma_addr_t rss_key_buf_pa;
union vnic_rss_key *rss_key_buf_va = NULL;
int err, i;
- u8 name[RTE_MEMZONE_NAMESIZE];
+ uint8_t name[RTE_MEMZONE_NAMESIZE];
RTE_ASSERT(user_key != NULL);
snprintf((char *)name, sizeof(name), "rss_key-%s", enic->bdf_name);
dma_addr_t rss_cpu_buf_pa;
union vnic_rss_cpu *rss_cpu_buf_va = NULL;
int err;
- u8 name[RTE_MEMZONE_NAMESIZE];
+ uint8_t name[RTE_MEMZONE_NAMESIZE];
snprintf((char *)name, sizeof(name), "rss_cpu-%s", enic->bdf_name);
rss_cpu_buf_va = enic_alloc_consistent(enic, sizeof(union vnic_rss_cpu),
return err;
}
-static int enic_set_niccfg(struct enic *enic, u8 rss_default_cpu,
- u8 rss_hash_type, u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable)
+static int enic_set_niccfg(struct enic *enic, uint8_t rss_default_cpu,
+ uint8_t rss_hash_type, uint8_t rss_hash_bits, uint8_t rss_base_cpu,
+ uint8_t rss_enable)
{
- const u8 tso_ipid_split_en = 0;
+ const uint8_t tso_ipid_split_en = 0;
int err;
err = enic_set_nic_cfg(enic,
{
struct rte_eth_dev *eth_dev;
uint64_t rss_hf;
- u8 rss_hash_type;
- u8 rss_enable;
+ uint8_t rss_hash_type;
+ uint8_t rss_enable;
int ret;
RTE_ASSERT(rss_conf != NULL);
if (c->mtu == 0)
c->mtu = 1500;
- enic->rte_dev->data->mtu = min_t(u16, enic->max_mtu,
- max_t(u16, ENIC_MIN_MTU, c->mtu));
+ enic->rte_dev->data->mtu = min_t(uint16_t, enic->max_mtu,
+ max_t(uint16_t, ENIC_MIN_MTU, c->mtu));
enic->adv_filters = vnic_dev_capable_adv_filters(enic->vdev);
dev_info(enic, "Advanced Filters %savailable\n", ((enic->adv_filters)
"count " : ""));
c->wq_desc_count =
- min_t(u32, ENIC_MAX_WQ_DESCS,
- max_t(u32, ENIC_MIN_WQ_DESCS,
+ min_t(uint32_t, ENIC_MAX_WQ_DESCS,
+ max_t(uint32_t, ENIC_MIN_WQ_DESCS,
c->wq_desc_count));
c->wq_desc_count &= 0xffffffe0; /* must be aligned to groups of 32 */
c->rq_desc_count =
- min_t(u32, ENIC_MAX_RQ_DESCS,
- max_t(u32, ENIC_MIN_RQ_DESCS,
+ min_t(uint32_t, ENIC_MAX_RQ_DESCS,
+ max_t(uint32_t, ENIC_MIN_RQ_DESCS,
c->rq_desc_count));
c->rq_desc_count &= 0xffffffe0; /* must be aligned to groups of 32 */
- c->intr_timer_usec = min_t(u32, c->intr_timer_usec,
+ c->intr_timer_usec = min_t(uint32_t, c->intr_timer_usec,
vnic_dev_get_intr_coal_timer_max(enic->vdev));
dev_info(enic_get_dev(enic),
return 0;
}
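A quick worked example of the descriptor-count clamping above: a requested rq_desc_count of 1000 already sits within the min/max bounds, and 1000 & 0xffffffe0 == 992, i.e. the count is rounded down to the nearest multiple of 32 as the hardware requires.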
-int enic_set_nic_cfg(struct enic *enic, u8 rss_default_cpu, u8 rss_hash_type,
- u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable, u8 tso_ipid_split_en,
- u8 ig_vlan_strip_en)
+int enic_set_nic_cfg(struct enic *enic, uint8_t rss_default_cpu,
+ uint8_t rss_hash_type, uint8_t rss_hash_bits,
+ uint8_t rss_base_cpu, uint8_t rss_enable,
+ uint8_t tso_ipid_split_en, uint8_t ig_vlan_strip_en)
{
enum vnic_devcmd_cmd cmd;
- u64 a0, a1;
- u32 nic_cfg;
+ uint64_t a0, a1;
+ uint32_t nic_cfg;
int wait = 1000;
vnic_set_nic_cfg(&nic_cfg, rss_default_cpu,
return vnic_dev_cmd(enic->vdev, cmd, &a0, &a1, wait);
}
-int enic_set_rss_key(struct enic *enic, dma_addr_t key_pa, u64 len)
+int enic_set_rss_key(struct enic *enic, dma_addr_t key_pa, uint64_t len)
{
- u64 a0 = (u64)key_pa, a1 = len;
+ uint64_t a0 = (uint64_t)key_pa, a1 = len;
int wait = 1000;
return vnic_dev_cmd(enic->vdev, CMD_RSS_KEY, &a0, &a1, wait);
}
-int enic_set_rss_cpu(struct enic *enic, dma_addr_t cpu_pa, u64 len)
+int enic_set_rss_cpu(struct enic *enic, dma_addr_t cpu_pa, uint64_t len)
{
- u64 a0 = (u64)cpu_pa, a1 = len;
+ uint64_t a0 = (uint64_t)cpu_pa, a1 = len;
int wait = 1000;
return vnic_dev_cmd(enic->vdev, CMD_RSS_CPU, &a0, &a1, wait);
struct enic;
int enic_get_vnic_config(struct enic *);
-int enic_set_nic_cfg(struct enic *enic, u8 rss_default_cpu, u8 rss_hash_type,
- u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable, u8 tso_ipid_split_en,
- u8 ig_vlan_strip_en);
-int enic_set_rss_key(struct enic *enic, dma_addr_t key_pa, u64 len);
-int enic_set_rss_cpu(struct enic *enic, dma_addr_t cpu_pa, u64 len);
+int enic_set_nic_cfg(struct enic *enic, uint8_t rss_default_cpu,
+ uint8_t rss_hash_type, uint8_t rss_hash_bits,
+ uint8_t rss_base_cpu, uint8_t rss_enable,
+ uint8_t tso_ipid_split_en, uint8_t ig_vlan_strip_en);
+int enic_set_rss_key(struct enic *enic, dma_addr_t key_pa, uint64_t len);
+int enic_set_rss_cpu(struct enic *enic, dma_addr_t cpu_pa, uint64_t len);
void enic_get_res_counts(struct enic *enic);
void enic_init_vnic_resources(struct enic *enic);
int enic_alloc_vnic_resources(struct enic *);
return rx - rx_pkts;
}
-static inline void enic_free_wq_bufs(struct vnic_wq *wq, u16 completed_index)
+static inline void enic_free_wq_bufs(struct vnic_wq *wq,
+ uint16_t completed_index)
{
struct rte_mbuf *buf;
struct rte_mbuf *m, *free[ENIC_MAX_WQ_DESCS];
unsigned int enic_cleanup_wq(__rte_unused struct enic *enic, struct vnic_wq *wq)
{
- u16 completed_index;
+ uint16_t completed_index;
completed_index = *((uint32_t *)wq->cqmsg_rz->addr) & 0xffff;