Use the memzone name size and Ethernet address length defines from the
RTE header files instead of locally defined versions.
Use the RTE byte-swap functions instead of the x86-specific, locally
defined versions.
Signed-off-by: John Daley <johndale@cisco.com>
Reviewed-by: Hyong Youb Kim <hyonkim@cisco.com>
13 files changed:
#ifndef _CQ_DESC_H_
#define _CQ_DESC_H_
#ifndef _CQ_DESC_H_
#define _CQ_DESC_H_
+#include <rte_byteorder.h>
/*
* Completion queue descriptor types
/*
* Completion queue descriptor types
{
desc->type_color = (type & CQ_DESC_TYPE_MASK) |
((color & CQ_DESC_COLOR_MASK) << CQ_DESC_COLOR_SHIFT);
{
desc->type_color = (type & CQ_DESC_TYPE_MASK) |
((color & CQ_DESC_COLOR_MASK) << CQ_DESC_COLOR_SHIFT);
- desc->q_number = cpu_to_le16(q_number & CQ_DESC_Q_NUM_MASK);
- desc->completed_index = cpu_to_le16(completed_index &
+ desc->q_number = rte_cpu_to_le_16(q_number & CQ_DESC_Q_NUM_MASK);
+ desc->completed_index = rte_cpu_to_le_16(completed_index &
CQ_DESC_COMP_NDX_MASK);
}
CQ_DESC_COMP_NDX_MASK);
}
rmb();
*type = type_color & CQ_DESC_TYPE_MASK;
rmb();
*type = type_color & CQ_DESC_TYPE_MASK;
- *q_number = le16_to_cpu(desc->q_number) & CQ_DESC_Q_NUM_MASK;
- *completed_index = le16_to_cpu(desc->completed_index) &
+ *q_number = rte_le_to_cpu_16(desc->q_number) & CQ_DESC_Q_NUM_MASK;
+ *completed_index = rte_le_to_cpu_16(desc->completed_index) &
#ifndef _CQ_ENET_DESC_H_
#define _CQ_ENET_DESC_H_
#ifndef _CQ_ENET_DESC_H_
#define _CQ_ENET_DESC_H_
+#include <rte_byteorder.h>
#include "cq_desc.h"
/* Ethernet completion queue descriptor: 16B */
#include "cq_desc.h"
/* Ethernet completion queue descriptor: 16B */
cq_desc_enc((struct cq_desc *)desc, type,
color, q_number, completed_index);
cq_desc_enc((struct cq_desc *)desc, type,
color, q_number, completed_index);
- desc->completed_index_flags |= cpu_to_le16(
- (ingress_port ? CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT : 0) |
+ desc->completed_index_flags |= rte_cpu_to_le_16
+ ((ingress_port ? CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT : 0) |
(fcoe ? CQ_ENET_RQ_DESC_FLAGS_FCOE : 0) |
(eop ? CQ_ENET_RQ_DESC_FLAGS_EOP : 0) |
(sop ? CQ_ENET_RQ_DESC_FLAGS_SOP : 0));
(fcoe ? CQ_ENET_RQ_DESC_FLAGS_FCOE : 0) |
(eop ? CQ_ENET_RQ_DESC_FLAGS_EOP : 0) |
(sop ? CQ_ENET_RQ_DESC_FLAGS_SOP : 0));
- desc->q_number_rss_type_flags |= cpu_to_le16(
- ((rss_type & CQ_ENET_RQ_DESC_RSS_TYPE_MASK) <<
+ desc->q_number_rss_type_flags |= rte_cpu_to_le_16
+ (((rss_type & CQ_ENET_RQ_DESC_RSS_TYPE_MASK) <<
CQ_DESC_Q_NUM_BITS) |
(csum_not_calc ? CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC : 0));
CQ_DESC_Q_NUM_BITS) |
(csum_not_calc ? CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC : 0));
- desc->rss_hash = cpu_to_le32(rss_hash);
+ desc->rss_hash = rte_cpu_to_le_32(rss_hash);
- desc->bytes_written_flags = cpu_to_le16(
- (bytes_written & CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK) |
+ desc->bytes_written_flags = rte_cpu_to_le_16
+ ((bytes_written & CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK) |
(packet_error ? CQ_ENET_RQ_DESC_FLAGS_TRUNCATED : 0) |
(vlan_stripped ? CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED : 0));
(packet_error ? CQ_ENET_RQ_DESC_FLAGS_TRUNCATED : 0) |
(vlan_stripped ? CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED : 0));
- desc->vlan = cpu_to_le16(vlan);
+ desc->vlan = rte_cpu_to_le_16(vlan);
- desc->checksum_fcoe = cpu_to_le16(
- (fcoe_sof & CQ_ENET_RQ_DESC_FCOE_SOF_MASK) |
+ desc->checksum_fcoe = rte_cpu_to_le_16
+ ((fcoe_sof & CQ_ENET_RQ_DESC_FCOE_SOF_MASK) |
((fcoe_eof & CQ_ENET_RQ_DESC_FCOE_EOF_MASK) <<
CQ_ENET_RQ_DESC_FCOE_EOF_SHIFT));
} else {
((fcoe_eof & CQ_ENET_RQ_DESC_FCOE_EOF_MASK) <<
CQ_ENET_RQ_DESC_FCOE_EOF_SHIFT));
} else {
- desc->checksum_fcoe = cpu_to_le16(checksum);
+ desc->checksum_fcoe = rte_cpu_to_le_16(checksum);
cq_desc_dec((struct cq_desc *)desc, type,
color, q_number, completed_index);
cq_desc_dec((struct cq_desc *)desc, type,
color, q_number, completed_index);
- completed_index_flags = le16_to_cpu(desc->completed_index_flags);
+ completed_index_flags = rte_le_to_cpu_16(desc->completed_index_flags);
q_number_rss_type_flags =
q_number_rss_type_flags =
- le16_to_cpu(desc->q_number_rss_type_flags);
- bytes_written_flags = le16_to_cpu(desc->bytes_written_flags);
+ rte_le_to_cpu_16(desc->q_number_rss_type_flags);
+ bytes_written_flags = rte_le_to_cpu_16(desc->bytes_written_flags);
*ingress_port = (completed_index_flags &
CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT) ? 1 : 0;
*ingress_port = (completed_index_flags &
CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT) ? 1 : 0;
*csum_not_calc = (q_number_rss_type_flags &
CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ? 1 : 0;
*csum_not_calc = (q_number_rss_type_flags &
CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ? 1 : 0;
- *rss_hash = le32_to_cpu(desc->rss_hash);
+ *rss_hash = rte_le_to_cpu_32(desc->rss_hash);
*bytes_written = bytes_written_flags &
CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
*bytes_written = bytes_written_flags &
CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
/*
* Tag Control Information(16) = user_priority(3) + cfi(1) + vlan(12)
*/
/*
* Tag Control Information(16) = user_priority(3) + cfi(1) + vlan(12)
*/
- *vlan_tci = le16_to_cpu(desc->vlan);
+ *vlan_tci = rte_le_to_cpu_16(desc->vlan);
- *fcoe_sof = (u8)(le16_to_cpu(desc->checksum_fcoe) &
+ *fcoe_sof = (u8)(rte_le_to_cpu_16(desc->checksum_fcoe) &
CQ_ENET_RQ_DESC_FCOE_SOF_MASK);
*fcoe_fc_crc_ok = (desc->flags &
CQ_ENET_RQ_DESC_FCOE_FC_CRC_OK) ? 1 : 0;
*fcoe_enc_error = (desc->flags &
CQ_ENET_RQ_DESC_FCOE_ENC_ERROR) ? 1 : 0;
CQ_ENET_RQ_DESC_FCOE_SOF_MASK);
*fcoe_fc_crc_ok = (desc->flags &
CQ_ENET_RQ_DESC_FCOE_FC_CRC_OK) ? 1 : 0;
*fcoe_enc_error = (desc->flags &
CQ_ENET_RQ_DESC_FCOE_ENC_ERROR) ? 1 : 0;
- *fcoe_eof = (u8)((le16_to_cpu(desc->checksum_fcoe) >>
+ *fcoe_eof = (u8)((rte_le_to_cpu_16(desc->checksum_fcoe) >>
CQ_ENET_RQ_DESC_FCOE_EOF_SHIFT) &
CQ_ENET_RQ_DESC_FCOE_EOF_MASK);
*checksum = 0;
CQ_ENET_RQ_DESC_FCOE_EOF_SHIFT) &
CQ_ENET_RQ_DESC_FCOE_EOF_MASK);
*checksum = 0;
*fcoe_fc_crc_ok = 0;
*fcoe_enc_error = 0;
*fcoe_eof = 0;
*fcoe_fc_crc_ok = 0;
*fcoe_enc_error = 0;
*fcoe_eof = 0;
- *checksum = le16_to_cpu(desc->checksum_fcoe);
+ *checksum = rte_le_to_cpu_16(desc->checksum_fcoe);
#ifndef _RQ_ENET_DESC_H_
#define _RQ_ENET_DESC_H_
#ifndef _RQ_ENET_DESC_H_
#define _RQ_ENET_DESC_H_
+#include <rte_byteorder.h>
+
/* Ethernet receive queue descriptor: 16B */
struct rq_enet_desc {
__le64 address;
/* Ethernet receive queue descriptor: 16B */
struct rq_enet_desc {
__le64 address;
static inline void rq_enet_desc_enc(volatile struct rq_enet_desc *desc,
u64 address, u8 type, u16 length)
{
static inline void rq_enet_desc_enc(volatile struct rq_enet_desc *desc,
u64 address, u8 type, u16 length)
{
- desc->address = cpu_to_le64(address);
- desc->length_type = cpu_to_le16((length & RQ_ENET_LEN_MASK) |
+ desc->address = rte_cpu_to_le_64(address);
+ desc->length_type = rte_cpu_to_le_16((length & RQ_ENET_LEN_MASK) |
((type & RQ_ENET_TYPE_MASK) << RQ_ENET_LEN_BITS));
}
static inline void rq_enet_desc_dec(struct rq_enet_desc *desc,
u64 *address, u8 *type, u16 *length)
{
((type & RQ_ENET_TYPE_MASK) << RQ_ENET_LEN_BITS));
}
static inline void rq_enet_desc_dec(struct rq_enet_desc *desc,
u64 *address, u8 *type, u16 *length)
{
- *address = le64_to_cpu(desc->address);
- *length = le16_to_cpu(desc->length_type) & RQ_ENET_LEN_MASK;
- *type = (u8)((le16_to_cpu(desc->length_type) >> RQ_ENET_LEN_BITS) &
+ *address = rte_le_to_cpu_64(desc->address);
+ *length = rte_le_to_cpu_16(desc->length_type) & RQ_ENET_LEN_MASK;
+ *type = (u8)((rte_le_to_cpu_16(desc->length_type) >> RQ_ENET_LEN_BITS) &
#include "vnic_dev.h"
#include "vnic_cq.h"
#include "vnic_dev.h"
#include "vnic_cq.h"
+#include <rte_memzone.h>
void vnic_cq_free(struct vnic_cq *cq)
{
void vnic_cq_free(struct vnic_cq *cq)
{
unsigned int desc_count, unsigned int desc_size)
{
int err;
unsigned int desc_count, unsigned int desc_size)
{
int err;
- char res_name[NAME_MAX];
+ char res_name[RTE_MEMZONE_NAMESIZE];
static int instance;
cq->index = index;
static int instance;
cq->index = index;
#include <rte_memzone.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_memzone.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include "vnic_dev.h"
#include "vnic_resource.h"
#include "vnic_dev.h"
#include "vnic_resource.h"
int vnic_dev_fw_info(struct vnic_dev *vdev,
struct vnic_devcmd_fw_info **fw_info)
{
int vnic_dev_fw_info(struct vnic_dev *vdev,
struct vnic_devcmd_fw_info **fw_info)
{
+ char name[RTE_MEMZONE_NAMESIZE];
u64 a0, a1 = 0;
int wait = 1000;
int err = 0;
u64 a0, a1 = 0;
int wait = 1000;
int err = 0;
static int vnic_dev_flowman_enable(struct vnic_dev *vdev, u32 *mode,
u8 *filter_actions)
{
static int vnic_dev_flowman_enable(struct vnic_dev *vdev, u32 *mode,
u8 *filter_actions)
{
+ char name[RTE_MEMZONE_NAMESIZE];
u64 args[3];
u64 ops;
static u32 instance;
u64 args[3];
u64 ops;
static u32 instance;
return 0;
/* Can we get fm_info? */
if (!vdev->flowman_info) {
return 0;
/* Can we get fm_info? */
if (!vdev->flowman_info) {
- snprintf((char *)name, sizeof(name), "vnic_flowman_info-%u",
+ snprintf((char *)name, sizeof(name), "vnic_fm_info-%u",
instance++);
vdev->flowman_info = vdev->alloc_consistent(vdev->priv,
sizeof(struct fm_info),
instance++);
vdev->flowman_info = vdev->alloc_consistent(vdev->priv,
sizeof(struct fm_info),
int wait = 1000;
int err, i;
int wait = 1000;
int err, i;
- for (i = 0; i < ETH_ALEN; i++)
+ for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
mac_addr[i] = 0;
err = vnic_dev_cmd(vdev, CMD_GET_MAC_ADDR, &a0, &a1, wait);
if (err)
return err;
mac_addr[i] = 0;
err = vnic_dev_cmd(vdev, CMD_GET_MAC_ADDR, &a0, &a1, wait);
if (err)
return err;
- for (i = 0; i < ETH_ALEN; i++)
+ for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
mac_addr[i] = ((u8 *)&a0)[i];
return 0;
mac_addr[i] = ((u8 *)&a0)[i];
return 0;
- for (i = 0; i < ETH_ALEN; i++)
+ for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
((u8 *)&a0)[i] = addr[i];
err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
((u8 *)&a0)[i] = addr[i];
err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
- for (i = 0; i < ETH_ALEN; i++)
+ for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
((u8 *)&a0)[i] = addr[i];
err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait);
((u8 *)&a0)[i] = addr[i];
err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait);
{
void *notify_addr = NULL;
dma_addr_t notify_pa = 0;
{
void *notify_addr = NULL;
dma_addr_t notify_pa = 0;
+ char name[RTE_MEMZONE_NAMESIZE];
static u32 instance;
if (vdev->notify || vdev->notify_pa) {
static u32 instance;
if (vdev->notify || vdev->notify_pa) {
int vnic_dev_alloc_stats_mem(struct vnic_dev *vdev)
{
int vnic_dev_alloc_stats_mem(struct vnic_dev *vdev)
{
+ char name[RTE_MEMZONE_NAMESIZE];
static u32 instance;
snprintf((char *)name, sizeof(name), "vnic_stats-%u", instance++);
static u32 instance;
snprintf((char *)name, sizeof(name), "vnic_stats-%u", instance++);
unsigned int num_bars)
{
if (!vdev) {
unsigned int num_bars)
{
if (!vdev) {
+ char name[RTE_MEMZONE_NAMESIZE];
snprintf((char *)name, sizeof(name), "%s-vnic",
pdev->device.name);
vdev = (struct vnic_dev *)rte_zmalloc_socket(name,
snprintf((char *)name, sizeof(name), "%s-vnic",
pdev->device.name);
vdev = (struct vnic_dev *)rte_zmalloc_socket(name,
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*/
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*/
+#include <rte_memzone.h>
#include "vnic_dev.h"
#include "vnic_rq.h"
#include "vnic_dev.h"
#include "vnic_rq.h"
unsigned int desc_count, unsigned int desc_size)
{
int rc;
unsigned int desc_count, unsigned int desc_size)
{
int rc;
- char res_name[NAME_MAX];
+ char res_name[RTE_MEMZONE_NAMESIZE];
static int instance;
rq->index = index;
static int instance;
rq->index = index;
int vnic_wq_alloc_ring(struct vnic_dev *vdev, struct vnic_wq *wq,
unsigned int desc_count, unsigned int desc_size)
{
int vnic_wq_alloc_ring(struct vnic_dev *vdev, struct vnic_wq *wq,
unsigned int desc_count, unsigned int desc_size)
{
- char res_name[NAME_MAX];
+ char res_name[RTE_MEMZONE_NAMESIZE];
static int instance;
snprintf(res_name, sizeof(res_name), "%d-wq-%u", instance++, wq->index);
static int instance;
snprintf(res_name, sizeof(res_name), "%d-wq-%u", instance++, wq->index);
#ifndef _WQ_ENET_DESC_H_
#define _WQ_ENET_DESC_H_
#ifndef _WQ_ENET_DESC_H_
#define _WQ_ENET_DESC_H_
+#include <rte_byteorder.h>
+
/* Ethernet work queue descriptor: 16B */
struct wq_enet_desc {
__le64 address;
/* Ethernet work queue descriptor: 16B */
struct wq_enet_desc {
__le64 address;
u8 offload_mode, u8 eop, u8 cq_entry, u8 fcoe_encap,
u8 vlan_tag_insert, u16 vlan_tag, u8 loopback)
{
u8 offload_mode, u8 eop, u8 cq_entry, u8 fcoe_encap,
u8 vlan_tag_insert, u16 vlan_tag, u8 loopback)
{
- desc->address = cpu_to_le64(address);
- desc->length = cpu_to_le16(length & WQ_ENET_LEN_MASK);
- desc->mss_loopback = cpu_to_le16((mss & WQ_ENET_MSS_MASK) <<
+ desc->address = rte_cpu_to_le_64(address);
+ desc->length = rte_cpu_to_le_16(length & WQ_ENET_LEN_MASK);
+ desc->mss_loopback = rte_cpu_to_le_16((mss & WQ_ENET_MSS_MASK) <<
WQ_ENET_MSS_SHIFT | (loopback & 1) << WQ_ENET_LOOPBACK_SHIFT);
WQ_ENET_MSS_SHIFT | (loopback & 1) << WQ_ENET_LOOPBACK_SHIFT);
- desc->header_length_flags = cpu_to_le16(
- (header_length & WQ_ENET_HDRLEN_MASK) |
+ desc->header_length_flags = rte_cpu_to_le_16
+ ((header_length & WQ_ENET_HDRLEN_MASK) |
(offload_mode & WQ_ENET_FLAGS_OM_MASK) << WQ_ENET_HDRLEN_BITS |
(eop & 1) << WQ_ENET_FLAGS_EOP_SHIFT |
(cq_entry & 1) << WQ_ENET_FLAGS_CQ_ENTRY_SHIFT |
(fcoe_encap & 1) << WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT |
(vlan_tag_insert & 1) << WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT);
(offload_mode & WQ_ENET_FLAGS_OM_MASK) << WQ_ENET_HDRLEN_BITS |
(eop & 1) << WQ_ENET_FLAGS_EOP_SHIFT |
(cq_entry & 1) << WQ_ENET_FLAGS_CQ_ENTRY_SHIFT |
(fcoe_encap & 1) << WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT |
(vlan_tag_insert & 1) << WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT);
- desc->vlan_tag = cpu_to_le16(vlan_tag);
+ desc->vlan_tag = rte_cpu_to_le_16(vlan_tag);
}
static inline void wq_enet_desc_dec(struct wq_enet_desc *desc,
}
static inline void wq_enet_desc_dec(struct wq_enet_desc *desc,
u8 *offload_mode, u8 *eop, u8 *cq_entry, u8 *fcoe_encap,
u8 *vlan_tag_insert, u16 *vlan_tag, u8 *loopback)
{
u8 *offload_mode, u8 *eop, u8 *cq_entry, u8 *fcoe_encap,
u8 *vlan_tag_insert, u16 *vlan_tag, u8 *loopback)
{
- *address = le64_to_cpu(desc->address);
- *length = le16_to_cpu(desc->length) & WQ_ENET_LEN_MASK;
- *mss = (le16_to_cpu(desc->mss_loopback) >> WQ_ENET_MSS_SHIFT) &
+ *address = rte_le_to_cpu_64(desc->address);
+ *length = rte_le_to_cpu_16(desc->length) & WQ_ENET_LEN_MASK;
+ *mss = (rte_le_to_cpu_16(desc->mss_loopback) >> WQ_ENET_MSS_SHIFT) &
- *loopback = (u8)((le16_to_cpu(desc->mss_loopback) >>
+ *loopback = (u8)((rte_le_to_cpu_16(desc->mss_loopback) >>
WQ_ENET_LOOPBACK_SHIFT) & 1);
WQ_ENET_LOOPBACK_SHIFT) & 1);
- *header_length = le16_to_cpu(desc->header_length_flags) &
+ *header_length = rte_le_to_cpu_16(desc->header_length_flags) &
- *offload_mode = (u8)((le16_to_cpu(desc->header_length_flags) >>
+ *offload_mode = (u8)((rte_le_to_cpu_16(desc->header_length_flags) >>
WQ_ENET_HDRLEN_BITS) & WQ_ENET_FLAGS_OM_MASK);
WQ_ENET_HDRLEN_BITS) & WQ_ENET_FLAGS_OM_MASK);
- *eop = (u8)((le16_to_cpu(desc->header_length_flags) >>
+ *eop = (u8)((rte_le_to_cpu_16(desc->header_length_flags) >>
WQ_ENET_FLAGS_EOP_SHIFT) & 1);
WQ_ENET_FLAGS_EOP_SHIFT) & 1);
- *cq_entry = (u8)((le16_to_cpu(desc->header_length_flags) >>
+ *cq_entry = (u8)((rte_le_to_cpu_16(desc->header_length_flags) >>
WQ_ENET_FLAGS_CQ_ENTRY_SHIFT) & 1);
WQ_ENET_FLAGS_CQ_ENTRY_SHIFT) & 1);
- *fcoe_encap = (u8)((le16_to_cpu(desc->header_length_flags) >>
+ *fcoe_encap = (u8)((rte_le_to_cpu_16(desc->header_length_flags) >>
WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT) & 1);
WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT) & 1);
- *vlan_tag_insert = (u8)((le16_to_cpu(desc->header_length_flags) >>
+ *vlan_tag_insert = (u8)((rte_le_to_cpu_16(desc->header_length_flags) >>
WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT) & 1);
WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT) & 1);
- *vlan_tag = le16_to_cpu(desc->vlan_tag);
+ *vlan_tag = rte_le_to_cpu_16(desc->vlan_tag);
}
#endif /* _WQ_ENET_DESC_H_ */
}
#endif /* _WQ_ENET_DESC_H_ */
#define _ENIC_H_
#include <rte_vxlan.h>
#define _ENIC_H_
#include <rte_vxlan.h>
#include "vnic_enet.h"
#include "vnic_dev.h"
#include "vnic_flowman.h"
#include "vnic_enet.h"
#include "vnic_dev.h"
#include "vnic_flowman.h"
int iommu_group_fd;
int iommu_groupid;
int eventfd;
int iommu_group_fd;
int iommu_groupid;
int eventfd;
- uint8_t mac_addr[ETH_ALEN];
+ uint8_t mac_addr[RTE_ETHER_ADDR_LEN];
pthread_t err_intr_thread;
int promisc;
int allmulti;
pthread_t err_intr_thread;
int promisc;
int allmulti;
#define ENIC_PAGE_ALIGN 4096UL
#define ENIC_ALIGN ENIC_PAGE_ALIGN
#define ENIC_PAGE_ALIGN 4096UL
#define ENIC_ALIGN ENIC_PAGE_ALIGN
#define ETH_ALEN 6
#define __iomem
#define ETH_ALEN 6
#define __iomem
#define rmb() rte_rmb() /* dpdk rte provided rmb */
#define wmb() rte_wmb() /* dpdk rte provided wmb */
#define rmb() rte_rmb() /* dpdk rte provided rmb */
#define wmb() rte_wmb() /* dpdk rte provided wmb */
-#define le16_to_cpu
-#define le32_to_cpu
-#define le64_to_cpu
-#define cpu_to_le16
-#define cpu_to_le32
-#define cpu_to_le64
-
#ifndef offsetof
#define offsetof(t, m) ((size_t) &((t *)0)->m)
#endif
#ifndef offsetof
#define offsetof(t, m) ((size_t) &((t *)0)->m)
#endif
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_udp.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_udp.h>
+#include <rte_memzone.h>
#include "enic_compat.h"
#include "enic.h"
#include "enic_compat.h"
#include "enic.h"
enic_fm_init(struct enic *enic)
{
struct enic_flowman *fm;
enic_fm_init(struct enic *enic)
{
struct enic_flowman *fm;
+ u8 name[RTE_MEMZONE_NAMESIZE];
int rc;
if (enic->flow_filter_mode != FILTER_FLOWMAN)
int rc;
if (enic->flow_filter_mode != FILTER_FLOWMAN)
TAILQ_INIT(&fm->fet_list);
TAILQ_INIT(&fm->jump_list);
/* Allocate host memory for flowman commands */
TAILQ_INIT(&fm->fet_list);
TAILQ_INIT(&fm->jump_list);
/* Allocate host memory for flowman commands */
- snprintf((char *)name, NAME_MAX, "fm-cmd-%s", enic->bdf_name);
+ snprintf((char *)name, sizeof(name), "fm-cmd-%s", enic->bdf_name);
fm->cmd.va = enic_alloc_consistent(enic,
sizeof(union enic_flowman_cmd_mem), &fm->cmd.pa, name);
if (!fm->cmd.va) {
fm->cmd.va = enic_alloc_consistent(enic,
sizeof(union enic_flowman_cmd_mem), &fm->cmd.pa, name);
if (!fm->cmd.va) {
int err;
struct vnic_wq *wq = &enic->wq[queue_idx];
unsigned int cq_index = enic_cq_wq(enic, queue_idx);
int err;
struct vnic_wq *wq = &enic->wq[queue_idx];
unsigned int cq_index = enic_cq_wq(enic, queue_idx);
+ char name[RTE_MEMZONE_NAMESIZE];
static int instance;
wq->socket_id = socket_id;
static int instance;
wq->socket_id = socket_id;
dma_addr_t rss_key_buf_pa;
union vnic_rss_key *rss_key_buf_va = NULL;
int err, i;
dma_addr_t rss_key_buf_pa;
union vnic_rss_key *rss_key_buf_va = NULL;
int err, i;
+ u8 name[RTE_MEMZONE_NAMESIZE];
RTE_ASSERT(user_key != NULL);
RTE_ASSERT(user_key != NULL);
- snprintf((char *)name, NAME_MAX, "rss_key-%s", enic->bdf_name);
+ snprintf((char *)name, sizeof(name), "rss_key-%s", enic->bdf_name);
rss_key_buf_va = enic_alloc_consistent(enic, sizeof(union vnic_rss_key),
&rss_key_buf_pa, name);
if (!rss_key_buf_va)
rss_key_buf_va = enic_alloc_consistent(enic, sizeof(union vnic_rss_key),
&rss_key_buf_pa, name);
if (!rss_key_buf_va)
dma_addr_t rss_cpu_buf_pa;
union vnic_rss_cpu *rss_cpu_buf_va = NULL;
int err;
dma_addr_t rss_cpu_buf_pa;
union vnic_rss_cpu *rss_cpu_buf_va = NULL;
int err;
+ u8 name[RTE_MEMZONE_NAMESIZE];
- snprintf((char *)name, NAME_MAX, "rss_cpu-%s", enic->bdf_name);
+ snprintf((char *)name, sizeof(name), "rss_cpu-%s", enic->bdf_name);
rss_cpu_buf_va = enic_alloc_consistent(enic, sizeof(union vnic_rss_cpu),
&rss_cpu_buf_pa, name);
if (!rss_cpu_buf_va)
rss_cpu_buf_va = enic_alloc_consistent(enic, sizeof(union vnic_rss_cpu),
&rss_cpu_buf_pa, name);
if (!rss_cpu_buf_va)
#ifndef _ENIC_RXTX_COMMON_H_
#define _ENIC_RXTX_COMMON_H_
#ifndef _ENIC_RXTX_COMMON_H_
#define _ENIC_RXTX_COMMON_H_
+#include <rte_byteorder.h>
+
static inline uint16_t
enic_cq_rx_desc_ciflags(struct cq_enet_rq_desc *crd)
{
static inline uint16_t
enic_cq_rx_desc_ciflags(struct cq_enet_rq_desc *crd)
{
- return le16_to_cpu(crd->completed_index_flags) & ~CQ_DESC_COMP_NDX_MASK;
+ return rte_le_to_cpu_16(crd->completed_index_flags) &
+ ~CQ_DESC_COMP_NDX_MASK;
}
static inline uint16_t
enic_cq_rx_desc_bwflags(struct cq_enet_rq_desc *crd)
{
}
static inline uint16_t
enic_cq_rx_desc_bwflags(struct cq_enet_rq_desc *crd)
{
- return le16_to_cpu(crd->bytes_written_flags) &
+ return rte_le_to_cpu_16(crd->bytes_written_flags) &
~CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
}
~CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
}
static inline uint8_t
enic_cq_rx_desc_csum_not_calc(struct cq_enet_rq_desc *cqrd)
{
static inline uint8_t
enic_cq_rx_desc_csum_not_calc(struct cq_enet_rq_desc *cqrd)
{
- return (le16_to_cpu(cqrd->q_number_rss_type_flags) &
+ return (rte_le_to_cpu_16(cqrd->q_number_rss_type_flags) &
CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ==
CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC;
}
CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ==
CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC;
}
static inline uint8_t
enic_cq_rx_desc_rss_type(struct cq_enet_rq_desc *cqrd)
{
static inline uint8_t
enic_cq_rx_desc_rss_type(struct cq_enet_rq_desc *cqrd)
{
- return (uint8_t)((le16_to_cpu(cqrd->q_number_rss_type_flags) >>
+ return (uint8_t)((rte_le_to_cpu_16(cqrd->q_number_rss_type_flags) >>
CQ_DESC_Q_NUM_BITS) & CQ_ENET_RQ_DESC_RSS_TYPE_MASK);
}
static inline uint32_t
enic_cq_rx_desc_rss_hash(struct cq_enet_rq_desc *cqrd)
{
CQ_DESC_Q_NUM_BITS) & CQ_ENET_RQ_DESC_RSS_TYPE_MASK);
}
static inline uint32_t
enic_cq_rx_desc_rss_hash(struct cq_enet_rq_desc *cqrd)
{
- return le32_to_cpu(cqrd->rss_hash);
+ return rte_le_to_cpu_32(cqrd->rss_hash);
}
static inline uint16_t
enic_cq_rx_desc_vlan(struct cq_enet_rq_desc *cqrd)
{
}
static inline uint16_t
enic_cq_rx_desc_vlan(struct cq_enet_rq_desc *cqrd)
{
- return le16_to_cpu(cqrd->vlan);
+ return rte_le_to_cpu_16(cqrd->vlan);
}
static inline uint16_t
enic_cq_rx_desc_n_bytes(struct cq_desc *cqd)
{
struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
}
static inline uint16_t
enic_cq_rx_desc_n_bytes(struct cq_desc *cqd)
{
struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
- return le16_to_cpu(cqrd->bytes_written_flags) &
+ return rte_le_to_cpu_16(cqrd->bytes_written_flags) &
CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
}
CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
}