/* Round x up to the next ENIC_PAGE_SIZE boundary.
 * Assumes ENIC_PAGE_SIZE is a power of two (mask trick) — TODO confirm at
 * the ENIC_PAGE_SIZE definition; relies on ~(ENIC_PAGE_SIZE-1) promoting to
 * an all-ones-high unsigned long mask.
 */
#define PAGE_ROUND_UP(x) \
((((unsigned long)(x)) + ENIC_PAGE_SIZE-1) & (~(ENIC_PAGE_SIZE-1)))
-/* must be >= VNIC_COUNTER_DMA_MIN_PERIOD */
-#define VNIC_FLOW_COUNTER_UPDATE_MSECS 500
-
#define ENICPMD_VFIO_PATH "/dev/vfio/vfio"
/*#define ENIC_DESC_COUNT_MAKE_ODD (x) do{if ((~(x)) & 1) { (x)--; } }while(0)*/
/* Resolved leftover diff markers: the '-' hunk removed the flow-counter
 * index, consistent with the counter-support removal elsewhere in this file.
 */
LIST_ENTRY(rte_flow) next;	/* linkage on the per-device flow list */
u16 enic_filter_id;		/* NIC filter ID returned on install */
struct filter_v2 enic_filter;	/* filter spec pushed to the NIC */
};
/* Per-instance private data structure */
rte_spinlock_t mtu_lock;
LIST_HEAD(enic_flows, rte_flow) flows;
- int max_flow_counter;
- rte_spinlock_t flows_lock;
/* RSS */
uint16_t reta_size;
/* Multicast MAC addresses added to the NIC */
uint32_t mc_count;
- struct ether_addr mc_addrs[ENIC_MULTICAST_PERFECT_FILTERS];
+ struct rte_ether_addr mc_addrs[ENIC_MULTICAST_PERFECT_FILTERS];
};
/* Compute ethdev's max packet size from MTU */
static inline uint32_t enic_mtu_to_max_rx_pktlen(uint32_t mtu)
{
- /* ethdev max size includes eth and crc whereas NIC MTU does not */
- return mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+ /* ethdev max size includes eth whereas NIC MTU does not */
+ return mtu + RTE_ETHER_HDR_LEN;
}
/* Get the CQ index from a Start of Packet(SOP) RQ index */
static inline struct enic *pmd_priv(struct rte_eth_dev *eth_dev)
{
- return (struct enic *)eth_dev->data->dev_private;
+ return eth_dev->data->dev_private;
}
static inline uint32_t
/* Device control entry points (implemented in enic_main.c). The stats-clear
 * and packet-filter calls return int (resolved from the '+' diff side,
 * matching the ethdev ops that now propagate errors).
 */
int enic_get_link_status(struct enic *enic);
int enic_dev_stats_get(struct enic *enic,
		       struct rte_eth_stats *r_stats);
int enic_dev_stats_clear(struct enic *enic);
int enic_add_packet_filter(struct enic *enic);
int enic_set_mac_address(struct enic *enic, uint8_t *mac_addr);
int enic_del_mac_address(struct enic *enic, int mac_index);
unsigned int enic_cleanup_wq(struct enic *enic, struct vnic_wq *wq);