/* Special Filter id for non-specific packet flagging. Don't change value */
#define ENIC_MAGIC_FILTER_ID 0xffff
-#define ENICPMD_FDIR_MAX 64
-
/*
* Interrupt 0: LSC and errors
* Interrupt 1: rx queue 0
#define ENICPMD_LSC_INTR_OFFSET 0
#define ENICPMD_RXQ_INTR_OFFSET 1
-struct enic_fdir_node {
- struct rte_eth_fdir_filter filter;
- uint16_t fltr_id;
- uint16_t rq_index;
-};
-
-struct enic_fdir {
- struct rte_eth_fdir_stats stats;
- struct rte_hash *hash;
- struct enic_fdir_node *nodes[ENICPMD_FDIR_MAX];
- uint32_t modes;
- uint32_t types_mask;
- void (*copy_fltr_fn)(struct filter_v2 *filt,
- const struct rte_eth_fdir_input *input,
- const struct rte_eth_fdir_masks *masks);
-};
-
struct enic_soft_stats {
rte_atomic64_t rx_nombuf;
rte_atomic64_t rx_packet_errors;
struct filter_v2 enic_filter;
/* Data for flow manager based flow (enic_fm_flow.c) */
struct enic_fm_flow *fm;
+ int internal;
};
/* Per-instance private data structure */
struct enic {
- struct enic *next;
struct rte_pci_device *pdev;
struct vnic_enet_config config;
struct vnic_dev_bar bar0;
bool overlay_offload;
struct rte_eth_dev *rte_dev;
struct rte_eth_dev_data *dev_data;
- struct enic_fdir fdir;
char bdf_name[ENICPMD_BDF_LENGTH];
int dev_fd;
int iommu_group_fd;
uint8_t adv_filters;
uint32_t flow_filter_mode;
uint8_t filter_actions; /* HW supported actions */
+ uint64_t cq_entry_sizes; /* supported CQ entry sizes */
bool vxlan;
+ bool cq64; /* actually using 64B CQ entry */
+ bool cq64_request; /* devargs cq64=1 */
bool disable_overlay; /* devargs disable_overlay=1 */
uint8_t enable_avx2_rx; /* devargs enable-avx2-rx=1 */
uint8_t geneve_opt_avail; /* Geneve with options offload available */
/* Flow manager API */
struct enic_flowman *fm;
+ uint64_t fm_vnic_handle;
+ uint32_t fm_vnic_uif;
/* switchdev */
uint8_t switchdev_mode;
uint16_t switch_domain_id;
uint16_t max_vf_id;
+ /* Number of queues needed for VF representor paths */
+ uint32_t vf_required_wq;
+ uint32_t vf_required_cq;
+ uint32_t vf_required_rq;
/*
* Lock to serialize devcmds from PF, VF representors as they all share
* the same PF devcmd instance in firmware.
uint16_t vf_id;
int allmulti;
int promisc;
+ /* Representor path uses PF queues. These are reserved during init */
+ uint16_t pf_wq_idx; /* WQ dedicated to VF rep */
+ uint16_t pf_wq_cq_idx; /* CQ for WQ */
+ uint16_t pf_rq_sop_idx; /* SOP RQ dedicated to VF rep */
+ uint16_t pf_rq_data_idx; /* Data RQ */
+ /* Representor flows managed by flowman */
+ struct rte_flow *vf2rep_flow[2];
+ struct rte_flow *rep2vf_flow[2];
};
#define VF_ENIC_TO_VF_REP(vf_enic) \
static inline unsigned int enic_cq_rq(__rte_unused struct enic *enic, unsigned int rq)
{
-	/* Scatter rx uses two receive queues together with one
-	 * completion queue, so the completion queue number is no
-	 * longer the same as the rq number.
-	 */
+	/* Under the static WQ/RQ/CQ allocation scheme (see the big
+	 * comment below), CQ crI sits at index I of the CQ array, so
+	 * the CQ index equals the SOP RQ index.
+	 */
	return rq;
}
return enic->rq_count + wq;
}
+/*
+ * WQ, RQ, CQ allocation scheme. Firmware gives the driver an array of
+ * WQs, an array of RQs, and an array of CQs. For now, these are
+ * statically allocated between PF app send/receive queues and VF
+ * representor app send/receive queues. VF representor supports only 1
+ * send and 1 receive queue. The number of PF app queues is not known
+ * until the queue setup time.
+ *
+ * R = number of receive queues for PF app
+ * S = number of send queues for PF app
+ * V = number of VF representors
+ *
+ * wI = WQ for PF app send queue I
+ * rI = SOP RQ for PF app receive queue I
+ * dI = Data RQ for rI
+ * cwI = CQ for wI
+ * crI = CQ for rI
+ * vwI = WQ for VF representor send queue I
+ * vrI = SOP RQ for VF representor receive queue I
+ * vdI = Data RQ for vrI
+ * vcwI = CQ for vwI
+ * vcrI = CQ for vrI
+ *
+ * WQ array: | w0 |..| wS-1 |..| vwV-1 |..| vw0 |
+ * ^ ^ ^ ^
+ * index 0 S-1 W-V W-1 W=len(WQ array)
+ *
+ * RQ array: | r0 |..| rR-1 |d0 |..|dR-1| ..|vdV-1 |..| vd0 |vrV-1 |..|vr0 |
+ * ^ ^ ^ ^ ^ ^ ^ ^
+ * index 0 R-1 R 2R-1 X-2V X-(V+1) X-V X-1
+ * X=len(RQ array)
+ *
+ * CQ array: | cr0 |..| crR-1 |cw0|..|cwS-1|..|vcwV-1|..| vcw0|vcrV-1|..|vcr0|..
+ * ^ ^ ^ ^ ^ ^ ^ ^
+ * index 0 R-1 R R+S-1 X-2V X-(V+1) X-V X-1
+ * X is not a typo. It really is len(RQ array) to accommodate enic_cq_rq() used
+ * throughout RX handlers. The current scheme requires
+ * len(CQ array) >= len(RQ array).
+ */
+
+static inline unsigned int vf_wq_cq_idx(struct enic_vf_representor *vf)
+{
+	struct enic *pf = vf->pf;
+
+	/* Not a typo: the CQ for the VF rep WQ shares its index with the
+	 * VF rep data RQ, i.e. index(vcwI) == index(vdI).
+	 */
+	return pf->conf_rq_count - (pf->max_vf_id + vf->vf_id + 2);
+}
+
+static inline unsigned int vf_wq_idx(struct enic_vf_representor *vf)
+{
+	/* VF rep WQs are taken from the tail of the WQ array: vwI is at
+	 * index W - 1 - I, where W = len(WQ array).
+	 */
+	return vf->pf->conf_wq_count - (vf->vf_id + 1);
+}
+
+static inline unsigned int vf_rq_sop_idx(struct enic_vf_representor *vf)
+{
+	/* VF rep SOP RQs are taken from the tail of the RQ array: vrI is
+	 * at index X - 1 - I, where X = len(RQ array).
+	 */
+	return vf->pf->conf_rq_count - (vf->vf_id + 1);
+}
+
+static inline unsigned int vf_rq_data_idx(struct enic_vf_representor *vf)
+{
+	struct enic *pf = vf->pf;
+
+	/* VF rep data RQs sit just below the SOP RQs at the tail of the
+	 * RQ array: vdI is at index X - V - 1 - I (V = max_vf_id + 1).
+	 */
+	return pf->conf_rq_count - (pf->max_vf_id + vf->vf_id + 2);
+}
+
+/* Return the per-port enic private data stored in the ethdev */
static inline struct enic *pmd_priv(struct rte_eth_dev *eth_dev)
{
	return eth_dev->data->dev_private;
}
int dev_is_enic(struct rte_eth_dev *dev);
-void enic_fdir_stats_get(struct enic *enic,
- struct rte_eth_fdir_stats *stats);
-int enic_fdir_add_fltr(struct enic *enic,
- struct rte_eth_fdir_filter *params);
-int enic_fdir_del_fltr(struct enic *enic,
- struct rte_eth_fdir_filter *params);
void enic_free_wq(void *txq);
int enic_alloc_intr_resources(struct enic *enic);
int enic_setup_finish(struct enic *enic);
void enic_post_wq_index(struct vnic_wq *wq);
int enic_probe(struct enic *enic);
-int enic_clsf_init(struct enic *enic);
-void enic_clsf_destroy(struct enic *enic);
int enic_fm_init(struct enic *enic);
void enic_fm_destroy(struct enic *enic);
void *enic_alloc_consistent(void *priv, size_t size, dma_addr_t *dma_handle,
dma_addr_t dma_handle);
uint16_t enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
+uint16_t enic_recv_pkts_64(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
uint16_t enic_noscatter_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
uint16_t enic_dummy_recv_pkts(void *rx_queue,
bool enic_use_vector_rx_handler(struct rte_eth_dev *eth_dev);
void enic_pick_rx_handler(struct rte_eth_dev *eth_dev);
void enic_pick_tx_handler(struct rte_eth_dev *eth_dev);
-void enic_fdir_info(struct enic *enic);
-void enic_fdir_info_get(struct enic *enic, struct rte_eth_fdir_info *stats);
int enic_vf_representor_init(struct rte_eth_dev *eth_dev, void *init_params);
int enic_vf_representor_uninit(struct rte_eth_dev *ethdev);
int enic_fm_allocate_switch_domain(struct enic *pf);
+int enic_fm_add_rep2vf_flow(struct enic_vf_representor *vf);
+int enic_fm_add_vf2rep_flow(struct enic_vf_representor *vf);
+int enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq);
+void enic_rxmbuf_queue_release(struct enic *enic, struct vnic_rq *rq);
+void enic_free_wq_buf(struct rte_mbuf **buf);
+void enic_free_rq_buf(struct rte_mbuf **mbuf);
extern const struct rte_flow_ops enic_flow_ops;
extern const struct rte_flow_ops enic_fm_flow_ops;