#define EVDEV_DLB2_NAME_PMD dlb2_event
+/* Default values for command line devargs */
+/* NOTE(review): poll_interval units are not visible in this chunk --
+ * presumably poll iterations or cycles; confirm against the PMD docs.
+ */
+#define DLB2_POLL_INTERVAL_DEFAULT 1000
+#define DLB2_SW_CREDIT_QUANTA_DEFAULT 32 /* Default = Worker */
+#define DLB2_SW_CREDIT_P_QUANTA_DEFAULT 256 /* Producer */
+#define DLB2_SW_CREDIT_C_QUANTA_DEFAULT 256 /* Consumer */
+#define DLB2_DEPTH_THRESH_DEFAULT 256
+
/* command line arg strings */
#define NUMA_NODE_ARG "numa_node"
#define DLB2_MAX_NUM_EVENTS "max_num_events"
#define DLB2_NUM_DIR_CREDITS "num_dir_credits"
#define DEV_ID_ARG "dev_id"
-#define DLB2_DEFER_SCHED_ARG "defer_sched"
#define DLB2_QID_DEPTH_THRESH_ARG "qid_depth_thresh"
#define DLB2_COS_ARG "cos"
+/* New devargs; fallbacks are the *_DEFAULT values defined above */
+#define DLB2_POLL_INTERVAL_ARG "poll_interval"
+#define DLB2_SW_CREDIT_QUANTA_ARG "sw_credit_quanta"
+#define DLB2_HW_CREDIT_QUANTA_ARG "hw_credit_quanta"
+#define DLB2_DEPTH_THRESH_ARG "default_depth_thresh"
+#define DLB2_VECTOR_OPTS_ENAB_ARG "vector_opts_enable"
/* Begin HW related defines and structs */
#define DLB2_MIN_DEQUEUE_TIMEOUT_NS 1
/* Note: "- 1" here to support the timeout range check in eventdev_autotest */
#define DLB2_MAX_DEQUEUE_TIMEOUT_NS (UINT32_MAX - 1)
-#define DLB2_SW_CREDIT_BATCH_SZ 32
+/*
+ * Software credit batch sizes; values mirror the
+ * DLB2_SW_CREDIT_*_QUANTA_DEFAULT devargs defaults (32/256/256).
+ */
+#define DLB2_SW_CREDIT_BATCH_SZ 32 /* Default - Worker */
+#define DLB2_SW_CREDIT_P_BATCH_SZ 256 /* Producer */
+#define DLB2_SW_CREDIT_C_BATCH_SZ 256 /* Consumer */
#define DLB2_NUM_SN_GROUPS 2
#define DLB2_MAX_LDB_SN_ALLOC 1024
#define DLB2_MAX_QUEUE_DEPTH_THRESHOLD 8191
+/*
+ * Convert between the 8-bit eventdev priority (0-255) and DLB2's
+ * 3-bit priority (0-7) by shifting off/on the low 5 bits.
+ */
#define EV_TO_DLB2_PRIO(x) ((x) >> 5)
#define DLB2_TO_EV_PRIO(x) ((x) << 5)
+/*
+ * DLB hardware revisions supported by this PMD: v2.0 and v2.5.
+ */
+enum dlb2_hw_ver {
+	DLB2_HW_VER_2,
+	DLB2_HW_VER_2_5,
+};
+
enum dlb2_hw_port_types {
DLB2_LDB_PORT,
DLB2_DIR_PORT,
/* hw-specific format - do not change */
+/*
+ * Event type fields widened to share one 16-bit storage unit:
+ * 4-bit major code, 4 unused bits, 8-bit sub-type (4 + 4 + 8 = 16 bits,
+ * same total width as the previous major/unused byte plus sub byte).
+ */
struct dlb2_event_type {
-	uint8_t major:4;
-	uint8_t unused:4;
-	uint8_t sub;
+	uint16_t major:4;
+	uint16_t unused:4;
+	uint16_t sub:8;
};
union dlb2_opaque_data {
uint16_t cq_idx_unmasked;
uint16_t cq_depth_mask;
uint16_t gen_bit_shift;
+ uint64_t cq_rolling_mask; /*
+ * rotate to always have right expected
+ * gen bits
+ */
+ uint64_t cq_rolling_mask_2;
+ void *cq_addr_cached; /* avoid multiple refs */
enum dlb2_port_state state;
enum dlb2_configuration_state config_state;
int num_mapped_qids;
struct dlb2_cq_pop_qe *consume_qe;
struct dlb2_eventdev *dlb2; /* back ptr */
struct dlb2_eventdev_port *ev_port; /* back ptr */
+ bool use_scalar; /* force usage of scalar code */
+ uint16_t hw_credit_quanta;
};
/* Per-process per-port mmio and memory pointers */
uint32_t num_qid_inflights; /* User config */
uint32_t num_atm_inflights; /* User config */
enum dlb2_configuration_state config_state;
- int sched_type; /* LB queue only */
- uint32_t id;
- bool is_directed;
+ int sched_type; /* LB queue only */
+ uint8_t id;
+ bool is_directed;
};
struct dlb2_eventdev_queue {
uint32_t new_event_limit;
int max_num_events_override;
int num_dir_credits_override;
+ bool vector_opts_enabled;
volatile enum dlb2_run_state run_state;
uint16_t num_dir_queues; /* total num of evdev dir queues requested */
union {
uint16_t num_dir_ports; /* total num of dir ports requested */
bool umwait_allowed;
bool global_dequeue_wait; /* Not using per dequeue wait if true */
- bool defer_sched;
enum dlb2_cq_poll_modes poll_mode;
+ int poll_interval;
+ int sw_credit_quanta;
+ int hw_credit_quanta;
+ int default_depth_thresh;
uint8_t revision;
uint8_t version;
bool configured;
int max_num_events;
int num_dir_credits_override;
int dev_id;
- int defer_sched;
struct dlb2_qid_depth_thresholds qid_depth_thresholds;
enum dlb2_cos cos_id;
+ int poll_interval;
+ int sw_credit_quanta;
+ int hw_credit_quanta;
+ int default_depth_thresh;
+ bool vector_opts_enabled;
};
/* End Eventdev related defines and structs */