.max_event_queue_priority_levels = DLB2_QID_PRIORITIES,
.max_event_priority_levels = DLB2_QID_PRIORITIES,
.max_event_ports = DLB2_MAX_NUM_LDB_PORTS,
- .max_event_port_dequeue_depth = DLB2_MAX_CQ_DEPTH,
+ .max_event_port_dequeue_depth = DLB2_DEFAULT_CQ_DEPTH,
.max_event_port_enqueue_depth = DLB2_MAX_ENQUEUE_DEPTH,
.max_event_port_links = DLB2_MAX_NUM_QIDS_PER_LDB_CQ,
.max_num_events = DLB2_MAX_NUM_LDB_CREDITS,
{
struct dlb2_hw_dev *handle = &dlb2->qm_instance;
struct dlb2_hw_resource_info *dlb2_info = &handle->info;
+ int num_ldb_ports;
int ret;
/* Query driver resources provisioned for this device */
* The capabilities (CAPs) were set at compile time.
*/
+ if (dlb2->max_cq_depth != DLB2_DEFAULT_CQ_DEPTH)
+ num_ldb_ports = DLB2_MAX_HL_ENTRIES / dlb2->max_cq_depth;
+ else
+ num_ldb_ports = dlb2->hw_rsrc_query_results.num_ldb_ports;
+
evdev_dlb2_default_info.max_event_queues =
dlb2->hw_rsrc_query_results.num_ldb_queues;
- evdev_dlb2_default_info.max_event_ports =
- dlb2->hw_rsrc_query_results.num_ldb_ports;
+ evdev_dlb2_default_info.max_event_ports = num_ldb_ports;
if (dlb2->version == DLB2_HW_V2_5) {
evdev_dlb2_default_info.max_num_events =
handle->info.hw_rsrc_max.num_ldb_queues =
dlb2->hw_rsrc_query_results.num_ldb_queues;
- handle->info.hw_rsrc_max.num_ldb_ports =
- dlb2->hw_rsrc_query_results.num_ldb_ports;
+ handle->info.hw_rsrc_max.num_ldb_ports = num_ldb_ports;
handle->info.hw_rsrc_max.num_dir_ports =
dlb2->hw_rsrc_query_results.num_dir_ports;
return 0;
}
+
+/*
+ * kvargs callback: parse and validate the "max_cq_depth" devarg.
+ *
+ * @key: kvargs key (unused).
+ * @value: string form of the requested CQ depth override.
+ * @opaque: pointer to the int that receives the parsed depth.
+ *
+ * Return: 0 on success; -EINVAL on NULL input or an out-of-range or
+ * non-power-of-2 value; otherwise the negative error code returned by
+ * dlb2_string_to_int().
+ */
+static int
+set_max_cq_depth(const char *key __rte_unused,
+		 const char *value,
+		 void *opaque)
+{
+	int *max_cq_depth = opaque;
+	int ret;
+
+	if (value == NULL || opaque == NULL) {
+		DLB2_LOG_ERR("NULL pointer\n");
+		return -EINVAL;
+	}
+
+	ret = dlb2_string_to_int(max_cq_depth, value);
+	if (ret < 0)
+		return ret;
+
+	/* The override must be a power of 2 within the supported range */
+	if (*max_cq_depth < DLB2_MIN_CQ_DEPTH_OVERRIDE ||
+	    *max_cq_depth > DLB2_MAX_CQ_DEPTH_OVERRIDE ||
+	    !rte_is_power_of_2(*max_cq_depth)) {
+		DLB2_LOG_ERR("dlb2: max_cq_depth must be between %d and %d and a power of 2\n",
+			     DLB2_MIN_CQ_DEPTH_OVERRIDE,
+			     DLB2_MAX_CQ_DEPTH_OVERRIDE);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
static int
set_max_num_events(const char *key __rte_unused,
const char *value,
dlb2->hw_credit_quanta = dlb2_args->hw_credit_quanta;
dlb2->default_depth_thresh = dlb2_args->default_depth_thresh;
dlb2->vector_opts_enabled = dlb2_args->vector_opts_enabled;
+ dlb2->max_cq_depth = dlb2_args->max_cq_depth;
err = dlb2_iface_open(&dlb2->qm_instance, name);
if (err < 0) {
DLB2_HW_CREDIT_QUANTA_ARG,
DLB2_DEPTH_THRESH_ARG,
DLB2_VECTOR_OPTS_ENAB_ARG,
+ DLB2_MAX_CQ_DEPTH,
NULL };
if (params != NULL && params[0] != '\0') {
return ret;
}
+ ret = rte_kvargs_process(kvlist,
+ DLB2_MAX_CQ_DEPTH,
+ set_max_cq_depth,
+ &dlb2_args->max_cq_depth);
+ if (ret != 0) {
+		DLB2_LOG_ERR("%s: Error parsing max cq depth",
+ name);
+ rte_kvargs_free(kvlist);
+ return ret;
+ }
+
rte_kvargs_free(kvlist);
}
}
#define DLB2_SW_CREDIT_P_QUANTA_DEFAULT 256 /* Producer */
#define DLB2_SW_CREDIT_C_QUANTA_DEFAULT 256 /* Consumer */
#define DLB2_DEPTH_THRESH_DEFAULT 256
+#define DLB2_MIN_CQ_DEPTH_OVERRIDE 32
+#define DLB2_MAX_CQ_DEPTH_OVERRIDE 1024
/* command line arg strings */
#define NUMA_NODE_ARG "numa_node"
#define DLB2_HW_CREDIT_QUANTA_ARG "hw_credit_quanta"
#define DLB2_DEPTH_THRESH_ARG "default_depth_thresh"
#define DLB2_VECTOR_OPTS_ENAB_ARG "vector_opts_enable"
+#define DLB2_MAX_CQ_DEPTH "max_cq_depth"
/* Begin HW related defines and structs */
* depth must be a power of 2 and must also be >= HIST LIST entries.
* As a result we just limit the maximum dequeue depth to 32.
*/
+#define DLB2_MAX_HL_ENTRIES 2048
#define DLB2_MIN_CQ_DEPTH 1
-#define DLB2_MAX_CQ_DEPTH 32
+#define DLB2_DEFAULT_CQ_DEPTH 32
#define DLB2_MIN_HARDWARE_CQ_DEPTH 8
#define DLB2_NUM_HIST_LIST_ENTRIES_PER_LDB_PORT \
- DLB2_MAX_CQ_DEPTH
+ DLB2_DEFAULT_CQ_DEPTH
#define DLB2_HW_DEVICE_FROM_PCI_ID(_pdev) \
(((_pdev->id.device_id == PCI_DEVICE_ID_INTEL_DLB2_5_PF) || \
int max_num_events_override;
int num_dir_credits_override;
bool vector_opts_enabled;
+ int max_cq_depth;
volatile enum dlb2_run_state run_state;
uint16_t num_dir_queues; /* total num of evdev dir queues requested */
union {
int hw_credit_quanta;
int default_depth_thresh;
bool vector_opts_enabled;
+ int max_cq_depth;
};
/* End Eventdev related defines and structs */