.max_event_port_enqueue_depth = DLB2_MAX_ENQUEUE_DEPTH,
.max_event_port_links = DLB2_MAX_NUM_QIDS_PER_LDB_CQ,
.max_num_events = DLB2_MAX_NUM_LDB_CREDITS,
- .max_single_link_event_port_queue_pairs = DLB2_MAX_NUM_DIR_PORTS,
+ .max_single_link_event_port_queue_pairs =
+ DLB2_MAX_NUM_DIR_PORTS(DLB2_HW_V2),
.event_dev_cap = (RTE_EVENT_DEV_CAP_QUEUE_QOS |
RTE_EVENT_DEV_CAP_EVENT_QOS |
RTE_EVENT_DEV_CAP_BURST_MODE |
};
struct process_local_port_data
-dlb2_port[DLB2_MAX_NUM_PORTS][DLB2_NUM_PORT_TYPES];
+dlb2_port[DLB2_MAX_NUM_PORTS_ALL][DLB2_NUM_PORT_TYPES];
static void
dlb2_free_qe_mem(struct dlb2_port *qm_port)
{
int q;
- for (q = 0; q < DLB2_MAX_NUM_QUEUES; q++) {
+ for (q = 0; q < DLB2_MAX_NUM_QUEUES(dlb2->version); q++) {
if (qid_depth_thresholds[q] != 0)
dlb2->ev_queues[q].depth_threshold =
qid_depth_thresholds[q];
return ret;
if (*num_dir_credits < 0 ||
- *num_dir_credits > DLB2_MAX_NUM_DIR_CREDITS) {
+ *num_dir_credits > DLB2_MAX_NUM_DIR_CREDITS(DLB2_HW_V2)) {
DLB2_LOG_ERR("dlb2: num_dir_credits must be between 0 and %d\n",
- DLB2_MAX_NUM_DIR_CREDITS);
+ DLB2_MAX_NUM_DIR_CREDITS(DLB2_HW_V2));
return -EINVAL;
}
return 0;
}
-
static int
set_qid_depth_thresh(const char *key __rte_unused,
const char *value,
*/
if (sscanf(value, "all:%d", &thresh) == 1) {
first = 0;
- last = DLB2_MAX_NUM_QUEUES - 1;
+ last = DLB2_MAX_NUM_QUEUES(DLB2_HW_V2) - 1;
} else if (sscanf(value, "%d-%d:%d", &first, &last, &thresh) == 3) {
/* we have everything we need */
} else if (sscanf(value, "%d:%d", &first, &thresh) == 2) {
return -EINVAL;
}
- if (first > last || first < 0 || last >= DLB2_MAX_NUM_QUEUES) {
+ if (first > last || first < 0 ||
+ last >= DLB2_MAX_NUM_QUEUES(DLB2_HW_V2)) {
+ DLB2_LOG_ERR("Error parsing qid depth devarg, invalid qid value\n");
+ return -EINVAL;
+ }
+
+ if (thresh < 0 || thresh > DLB2_MAX_QUEUE_DEPTH_THRESHOLD) {
+ DLB2_LOG_ERR("Error parsing qid depth devarg, threshold > %d\n",
+ DLB2_MAX_QUEUE_DEPTH_THRESHOLD);
+ return -EINVAL;
+ }
+
+ for (i = first; i <= last; i++)
+ qid_thresh->val[i] = thresh; /* indexed by qid */
+
+ return 0;
+}
+
+static int
+set_qid_depth_thresh_v2_5(const char *key __rte_unused,
+ const char *value,
+ void *opaque)
+{
+ struct dlb2_qid_depth_thresholds *qid_thresh = opaque;
+ int first, last, thresh, i;
+
+ if (value == NULL || opaque == NULL) {
+ DLB2_LOG_ERR("NULL pointer\n");
+ return -EINVAL;
+ }
+
+ /* command line override may take one of the following 3 forms:
+ * qid_depth_thresh=all:<threshold_value> ... all queues
+ * qid_depth_thresh=qidA-qidB:<threshold_value> ... a range of queues
+ * qid_depth_thresh=qid:<threshold_value> ... just one queue
+ */
+ if (sscanf(value, "all:%d", &thresh) == 1) {
+ first = 0;
+ last = DLB2_MAX_NUM_QUEUES(DLB2_HW_V2_5) - 1;
+ } else if (sscanf(value, "%d-%d:%d", &first, &last, &thresh) == 3) {
+ /* we have everything we need */
+ } else if (sscanf(value, "%d:%d", &first, &thresh) == 2) {
+ last = first;
+ } else {
+ DLB2_LOG_ERR("Error parsing qid depth devarg. Should be all:val, qid-qid:val, or qid:val\n");
+ return -EINVAL;
+ }
+
+ if (first > last || first < 0 ||
+ last >= DLB2_MAX_NUM_QUEUES(DLB2_HW_V2_5)) {
DLB2_LOG_ERR("Error parsing qid depth devarg, invalid qid value\n");
return -EINVAL;
}
for (i = 0; i < dlb2->num_queues; i++)
dlb2->ev_queues[i].qm_queue.config_state = config_state;
- for (i = 0; i < DLB2_MAX_NUM_QUEUES; i++)
+ for (i = 0; i < DLB2_MAX_NUM_QUEUES(DLB2_HW_V2_5); i++)
dlb2->ev_queues[i].setup_done = false;
dlb2->num_ports = 0;
dlb2 = dlb2_pmd_priv(dev);
- if (ev_port_id >= DLB2_MAX_NUM_PORTS)
+ if (ev_port_id >= DLB2_MAX_NUM_PORTS(dlb2->version))
return -EINVAL;
if (port_conf->dequeue_depth >
}
/* Initialize each port's token pop mode */
- for (i = 0; i < DLB2_MAX_NUM_PORTS; i++)
+ for (i = 0; i < DLB2_MAX_NUM_PORTS(dlb2->version); i++)
dlb2->ev_ports[i].qm_port.token_pop_mode = AUTO_POP;
rte_spinlock_init(&dlb2->qm_instance.resource_lock);
int
dlb2_parse_params(const char *params,
const char *name,
- struct dlb2_devargs *dlb2_args)
+ struct dlb2_devargs *dlb2_args,
+ uint8_t version)
{
int ret = 0;
static const char * const args[] = { NUMA_NODE_ARG,
return ret;
}
- ret = rte_kvargs_process(kvlist,
+ if (version == DLB2_HW_V2) {
+ ret = rte_kvargs_process(kvlist,
DLB2_NUM_DIR_CREDITS,
set_num_dir_credits,
&dlb2_args->num_dir_credits_override);
- if (ret != 0) {
- DLB2_LOG_ERR("%s: Error parsing num_dir_credits parameter",
- name);
- rte_kvargs_free(kvlist);
- return ret;
+ if (ret != 0) {
+ DLB2_LOG_ERR("%s: Error parsing num_dir_credits parameter",
+ name);
+ rte_kvargs_free(kvlist);
+ return ret;
+ }
}
-
ret = rte_kvargs_process(kvlist, DEV_ID_ARG,
set_dev_id,
&dlb2_args->dev_id);
return ret;
}
- ret = rte_kvargs_process(
+ if (version == DLB2_HW_V2) {
+ ret = rte_kvargs_process(
kvlist,
DLB2_QID_DEPTH_THRESH_ARG,
set_qid_depth_thresh,
&dlb2_args->qid_depth_thresholds);
+ } else {
+ ret = rte_kvargs_process(
+ kvlist,
+ DLB2_QID_DEPTH_THRESH_ARG,
+ set_qid_depth_thresh_v2_5,
+ &dlb2_args->qid_depth_thresholds);
+ }
if (ret != 0) {
DLB2_LOG_ERR("%s: Error parsing qid_depth_thresh parameter",
name);
/* Begin HW related defines and structs */
+#define DLB2_HW_V2 0
+#define DLB2_HW_V2_5 1
#define DLB2_MAX_NUM_DOMAINS 32
#define DLB2_MAX_NUM_VFS 16
#define DLB2_MAX_NUM_LDB_QUEUES 32
#define DLB2_MAX_NUM_LDB_PORTS 64
-#define DLB2_MAX_NUM_DIR_PORTS 64
-#define DLB2_MAX_NUM_DIR_QUEUES 64
+#define DLB2_MAX_NUM_DIR_PORTS_V2 DLB2_MAX_NUM_DIR_QUEUES_V2
+#define DLB2_MAX_NUM_DIR_PORTS_V2_5 DLB2_MAX_NUM_DIR_QUEUES_V2_5
+#define DLB2_MAX_NUM_DIR_PORTS(ver) (ver == DLB2_HW_V2 ? \
+ DLB2_MAX_NUM_DIR_PORTS_V2 : \
+ DLB2_MAX_NUM_DIR_PORTS_V2_5)
+#define DLB2_MAX_NUM_DIR_QUEUES_V2 64 /* DIR == directed */
+#define DLB2_MAX_NUM_DIR_QUEUES_V2_5 96
+/* When needed for array sizing, the DLB 2.5 macro is used */
+#define DLB2_MAX_NUM_DIR_QUEUES(ver) (ver == DLB2_HW_V2 ? \
+ DLB2_MAX_NUM_DIR_QUEUES_V2 : \
+ DLB2_MAX_NUM_DIR_QUEUES_V2_5)
#define DLB2_MAX_NUM_FLOWS (64 * 1024)
#define DLB2_MAX_NUM_LDB_CREDITS (8 * 1024)
-#define DLB2_MAX_NUM_DIR_CREDITS (2 * 1024)
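+/* DLB 2.0 provides separate load-balanced and directed credit pools, while
+ * DLB 2.5 uses a single combined credit pool; each per-version maximum is
+ * therefore zero for the hardware version that does not use it.
+ */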
+#define DLB2_MAX_NUM_DIR_CREDITS(ver) (ver == DLB2_HW_V2 ? 4096 : 0)
+#define DLB2_MAX_NUM_CREDITS(ver) (ver == DLB2_HW_V2 ? \
+ 0 : DLB2_MAX_NUM_LDB_CREDITS)
#define DLB2_MAX_NUM_LDB_CREDIT_POOLS 64
#define DLB2_MAX_NUM_DIR_CREDIT_POOLS 64
#define DLB2_MAX_NUM_HIST_LIST_ENTRIES 2048
-#define DLB2_MAX_NUM_AQOS_ENTRIES 2048
#define DLB2_MAX_NUM_QIDS_PER_LDB_CQ 8
#define DLB2_QID_PRIORITIES 8
#define DLB2_MAX_DEVICE_PATH 32
#define DLB2_NUM_HIST_LIST_ENTRIES_PER_LDB_PORT \
DLB2_MAX_CQ_DEPTH
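+/* Derive the hardware version from the PCI device ID: the DLB 2.5 PF/VF
+ * device IDs select DLB2_HW_V2_5, all other supported IDs select DLB2_HW_V2.
+ */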
+#define DLB2_HW_DEVICE_FROM_PCI_ID(_pdev) \
+ (((_pdev->id.device_id == PCI_DEVICE_ID_INTEL_DLB2_5_PF) || \
+ (_pdev->id.device_id == PCI_DEVICE_ID_INTEL_DLB2_5_VF)) ? \
+ DLB2_HW_V2_5 : DLB2_HW_V2)
+
/*
* Static per queue/port provisioning values
*/
DLB2_NUM_QUEUE_TYPES /* Must be last */
};
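+/* On DLB 2.5 the combined credit pool is tracked in the load-balanced slot
+ * of per-queue-type arrays such as credit_pool[].
+ */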
+#define DLB2_COMBINED_POOL DLB2_LDB_QUEUE
+
#define PORT_TYPE(p) ((p)->is_directed ? DLB2_DIR_PORT : DLB2_LDB_PORT)
/* Do not change - must match hardware! */
uint32_t num_ldb_queues; /* Number of available ldb queues */
uint32_t num_ldb_ports; /* Number of load balanced ports */
uint32_t num_dir_ports; /* Number of directed ports */
- uint32_t num_ldb_credits; /* Number of load balanced credits */
- uint32_t num_dir_credits; /* Number of directed credits */
+ union {
+ struct {
+ uint32_t num_ldb_credits; /* Number of ldb credits */
+ uint32_t num_dir_credits; /* Number of dir credits */
+ };
+ struct {
+ uint32_t num_credits; /* Number of combined credits */
+ };
+ };
uint32_t reorder_window_size; /* Size of reorder window */
};
enum dlb2_token_pop_mode token_pop_mode;
union dlb2_port_config cfg;
uint32_t *credit_pool[DLB2_NUM_QUEUE_TYPES]; /* use __atomic builtins */
- uint16_t cached_ldb_credits;
- uint16_t ldb_credits;
- uint16_t cached_dir_credits;
+ union {
+ struct {
+ uint16_t cached_ldb_credits;
+ uint16_t ldb_credits;
+ uint16_t cached_dir_credits;
+ };
+ struct {
+ uint16_t cached_credits;
+ uint16_t credits;
+ };
+ };
bool int_armed;
uint16_t owed_tokens;
int16_t issued_releases;
struct dlb2_eventdev;
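+/* Low-level producer-port I/O hooks: pp_enqueue_four() writes the four QEs
+ * at qe4 to the producer-port address pp_addr.
+ */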
+struct dlb2_port_low_level_io_functions {
+ void (*pp_enqueue_four)(void *qe4, void *pp_addr);
+};
+
struct dlb2_config {
int configured;
int reserved;
- uint32_t num_ldb_credits;
- uint32_t num_dir_credits;
+ union {
+ struct {
+ uint32_t num_ldb_credits;
+ uint32_t num_dir_credits;
+ };
+ struct {
+ uint32_t num_credits;
+ };
+ };
struct dlb2_create_sched_domain_args resources;
};
/* Begin DLB2 PMD Eventdev related defines and structs */
-#define DLB2_MAX_NUM_QUEUES \
- (DLB2_MAX_NUM_DIR_QUEUES + DLB2_MAX_NUM_LDB_QUEUES)
+#define DLB2_MAX_NUM_QUEUES(ver) \
+ (DLB2_MAX_NUM_DIR_QUEUES(ver) + DLB2_MAX_NUM_LDB_QUEUES)
-#define DLB2_MAX_NUM_PORTS (DLB2_MAX_NUM_DIR_PORTS + DLB2_MAX_NUM_LDB_PORTS)
+#define DLB2_MAX_NUM_PORTS(ver) \
+ (DLB2_MAX_NUM_DIR_PORTS(ver) + DLB2_MAX_NUM_LDB_PORTS)
+
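+/* The _ALL variants use the larger DLB 2.5 counts so statically sized arrays
+ * can hold the resources of either hardware version.
+ */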
+#define DLB2_MAX_NUM_QUEUES_ALL \
+ (DLB2_MAX_NUM_DIR_QUEUES_V2_5 + DLB2_MAX_NUM_LDB_QUEUES)
+#define DLB2_MAX_NUM_PORTS_ALL \
+ (DLB2_MAX_NUM_DIR_PORTS_V2_5 + DLB2_MAX_NUM_LDB_PORTS)
#define DLB2_MAX_INPUT_QUEUE_DEPTH 256
/** Structure to hold the queue to port link establishment attributes */
uint64_t tx_ok;
uint64_t total_polls;
uint64_t zero_polls;
- uint64_t tx_nospc_ldb_hw_credits;
- uint64_t tx_nospc_dir_hw_credits;
+ union {
+ struct {
+ uint64_t tx_nospc_ldb_hw_credits;
+ uint64_t tx_nospc_dir_hw_credits;
+ };
+ struct {
+ uint64_t tx_nospc_hw_credits;
+ };
+ };
uint64_t tx_nospc_inflight_max;
uint64_t tx_nospc_new_event_limit;
uint64_t tx_nospc_inflight_credits;
uint64_t tx_invalid;
uint64_t rx_sched_cnt[DLB2_NUM_HW_SCHED_TYPES];
uint64_t rx_sched_invalid;
- struct dlb2_queue_stats queue[DLB2_MAX_NUM_QUEUES];
+ struct dlb2_queue_stats queue[DLB2_MAX_NUM_QUEUES_ALL];
};
struct dlb2_eventdev_port {
};
struct dlb2_eventdev {
- struct dlb2_eventdev_port ev_ports[DLB2_MAX_NUM_PORTS];
- struct dlb2_eventdev_queue ev_queues[DLB2_MAX_NUM_QUEUES];
- uint8_t qm_ldb_to_ev_queue_id[DLB2_MAX_NUM_QUEUES];
- uint8_t qm_dir_to_ev_queue_id[DLB2_MAX_NUM_QUEUES];
+ struct dlb2_eventdev_port ev_ports[DLB2_MAX_NUM_PORTS_ALL];
+ struct dlb2_eventdev_queue ev_queues[DLB2_MAX_NUM_QUEUES_ALL];
+ uint8_t qm_ldb_to_ev_queue_id[DLB2_MAX_NUM_QUEUES_ALL];
+ uint8_t qm_dir_to_ev_queue_id[DLB2_MAX_NUM_QUEUES_ALL];
/* store num stats and offset of the stats for each queue */
- uint16_t xstats_count_per_qid[DLB2_MAX_NUM_QUEUES];
- uint16_t xstats_offset_for_qid[DLB2_MAX_NUM_QUEUES];
+ uint16_t xstats_count_per_qid[DLB2_MAX_NUM_QUEUES_ALL];
+ uint16_t xstats_offset_for_qid[DLB2_MAX_NUM_QUEUES_ALL];
/* store num stats and offset of the stats for each port */
- uint16_t xstats_count_per_port[DLB2_MAX_NUM_PORTS];
- uint16_t xstats_offset_for_port[DLB2_MAX_NUM_PORTS];
+ uint16_t xstats_count_per_port[DLB2_MAX_NUM_PORTS_ALL];
+ uint16_t xstats_offset_for_port[DLB2_MAX_NUM_PORTS_ALL];
struct dlb2_get_num_resources_args hw_rsrc_query_results;
uint32_t xstats_count_mode_queue;
struct dlb2_hw_dev qm_instance; /* strictly hw related */
int num_dir_credits_override;
volatile enum dlb2_run_state run_state;
uint16_t num_dir_queues; /* total num of evdev dir queues requested */
- uint16_t num_dir_credits;
- uint16_t num_ldb_credits;
+ union {
+ struct {
+ uint16_t num_dir_credits;
+ uint16_t num_ldb_credits;
+ };
+ struct {
+ uint16_t num_credits;
+ };
+ };
uint16_t num_queues; /* total queues */
uint16_t num_ldb_queues; /* total num of evdev ldb queues requested */
uint16_t num_ports; /* total num of evdev ports requested */
bool defer_sched;
enum dlb2_cq_poll_modes poll_mode;
uint8_t revision;
+ uint8_t version; /* DLB2_HW_V2 or DLB2_HW_V2_5 */
bool configured;
- uint16_t max_ldb_credits;
- uint16_t max_dir_credits;
-
- /* force hw credit pool counters into exclusive cache lines */
-
- /* use __atomic builtins */ /* shared hw cred */
- uint32_t ldb_credit_pool __rte_cache_aligned;
- /* use __atomic builtins */ /* shared hw cred */
- uint32_t dir_credit_pool __rte_cache_aligned;
+ union {
+ struct {
+ uint16_t max_ldb_credits;
+ uint16_t max_dir_credits;
+ /* use __atomic builtins */ /* shared hw cred */
+ uint32_t ldb_credit_pool __rte_cache_aligned;
+ /* use __atomic builtins */ /* shared hw cred */
+ uint32_t dir_credit_pool __rte_cache_aligned;
+ };
+ struct {
+ uint16_t max_credits;
+ /* use __atomic builtins */ /* shared hw cred */
+ uint32_t credit_pool __rte_cache_aligned;
+ };
+ };
};
/* used for collecting and passing around the dev args */
struct dlb2_qid_depth_thresholds {
- int val[DLB2_MAX_NUM_QUEUES];
+ int val[DLB2_MAX_NUM_QUEUES_ALL];
};
struct dlb2_devargs {
int dlb2_parse_params(const char *params,
const char *name,
- struct dlb2_devargs *dlb2_args);
+ struct dlb2_devargs *dlb2_args,
+ uint8_t version);
/* Extern globals */
extern struct process_local_port_data dlb2_port[][DLB2_NUM_PORT_TYPES];
int i;
uint64_t val = 0;
- for (i = 0; i < DLB2_MAX_NUM_PORTS; i++) {
+ for (i = 0; i < DLB2_MAX_NUM_PORTS(dlb2->version); i++) {
struct dlb2_eventdev_port *port = &dlb2->ev_ports[i];
if (!port->setup_done)
int port = 0;
uint64_t tally = 0;
- for (port = 0; port < DLB2_MAX_NUM_PORTS; port++)
+ for (port = 0; port < DLB2_MAX_NUM_PORTS(dlb2->version); port++)
tally += dlb2->ev_ports[port].stats.queue[qid].qid_depth[stat];
return tally;
int port = 0;
uint64_t enq_ok_tally = 0;
- for (port = 0; port < DLB2_MAX_NUM_PORTS; port++)
+ for (port = 0; port < DLB2_MAX_NUM_PORTS(dlb2->version); port++)
enq_ok_tally += dlb2->ev_ports[port].stats.queue[qid].enq_ok;
return enq_ok_tally;
/* other vars */
const unsigned int count = RTE_DIM(dev_stats) +
- DLB2_MAX_NUM_PORTS * RTE_DIM(port_stats) +
- DLB2_MAX_NUM_QUEUES * RTE_DIM(qid_stats);
+ DLB2_MAX_NUM_PORTS(dlb2->version) * RTE_DIM(port_stats) +
+ DLB2_MAX_NUM_QUEUES(dlb2->version) * RTE_DIM(qid_stats);
unsigned int i, port, qid, stat_id = 0;
dlb2->xstats = rte_zmalloc_socket(NULL,
}
dlb2->xstats_count_mode_dev = stat_id;
- for (port = 0; port < DLB2_MAX_NUM_PORTS; port++) {
+ for (port = 0; port < DLB2_MAX_NUM_PORTS(dlb2->version); port++) {
dlb2->xstats_offset_for_port[port] = stat_id;
uint32_t count_offset = stat_id;
dlb2->xstats_count_mode_port = stat_id - dlb2->xstats_count_mode_dev;
- for (qid = 0; qid < DLB2_MAX_NUM_QUEUES; qid++) {
+ for (qid = 0; qid < DLB2_MAX_NUM_QUEUES(dlb2->version); qid++) {
uint32_t count_offset = stat_id;
dlb2->xstats_offset_for_qid[qid] = stat_id;
xstats_mode_count = dlb2->xstats_count_mode_dev;
break;
case RTE_EVENT_DEV_XSTATS_PORT:
- if (queue_port_id >= DLB2_MAX_NUM_PORTS)
+ if (queue_port_id >= DLB2_MAX_NUM_PORTS(dlb2->version))
break;
xstats_mode_count = dlb2->xstats_count_per_port[queue_port_id];
start_offset = dlb2->xstats_offset_for_port[queue_port_id];
break;
case RTE_EVENT_DEV_XSTATS_QUEUE:
-#if (DLB2_MAX_NUM_QUEUES <= 255) /* max 8 bit value */
- if (queue_port_id >= DLB2_MAX_NUM_QUEUES)
+ if (queue_port_id >= DLB2_MAX_NUM_QUEUES(dlb2->version) &&
+ (DLB2_MAX_NUM_QUEUES(dlb2->version) <= 255))
break;
-#endif
xstats_mode_count = dlb2->xstats_count_per_qid[queue_port_id];
start_offset = dlb2->xstats_offset_for_qid[queue_port_id];
break;
xstats_mode_count = dlb2->xstats_count_mode_dev;
break;
case RTE_EVENT_DEV_XSTATS_PORT:
- if (queue_port_id >= DLB2_MAX_NUM_PORTS)
+ if (queue_port_id >= DLB2_MAX_NUM_PORTS(dlb2->version))
goto invalid_value;
xstats_mode_count = dlb2->xstats_count_per_port[queue_port_id];
break;
case RTE_EVENT_DEV_XSTATS_QUEUE:
-#if (DLB2_MAX_NUM_QUEUES <= 255) /* max 8 bit value */
- if (queue_port_id >= DLB2_MAX_NUM_QUEUES)
+#if (DLB2_MAX_NUM_QUEUES(DLB2_HW_V2_5) <= 255) /* max 8 bit value */
+ if (queue_port_id >= DLB2_MAX_NUM_QUEUES(dlb2->version))
goto invalid_value;
#endif
xstats_mode_count = dlb2->xstats_count_per_qid[queue_port_id];
break;
case RTE_EVENT_DEV_XSTATS_PORT:
if (queue_port_id == -1) {
- for (i = 0; i < DLB2_MAX_NUM_PORTS; i++) {
+ for (i = 0; i < DLB2_MAX_NUM_PORTS(dlb2->version);
+ i++) {
if (dlb2_xstats_reset_port(dlb2, i,
ids, nb_ids))
return -EINVAL;
}
- } else if (queue_port_id < DLB2_MAX_NUM_PORTS) {
+ } else if (queue_port_id < DLB2_MAX_NUM_PORTS(dlb2->version)) {
if (dlb2_xstats_reset_port(dlb2, queue_port_id,
ids, nb_ids))
return -EINVAL;
break;
case RTE_EVENT_DEV_XSTATS_QUEUE:
if (queue_port_id == -1) {
- for (i = 0; i < DLB2_MAX_NUM_QUEUES; i++) {
+ for (i = 0; i < DLB2_MAX_NUM_QUEUES(dlb2->version);
+ i++) {
if (dlb2_xstats_reset_queue(dlb2, i,
ids, nb_ids))
return -EINVAL;
}
- } else if (queue_port_id < DLB2_MAX_NUM_QUEUES) {
+ } else if (queue_port_id < DLB2_MAX_NUM_QUEUES(dlb2->version)) {
if (dlb2_xstats_reset_queue(dlb2, queue_port_id,
ids, nb_ids))
return -EINVAL;
#include "dlb2_osdep_types.h"
#define DLB2_MAX_NUM_VDEVS 16
-#define DLB2_MAX_NUM_AQED_ENTRIES 2048
#define DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS 2
-#define DLB2_MAX_NUM_SEQUENCE_NUMBER_MODES 5
-
#define DLB2_NUM_ARB_WEIGHTS 8
+#define DLB2_MAX_NUM_AQED_ENTRIES 2048
#define DLB2_MAX_WEIGHT 255
#define DLB2_NUM_COS_DOMAINS 4
+#define DLB2_MAX_NUM_SEQUENCE_NUMBER_MODES 5
#define DLB2_MAX_CQ_COMP_CHECK_LOOPS 409600
#define DLB2_MAX_QID_EMPTY_CHECK_LOOPS (32 * 64 * 1024 * (800 / 30))
+
+#define DLB2_FUNC_BAR 0
+#define DLB2_CSR_BAR 2
+
#define PCI_DEVICE_ID_INTEL_DLB2_PF 0x2710
#define PCI_DEVICE_ID_INTEL_DLB2_VF 0x2711
+#define PCI_DEVICE_ID_INTEL_DLB2_5_PF 0x2714
+#define PCI_DEVICE_ID_INTEL_DLB2_5_VF 0x2715
+
#define DLB2_ALARM_HW_SOURCE_SYS 0
#define DLB2_ALARM_HW_SOURCE_DLB 1
#define DLB2_DIR_PP_BASE 0x2000000
#define DLB2_DIR_PP_STRIDE 0x1000
#define DLB2_DIR_PP_BOUND (DLB2_DIR_PP_BASE + \
- DLB2_DIR_PP_STRIDE * DLB2_MAX_NUM_DIR_PORTS)
+ DLB2_DIR_PP_STRIDE * \
+ DLB2_MAX_NUM_DIR_PORTS_V2_5)
#define DLB2_DIR_PP_OFFS(id) (DLB2_DIR_PP_BASE + (id) * DLB2_PP_SIZE)
struct dlb2_resource_id {
static inline bool dlb2_sn_group_full(struct dlb2_sn_group *group)
{
- u32 mask[] = {
+ const u32 mask[] = {
0x0000ffff, /* 64 SNs per queue */
0x000000ff, /* 128 SNs per queue */
0x0000000f, /* 256 SNs per queue */
static inline int dlb2_sn_group_alloc_slot(struct dlb2_sn_group *group)
{
- u32 bound[6] = {16, 8, 4, 2, 1};
+ const u32 bound[] = {16, 8, 4, 2, 1};
u32 i;
for (i = 0; i < bound[group->mode]; i++) {
struct dlb2_hw_resources {
struct dlb2_ldb_queue ldb_queues[DLB2_MAX_NUM_LDB_QUEUES];
struct dlb2_ldb_port ldb_ports[DLB2_MAX_NUM_LDB_PORTS];
- struct dlb2_dir_pq_pair dir_pq_pairs[DLB2_MAX_NUM_DIR_PORTS];
+ struct dlb2_dir_pq_pair dir_pq_pairs[DLB2_MAX_NUM_DIR_PORTS_V2_5];
struct dlb2_sn_group sn_groups[DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS];
};
};
struct dlb2_hw {
+ uint8_t ver; /* HW version: DLB2_HW_V2 or DLB2_HW_V2_5 */
+
/* BAR 0 address */
- void *csr_kva;
+ void *csr_kva;
unsigned long csr_phys_addr;
/* BAR 2 address */
- void *func_kva;
+ void *func_kva;
unsigned long func_phys_addr;
/* Resource tracking */
&port->func_list);
}
- hw->pf.num_avail_dir_pq_pairs = DLB2_MAX_NUM_DIR_PORTS;
+ hw->pf.num_avail_dir_pq_pairs = DLB2_MAX_NUM_DIR_PORTS(hw->ver);
for (i = 0; i < hw->pf.num_avail_dir_pq_pairs; i++) {
list = &hw->rsrcs.dir_pq_pairs[i].func_list;
}
hw->pf.num_avail_qed_entries = DLB2_MAX_NUM_LDB_CREDITS;
- hw->pf.num_avail_dqed_entries = DLB2_MAX_NUM_DIR_CREDITS;
+ hw->pf.num_avail_dqed_entries =
+ DLB2_MAX_NUM_DIR_CREDITS(hw->ver);
+
hw->pf.num_avail_aqed_entries = DLB2_MAX_NUM_AQED_ENTRIES;
ret = dlb2_bitmap_alloc(&hw->pf.avail_hist_list_entries,
hw->rsrcs.ldb_ports[i].id.vdev_owned = false;
}
- for (i = 0; i < DLB2_MAX_NUM_DIR_PORTS; i++) {
+ for (i = 0; i < DLB2_MAX_NUM_DIR_PORTS(hw->ver); i++) {
hw->rsrcs.dir_pq_pairs[i].id.phys_id = i;
hw->rsrcs.dir_pq_pairs[i].id.vdev_owned = false;
}
else
virt_id = port->id.phys_id;
- offs = vdev_id * DLB2_MAX_NUM_DIR_PORTS + virt_id;
+ offs = vdev_id * DLB2_MAX_NUM_DIR_PORTS(hw->ver) + virt_id;
DLB2_CSR_WR(hw, DLB2_SYS_VF_DIR_VPP_V(offs), r1.val);
}
dlb2_domain_disable_dir_queue_write_perms(struct dlb2_hw *hw,
struct dlb2_hw_domain *domain)
{
- int domain_offset = domain->id.phys_id * DLB2_MAX_NUM_DIR_PORTS;
+ int domain_offset = domain->id.phys_id *
+ DLB2_MAX_NUM_DIR_PORTS(hw->ver);
struct dlb2_list_entry *iter;
struct dlb2_dir_pq_pair *queue;
RTE_SET_USED(iter);
DLB2_CSR_WR(hw, DLB2_SYS_DIR_VASQID_V(idx), r0.val);
if (queue->id.vdev_owned) {
- idx = queue->id.vdev_id * DLB2_MAX_NUM_DIR_PORTS +
+ idx = queue->id.vdev_id *
+ DLB2_MAX_NUM_DIR_PORTS(hw->ver) +
queue->id.virt_id;
DLB2_CSR_WR(hw,
else
virt_id = port->id.phys_id;
- offs = port->id.vdev_id * DLB2_MAX_NUM_DIR_PORTS + virt_id;
+ offs = port->id.vdev_id * DLB2_MAX_NUM_DIR_PORTS(hw->ver)
+ + virt_id;
DLB2_CSR_WR(hw,
DLB2_SYS_VF_DIR_VPP2PP(offs),
}
static struct dlb2_dir_pq_pair *
-dlb2_get_domain_used_dir_pq(u32 id,
+dlb2_get_domain_used_dir_pq(struct dlb2_hw *hw,
+ u32 id,
bool vdev_req,
struct dlb2_hw_domain *domain)
{
struct dlb2_dir_pq_pair *port;
RTE_SET_USED(iter);
- if (id >= DLB2_MAX_NUM_DIR_PORTS)
+ if (id >= DLB2_MAX_NUM_DIR_PORTS(hw->ver))
return NULL;
DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter)
if (args->queue_id != -1) {
struct dlb2_dir_pq_pair *queue;
- queue = dlb2_get_domain_used_dir_pq(args->queue_id,
+ queue = dlb2_get_domain_used_dir_pq(hw,
+ args->queue_id,
vdev_req,
domain);
r1.field.pp = port->id.phys_id;
- offs = vdev_id * DLB2_MAX_NUM_DIR_PORTS + virt_id;
+ offs = vdev_id * DLB2_MAX_NUM_DIR_PORTS(hw->ver) + virt_id;
DLB2_CSR_WR(hw, DLB2_SYS_VF_DIR_VPP2PP(offs), r1.val);
domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
if (args->queue_id != -1)
- port = dlb2_get_domain_used_dir_pq(args->queue_id,
+ port = dlb2_get_domain_used_dir_pq(hw,
+ args->queue_id,
vdev_req,
domain);
else
/* QID write permissions are turned on when the domain is started */
r0.field.vasqid_v = 0;
- offs = domain->id.phys_id * DLB2_MAX_NUM_DIR_QUEUES +
+ offs = domain->id.phys_id * DLB2_MAX_NUM_DIR_QUEUES(hw->ver) +
queue->id.phys_id;
DLB2_CSR_WR(hw, DLB2_SYS_DIR_VASQID_V(offs), r0.val);
union dlb2_sys_vf_dir_vqid_v r3 = { {0} };
union dlb2_sys_vf_dir_vqid2qid r4 = { {0} };
- offs = vdev_id * DLB2_MAX_NUM_DIR_QUEUES + queue->id.virt_id;
+ offs = vdev_id * DLB2_MAX_NUM_DIR_QUEUES(hw->ver)
+ + queue->id.virt_id;
r3.field.vqid_v = 1;
if (args->port_id != -1) {
struct dlb2_dir_pq_pair *port;
- port = dlb2_get_domain_used_dir_pq(args->port_id,
+ port = dlb2_get_domain_used_dir_pq(hw,
+ args->port_id,
vdev_req,
domain);
}
if (args->port_id != -1)
- queue = dlb2_get_domain_used_dir_pq(args->port_id,
+ queue = dlb2_get_domain_used_dir_pq(hw,
+ args->port_id,
vdev_req,
domain);
else
r0.field.vasqid_v = 1;
- offs = domain->id.phys_id * DLB2_MAX_NUM_DIR_PORTS +
+ offs = domain->id.phys_id * DLB2_MAX_NUM_DIR_PORTS(hw->ver) +
dir_queue->id.phys_id;
DLB2_CSR_WR(hw, DLB2_SYS_DIR_VASQID_V(offs), r0.val);
id = args->queue_id;
- queue = dlb2_get_domain_used_dir_pq(id, vdev_req, domain);
+ queue = dlb2_get_domain_used_dir_pq(hw, id, vdev_req, domain);
if (queue == NULL) {
resp->status = DLB2_ST_INVALID_QID;
return -EINVAL;
{
int i;
/* Addresses will be initialized at port create */
- for (i = 0; i < DLB2_MAX_NUM_PORTS; i++) {
+ for (i = 0; i < DLB2_MAX_NUM_PORTS(DLB2_HW_V2_5); i++) {
/* First directed ports */
dlb2_port[i][DLB2_DIR_PORT].pp_addr = NULL;
dlb2_port[i][DLB2_DIR_PORT].cq_base = NULL;
if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
dlb2 = dlb2_pmd_priv(eventdev); /* rte_zmalloc_socket mem */
+ dlb2->version = DLB2_HW_DEVICE_FROM_PCI_ID(pci_dev);
/* Probe the DLB2 PF layer */
dlb2->qm_instance.pf_dev = dlb2_probe(pci_dev);
if (pci_dev->device.devargs) {
ret = dlb2_parse_params(pci_dev->device.devargs->args,
pci_dev->device.devargs->name,
- &dlb2_args);
+ &dlb2_args,
+ dlb2->version);
if (ret) {
DLB2_LOG_ERR("PFPMD failed to parse args ret=%d, errno=%d\n",
ret, rte_errno);
event_dlb2_pf_name,
&dlb2_args);
} else {
+ dlb2 = dlb2_pmd_priv(eventdev);
+ dlb2->version = DLB2_HW_DEVICE_FROM_PCI_ID(pci_dev);
ret = dlb2_secondary_eventdev_probe(eventdev,
event_dlb2_pf_name);
}
},
};
+static const struct rte_pci_id pci_id_dlb2_5_map[] = {
+ {
+ RTE_PCI_DEVICE(EVENTDEV_INTEL_VENDOR_ID,
+ PCI_DEVICE_ID_INTEL_DLB2_5_PF)
+ },
+ {
+ .vendor_id = 0,
+ },
+};
+
static int
event_dlb2_pci_probe(struct rte_pci_driver *pci_drv,
struct rte_pci_device *pci_dev)
}
+static int
+event_dlb2_5_pci_probe(struct rte_pci_driver *pci_drv,
+ struct rte_pci_device *pci_dev)
+{
+ int ret;
+
+ ret = rte_event_pmd_pci_probe_named(pci_drv, pci_dev,
+ sizeof(struct dlb2_eventdev),
+ dlb2_eventdev_pci_init,
+ event_dlb2_pf_name);
+ if (ret) {
+ DLB2_LOG_INFO("rte_event_pmd_pci_probe_named() failed, "
+ "ret=%d\n", ret);
+ }
+
+ return ret;
+}
+
+static int
+event_dlb2_5_pci_remove(struct rte_pci_device *pci_dev)
+{
+ int ret;
+
+ ret = rte_event_pmd_pci_remove(pci_dev, NULL);
+
+ if (ret) {
+ DLB2_LOG_INFO("rte_event_pmd_pci_remove() failed, "
+ "ret=%d\n", ret);
+ }
+
+ return ret;
+}
+
static struct rte_pci_driver pci_eventdev_dlb2_pmd = {
.id_table = pci_id_dlb2_map,
.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
.remove = event_dlb2_pci_remove,
};
+static struct rte_pci_driver pci_eventdev_dlb2_5_pmd = {
+ .id_table = pci_id_dlb2_5_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .probe = event_dlb2_5_pci_probe,
+ .remove = event_dlb2_5_pci_remove,
+};
+
RTE_PMD_REGISTER_PCI(event_dlb2_pf, pci_eventdev_dlb2_pmd);
RTE_PMD_REGISTER_PCI_TABLE(event_dlb2_pf, pci_id_dlb2_map);
+
+RTE_PMD_REGISTER_PCI(event_dlb2_5_pf, pci_eventdev_dlb2_5_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(event_dlb2_5_pf, pci_id_dlb2_5_map);