From b66a418d2ad31d53994451a6fc517b0713bffb44 Mon Sep 17 00:00:00 2001
From: Timothy McDaniel <timothy.mcdaniel@intel.com>
Date: Sat, 1 May 2021 14:03:37 -0500
Subject: [PATCH] event/dlb2: add v2.5 probe

This commit adds DLB v2.5 probe support and updates parameter parsing.
The DLB v2.5 device differs from DLB v2 in the number of resources it
provides (ports, queues, ...), so macros have been added to take the
device version into account.

Signed-off-by: Timothy McDaniel <timothy.mcdaniel@intel.com>
---
 drivers/event/dlb2/dlb2.c                  |  99 +++++++++++---
 drivers/event/dlb2/dlb2_priv.h             | 151 +++++++++++++++------
 drivers/event/dlb2/dlb2_xstats.c           |  37 ++---
 drivers/event/dlb2/pf/base/dlb2_hw_types.h |  28 ++--
 drivers/event/dlb2/pf/base/dlb2_resource.c |  47 ++++---
 drivers/event/dlb2/pf/dlb2_pf.c            |  62 ++++++++-
 6 files changed, 319 insertions(+), 105 deletions(-)

diff --git a/drivers/event/dlb2/dlb2.c b/drivers/event/dlb2/dlb2.c
index fb5ff012a4..7f5b9141b4 100644
--- a/drivers/event/dlb2/dlb2.c
+++ b/drivers/event/dlb2/dlb2.c
@@ -59,7 +59,8 @@ static struct rte_event_dev_info evdev_dlb2_default_info = {
 	.max_event_port_enqueue_depth = DLB2_MAX_ENQUEUE_DEPTH,
 	.max_event_port_links = DLB2_MAX_NUM_QIDS_PER_LDB_CQ,
 	.max_num_events = DLB2_MAX_NUM_LDB_CREDITS,
-	.max_single_link_event_port_queue_pairs = DLB2_MAX_NUM_DIR_PORTS,
+	.max_single_link_event_port_queue_pairs =
+		DLB2_MAX_NUM_DIR_PORTS(DLB2_HW_V2),
 	.event_dev_cap = (RTE_EVENT_DEV_CAP_QUEUE_QOS |
 			  RTE_EVENT_DEV_CAP_EVENT_QOS |
 			  RTE_EVENT_DEV_CAP_BURST_MODE |
@@ -69,7 +70,7 @@ static struct rte_event_dev_info evdev_dlb2_default_info = {
 };
 
 struct process_local_port_data
-dlb2_port[DLB2_MAX_NUM_PORTS][DLB2_NUM_PORT_TYPES];
+dlb2_port[DLB2_MAX_NUM_PORTS_ALL][DLB2_NUM_PORT_TYPES];
 
 static void
 dlb2_free_qe_mem(struct dlb2_port *qm_port)
@@ -97,7 +98,7 @@ dlb2_init_queue_depth_thresholds(struct dlb2_eventdev *dlb2,
 {
 	int q;
 
-	for (q = 0; q < DLB2_MAX_NUM_QUEUES; q++) {
+	for (q = 0; q < DLB2_MAX_NUM_QUEUES(dlb2->version); q++) {
 		if (qid_depth_thresholds[q] != 0)
 			dlb2->ev_queues[q].depth_threshold =
 				qid_depth_thresholds[q];
@@ -247,9 +248,9 @@ set_num_dir_credits(const char *key __rte_unused,
 		return ret;
 
 	if (*num_dir_credits < 0 ||
-	    *num_dir_credits > DLB2_MAX_NUM_DIR_CREDITS) {
+	    *num_dir_credits > DLB2_MAX_NUM_DIR_CREDITS(DLB2_HW_V2)) {
 		DLB2_LOG_ERR("dlb2: num_dir_credits must be between 0 and %d\n",
-			     DLB2_MAX_NUM_DIR_CREDITS);
+			     DLB2_MAX_NUM_DIR_CREDITS(DLB2_HW_V2));
 		return -EINVAL;
 	}
 
@@ -306,7 +307,6 @@ set_cos(const char *key __rte_unused,
 	return 0;
 }
 
-
 static int
 set_qid_depth_thresh(const char *key __rte_unused,
 		     const char *value,
@@ -327,7 +327,7 @@ set_qid_depth_thresh(const char *key __rte_unused,
 	 */
 	if (sscanf(value, "all:%d", &thresh) == 1) {
 		first = 0;
-		last = DLB2_MAX_NUM_QUEUES - 1;
+		last = DLB2_MAX_NUM_QUEUES(DLB2_HW_V2) - 1;
 	} else if (sscanf(value, "%d-%d:%d", &first, &last, &thresh) == 3) {
 		/* we have everything we need */
 	} else if (sscanf(value, "%d:%d", &first, &thresh) == 2) {
@@ -337,7 +337,56 @@ set_qid_depth_thresh(const char *key __rte_unused,
 		return -EINVAL;
 	}
 
-	if (first > last || first < 0 || last >= DLB2_MAX_NUM_QUEUES) {
+	if (first > last || first < 0 ||
+		last >= DLB2_MAX_NUM_QUEUES(DLB2_HW_V2)) {
+		DLB2_LOG_ERR("Error parsing qid depth devarg, invalid qid value\n");
+		return -EINVAL;
+	}
+
+	if (thresh < 0 || thresh > DLB2_MAX_QUEUE_DEPTH_THRESHOLD) {
+		DLB2_LOG_ERR("Error parsing qid depth devarg, threshold > %d\n",
+			DLB2_MAX_QUEUE_DEPTH_THRESHOLD);
+		return -EINVAL;
+	}
+
+	for (i = first; i <= last; i++)
+		qid_thresh->val[i] = thresh; /* indexed by qid */
+
+	return 0;
+}
+
+static int
+set_qid_depth_thresh_v2_5(const char *key __rte_unused,
+			  const char *value,
+			  void *opaque)
+{
+	struct dlb2_qid_depth_thresholds *qid_thresh = opaque;
+	int first, last, thresh, i;
+
+	if (value == NULL || opaque == NULL) {
+		DLB2_LOG_ERR("NULL pointer\n");
+		return -EINVAL;
+	}
+
+	/* command line override may take one of the following 3 forms:
+	 * qid_depth_thresh=all:<threshold_value> ... all queues
+	 * qid_depth_thresh=qidA-qidB:<threshold_value> ... a range of queues
+	 * qid_depth_thresh=qid:<threshold_value> ... just one queue
+	 */
+	if (sscanf(value, "all:%d", &thresh) == 1) {
+		first = 0;
+		last = DLB2_MAX_NUM_QUEUES(DLB2_HW_V2_5) - 1;
+	} else if (sscanf(value, "%d-%d:%d", &first, &last, &thresh) == 3) {
+		/* we have everything we need */
+	} else if (sscanf(value, "%d:%d", &first, &thresh) == 2) {
+		last = first;
+	} else {
+		DLB2_LOG_ERR("Error parsing qid depth devarg. Should be all:val, qid-qid:val, or qid:val\n");
+		return -EINVAL;
+	}
+
+	if (first > last || first < 0 ||
+		last >= DLB2_MAX_NUM_QUEUES(DLB2_HW_V2_5)) {
 		DLB2_LOG_ERR("Error parsing qid depth devarg, invalid qid value\n");
 		return -EINVAL;
 	}
@@ -521,7 +570,7 @@ dlb2_hw_reset_sched_domain(const struct rte_eventdev *dev, bool reconfig)
 	for (i = 0; i < dlb2->num_queues; i++)
 		dlb2->ev_queues[i].qm_queue.config_state = config_state;
 
-	for (i = 0; i < DLB2_MAX_NUM_QUEUES; i++)
+	for (i = 0; i < DLB2_MAX_NUM_QUEUES(DLB2_HW_V2_5); i++)
 		dlb2->ev_queues[i].setup_done = false;
 
 	dlb2->num_ports = 0;
@@ -1453,7 +1502,7 @@ dlb2_eventdev_port_setup(struct rte_eventdev *dev,
 
 	dlb2 = dlb2_pmd_priv(dev);
 
-	if (ev_port_id >= DLB2_MAX_NUM_PORTS)
+	if (ev_port_id >= DLB2_MAX_NUM_PORTS(dlb2->version))
 		return -EINVAL;
 
 	if (port_conf->dequeue_depth >
@@ -3895,7 +3944,7 @@ dlb2_primary_eventdev_probe(struct rte_eventdev *dev,
 	}
 
 	/* Initialize each port's token pop mode */
-	for (i = 0; i < DLB2_MAX_NUM_PORTS; i++)
+	for (i = 0; i < DLB2_MAX_NUM_PORTS(dlb2->version); i++)
 		dlb2->ev_ports[i].qm_port.token_pop_mode = AUTO_POP;
 
 	rte_spinlock_init(&dlb2->qm_instance.resource_lock);
@@ -3945,7 +3994,8 @@ dlb2_secondary_eventdev_probe(struct rte_eventdev *dev,
 int
 dlb2_parse_params(const char *params,
 		  const char *name,
-		  struct dlb2_devargs *dlb2_args)
+		  struct dlb2_devargs *dlb2_args,
+		  uint8_t version)
 {
 	int ret = 0;
 	static const char * const args[] = { NUMA_NODE_ARG,
@@ -3984,17 +4034,18 @@ dlb2_parse_params(const char *params,
 			return ret;
 		}
 
-		ret = rte_kvargs_process(kvlist,
+		if (version == DLB2_HW_V2) {
+			ret = rte_kvargs_process(kvlist,
 				DLB2_NUM_DIR_CREDITS,
 				set_num_dir_credits,
 				&dlb2_args->num_dir_credits_override);
-		if (ret != 0) {
-			DLB2_LOG_ERR("%s: Error parsing num_dir_credits parameter",
-				     name);
-			rte_kvargs_free(kvlist);
-			return ret;
+			if (ret != 0) {
+				DLB2_LOG_ERR("%s: Error parsing num_dir_credits parameter",
+					     name);
+				rte_kvargs_free(kvlist);
+				return ret;
+			}
 		}
-
 		ret = rte_kvargs_process(kvlist, DEV_ID_ARG,
 					 set_dev_id,
 					 &dlb2_args->dev_id);
@@ -4005,11 +4056,19 @@ dlb2_parse_params(const char *params,
 			return ret;
 		}
 
-		ret = rte_kvargs_process(
+		if (version == DLB2_HW_V2) {
+			ret = rte_kvargs_process(
 				kvlist,
 				DLB2_QID_DEPTH_THRESH_ARG,
 				set_qid_depth_thresh,
 				&dlb2_args->qid_depth_thresholds);
+		} else {
+			ret = rte_kvargs_process(
+				kvlist,
+				DLB2_QID_DEPTH_THRESH_ARG,
+				set_qid_depth_thresh_v2_5,
+				&dlb2_args->qid_depth_thresholds);
+		}
 		if (ret != 0) {
 			DLB2_LOG_ERR("%s: Error parsing qid_depth_thresh parameter",
 				     name);
diff --git a/drivers/event/dlb2/dlb2_priv.h b/drivers/event/dlb2/dlb2_priv.h
index eb1a932399..1cd78ad946 100644
--- a/drivers/event/dlb2/dlb2_priv.h
+++ b/drivers/event/dlb2/dlb2_priv.h
@@ -33,19 +33,31 @@
 
 /* Begin HW related defines and structs */
 
+#define DLB2_HW_V2 0
+#define DLB2_HW_V2_5 1
 #define DLB2_MAX_NUM_DOMAINS 32
 #define DLB2_MAX_NUM_VFS 16
 #define DLB2_MAX_NUM_LDB_QUEUES 32
 #define DLB2_MAX_NUM_LDB_PORTS 64
-#define DLB2_MAX_NUM_DIR_PORTS 64
-#define DLB2_MAX_NUM_DIR_QUEUES 64
+#define DLB2_MAX_NUM_DIR_PORTS_V2 DLB2_MAX_NUM_DIR_QUEUES_V2
+#define DLB2_MAX_NUM_DIR_PORTS_V2_5 DLB2_MAX_NUM_DIR_QUEUES_V2_5
+#define DLB2_MAX_NUM_DIR_PORTS(ver) (ver == DLB2_HW_V2 ? \
+				     DLB2_MAX_NUM_DIR_PORTS_V2 : \
+				     DLB2_MAX_NUM_DIR_PORTS_V2_5)
+#define DLB2_MAX_NUM_DIR_QUEUES_V2 64 /* DIR == directed */
+#define DLB2_MAX_NUM_DIR_QUEUES_V2_5 96
+/* When needed for array sizing, the DLB 2.5 macro is used */
+#define DLB2_MAX_NUM_DIR_QUEUES(ver) (ver == DLB2_HW_V2 ? \
+				      DLB2_MAX_NUM_DIR_QUEUES_V2 : \
+				      DLB2_MAX_NUM_DIR_QUEUES_V2_5)
 #define DLB2_MAX_NUM_FLOWS (64 * 1024)
 #define DLB2_MAX_NUM_LDB_CREDITS (8 * 1024)
-#define DLB2_MAX_NUM_DIR_CREDITS (2 * 1024)
+#define DLB2_MAX_NUM_DIR_CREDITS(ver) (ver == DLB2_HW_V2 ? 4096 : 0)
+#define DLB2_MAX_NUM_CREDITS(ver) (ver == DLB2_HW_V2 ? \
+				   0 : DLB2_MAX_NUM_LDB_CREDITS)
 #define DLB2_MAX_NUM_LDB_CREDIT_POOLS 64
 #define DLB2_MAX_NUM_DIR_CREDIT_POOLS 64
 #define DLB2_MAX_NUM_HIST_LIST_ENTRIES 2048
-#define DLB2_MAX_NUM_AQOS_ENTRIES 2048
 #define DLB2_MAX_NUM_QIDS_PER_LDB_CQ 8
 #define DLB2_QID_PRIORITIES 8
 #define DLB2_MAX_DEVICE_PATH 32
@@ -68,6 +80,11 @@
 #define DLB2_NUM_HIST_LIST_ENTRIES_PER_LDB_PORT \
 	DLB2_MAX_CQ_DEPTH
 
+#define DLB2_HW_DEVICE_FROM_PCI_ID(_pdev) \
+	(((_pdev->id.device_id == PCI_DEVICE_ID_INTEL_DLB2_5_PF) || \
+	  (_pdev->id.device_id == PCI_DEVICE_ID_INTEL_DLB2_5_VF)) ? \
+	 DLB2_HW_V2_5 : DLB2_HW_V2)
+
 /*
  * Static per queue/port provisioning values
  */
@@ -109,6 +126,8 @@ enum dlb2_hw_queue_types {
 	DLB2_NUM_QUEUE_TYPES /* Must be last */
 };
 
+#define DLB2_COMBINED_POOL DLB2_LDB_QUEUE
+
 #define PORT_TYPE(p) ((p)->is_directed ? DLB2_DIR_PORT : DLB2_LDB_PORT)
 
 /* Do not change - must match hardware! */
@@ -127,8 +146,15 @@ struct dlb2_hw_rsrcs {
 	uint32_t num_ldb_queues; /* Number of available ldb queues */
 	uint32_t num_ldb_ports;  /* Number of load balanced ports */
 	uint32_t num_dir_ports;  /* Number of directed ports */
-	uint32_t num_ldb_credits; /* Number of load balanced credits */
-	uint32_t num_dir_credits; /* Number of directed credits */
+	union {
+		struct {
+			uint32_t num_ldb_credits; /* Number of ldb credits */
+			uint32_t num_dir_credits; /* Number of dir credits */
+		};
+		struct {
+			uint32_t num_credits; /* Number of combined credits */
+		};
+	};
 	uint32_t reorder_window_size; /* Size of reorder window */
 };
 
@@ -292,9 +318,17 @@ struct dlb2_port {
 	enum dlb2_token_pop_mode token_pop_mode;
 	union dlb2_port_config cfg;
 	uint32_t *credit_pool[DLB2_NUM_QUEUE_TYPES]; /* use __atomic builtins */
-	uint16_t cached_ldb_credits;
-	uint16_t ldb_credits;
-	uint16_t cached_dir_credits;
+	union {
+		struct {
+			uint16_t cached_ldb_credits;
+			uint16_t ldb_credits;
+			uint16_t cached_dir_credits;
+		};
+		struct {
+			uint16_t cached_credits;
+			uint16_t credits;
+		};
+	};
 	bool int_armed;
 	uint16_t owed_tokens;
 	int16_t issued_releases;
@@ -325,11 +359,22 @@ struct process_local_port_data {
 
 struct dlb2_eventdev;
 
+struct dlb2_port_low_level_io_functions {
+	void (*pp_enqueue_four)(void *qe4, void *pp_addr);
+};
+
 struct dlb2_config {
 	int configured;
 	int reserved;
-	uint32_t num_ldb_credits;
-	uint32_t num_dir_credits;
+	union {
+		struct {
+			uint32_t num_ldb_credits;
+			uint32_t num_dir_credits;
+		};
+		struct {
+			uint32_t num_credits;
+		};
+	};
 	struct dlb2_create_sched_domain_args resources;
 };
 
@@ -354,10 +399,18 @@ struct dlb2_hw_dev {
 
 /* Begin DLB2 PMD Eventdev related defines and structs */
 
-#define DLB2_MAX_NUM_QUEUES \
-	(DLB2_MAX_NUM_DIR_QUEUES + DLB2_MAX_NUM_LDB_QUEUES)
+#define DLB2_MAX_NUM_QUEUES(ver) \
+	(DLB2_MAX_NUM_DIR_QUEUES(ver) + DLB2_MAX_NUM_LDB_QUEUES)
 
-#define DLB2_MAX_NUM_PORTS (DLB2_MAX_NUM_DIR_PORTS + DLB2_MAX_NUM_LDB_PORTS)
+#define DLB2_MAX_NUM_PORTS(ver) \
+	(DLB2_MAX_NUM_DIR_PORTS(ver) + DLB2_MAX_NUM_LDB_PORTS)
+
+#define DLB2_MAX_NUM_DIR_QUEUES_V2_5 96
+#define DLB2_MAX_NUM_DIR_PORTS_V2_5 DLB2_MAX_NUM_DIR_QUEUES_V2_5
+#define DLB2_MAX_NUM_QUEUES_ALL \
+	(DLB2_MAX_NUM_DIR_QUEUES_V2_5 + DLB2_MAX_NUM_LDB_QUEUES)
+#define DLB2_MAX_NUM_PORTS_ALL \
+	(DLB2_MAX_NUM_DIR_PORTS_V2_5 + DLB2_MAX_NUM_LDB_PORTS)
 #define DLB2_MAX_INPUT_QUEUE_DEPTH 256
 
 /** Structure to hold the queue to port link establishment attributes */
@@ -377,8 +430,15 @@ struct dlb2_traffic_stats {
 	uint64_t tx_ok;
 	uint64_t total_polls;
 	uint64_t zero_polls;
-	uint64_t tx_nospc_ldb_hw_credits;
-	uint64_t tx_nospc_dir_hw_credits;
+	union {
+		struct {
+			uint64_t tx_nospc_ldb_hw_credits;
+			uint64_t tx_nospc_dir_hw_credits;
+		};
+		struct {
+			uint64_t tx_nospc_hw_credits;
+		};
+	};
 	uint64_t tx_nospc_inflight_max;
 	uint64_t tx_nospc_new_event_limit;
 	uint64_t tx_nospc_inflight_credits;
@@ -411,7 +471,7 @@ struct dlb2_port_stats {
 	uint64_t tx_invalid;
 	uint64_t rx_sched_cnt[DLB2_NUM_HW_SCHED_TYPES];
 	uint64_t rx_sched_invalid;
-	struct dlb2_queue_stats queue[DLB2_MAX_NUM_QUEUES];
+	struct dlb2_queue_stats queue[DLB2_MAX_NUM_QUEUES_ALL];
 };
 
 struct dlb2_eventdev_port {
@@ -462,16 +522,16 @@ enum dlb2_run_state {
 };
 
 struct dlb2_eventdev {
-	struct dlb2_eventdev_port ev_ports[DLB2_MAX_NUM_PORTS];
-	struct dlb2_eventdev_queue ev_queues[DLB2_MAX_NUM_QUEUES];
-	uint8_t qm_ldb_to_ev_queue_id[DLB2_MAX_NUM_QUEUES];
-	uint8_t qm_dir_to_ev_queue_id[DLB2_MAX_NUM_QUEUES];
+	struct dlb2_eventdev_port ev_ports[DLB2_MAX_NUM_PORTS_ALL];
+	struct dlb2_eventdev_queue ev_queues[DLB2_MAX_NUM_QUEUES_ALL];
+	uint8_t qm_ldb_to_ev_queue_id[DLB2_MAX_NUM_QUEUES_ALL];
+	uint8_t qm_dir_to_ev_queue_id[DLB2_MAX_NUM_QUEUES_ALL];
 	/* store num stats and offset of the stats for each queue */
-	uint16_t xstats_count_per_qid[DLB2_MAX_NUM_QUEUES];
-	uint16_t xstats_offset_for_qid[DLB2_MAX_NUM_QUEUES];
+	uint16_t xstats_count_per_qid[DLB2_MAX_NUM_QUEUES_ALL];
+	uint16_t xstats_offset_for_qid[DLB2_MAX_NUM_QUEUES_ALL];
 	/* store num stats and offset of the stats for each port */
-	uint16_t xstats_count_per_port[DLB2_MAX_NUM_PORTS];
-	uint16_t xstats_offset_for_port[DLB2_MAX_NUM_PORTS];
+	uint16_t xstats_count_per_port[DLB2_MAX_NUM_PORTS_ALL];
+	uint16_t xstats_offset_for_port[DLB2_MAX_NUM_PORTS_ALL];
 	struct dlb2_get_num_resources_args hw_rsrc_query_results;
 	uint32_t xstats_count_mode_queue;
 	struct dlb2_hw_dev qm_instance; /* strictly hw related */
@@ -487,8 +547,15 @@ struct dlb2_eventdev {
 	int num_dir_credits_override;
 	volatile enum dlb2_run_state run_state;
 	uint16_t num_dir_queues; /* total num of evdev dir queues requested */
-	uint16_t num_dir_credits;
-	uint16_t num_ldb_credits;
+	union {
+		struct {
+			uint16_t num_dir_credits;
+			uint16_t num_ldb_credits;
+		};
+		struct {
+			uint16_t num_credits;
+		};
+	};
 	uint16_t num_queues; /* total queues */
 	uint16_t num_ldb_queues; /* total num of evdev ldb queues requested */
 	uint16_t num_ports; /* total num of evdev ports requested */
@@ -499,21 +566,28 @@ struct dlb2_eventdev {
 	bool defer_sched;
 	enum dlb2_cq_poll_modes poll_mode;
 	uint8_t revision;
+	uint8_t version;
 	bool configured;
-	uint16_t max_ldb_credits;
-	uint16_t max_dir_credits;
-
-	/* force hw credit pool counters into exclusive cache lines */
-
-	/* use __atomic builtins */ /* shared hw cred */
-	uint32_t ldb_credit_pool __rte_cache_aligned;
-	/* use __atomic builtins */ /* shared hw cred */
-	uint32_t dir_credit_pool __rte_cache_aligned;
+	union {
+		struct {
+			uint16_t max_ldb_credits;
+			uint16_t max_dir_credits;
+			/* use __atomic builtins */ /* shared hw cred */
+			uint32_t ldb_credit_pool __rte_cache_aligned;
+			/* use __atomic builtins */ /* shared hw cred */
+			uint32_t dir_credit_pool __rte_cache_aligned;
+		};
+		struct {
+			uint16_t max_credits;
+			/* use __atomic builtins */ /* shared hw cred */
+			uint32_t credit_pool __rte_cache_aligned;
+		};
+	};
 };
 
 /* used for collecting and passing around the dev args */
 struct dlb2_qid_depth_thresholds {
-	int val[DLB2_MAX_NUM_QUEUES];
+	int val[DLB2_MAX_NUM_QUEUES_ALL];
 };
 
 struct dlb2_devargs {
@@ -568,7 +642,8 @@ uint32_t dlb2_get_queue_depth(struct dlb2_eventdev *dlb2,
 			      struct dlb2_eventdev_queue *queue);
 int dlb2_parse_params(const char *params,
 		      const char *name,
-		      struct dlb2_devargs *dlb2_args);
+		      struct dlb2_devargs *dlb2_args,
+		      uint8_t version);
 
 /* Extern globals */
 extern struct process_local_port_data dlb2_port[][DLB2_NUM_PORT_TYPES];
diff --git a/drivers/event/dlb2/dlb2_xstats.c b/drivers/event/dlb2/dlb2_xstats.c
index 8c3c3cda94..b62e62060c 100644
--- a/drivers/event/dlb2/dlb2_xstats.c
+++ b/drivers/event/dlb2/dlb2_xstats.c
@@ -95,7 +95,7 @@ dlb2_device_traffic_stat_get(struct dlb2_eventdev *dlb2,
 	int i;
 	uint64_t val = 0;
 
-	for (i = 0; i < DLB2_MAX_NUM_PORTS; i++) {
+	for (i = 0; i < DLB2_MAX_NUM_PORTS(dlb2->version); i++) {
 		struct dlb2_eventdev_port *port = &dlb2->ev_ports[i];
 
 		if (!port->setup_done)
@@ -269,7 +269,7 @@ dlb2_get_threshold_stat(struct dlb2_eventdev *dlb2, int qid, int stat)
 	int port = 0;
 	uint64_t tally = 0;
 
-	for (port = 0; port < DLB2_MAX_NUM_PORTS; port++)
+	for (port = 0; port < DLB2_MAX_NUM_PORTS(dlb2->version); port++)
 		tally += dlb2->ev_ports[port].stats.queue[qid].qid_depth[stat];
 
 	return tally;
@@ -281,7 +281,7 @@ dlb2_get_enq_ok_stat(struct dlb2_eventdev *dlb2, int qid)
 	int port = 0;
 	uint64_t enq_ok_tally = 0;
 
-	for (port = 0; port < DLB2_MAX_NUM_PORTS; port++)
+	for (port = 0; port < DLB2_MAX_NUM_PORTS(dlb2->version); port++)
 		enq_ok_tally += dlb2->ev_ports[port].stats.queue[qid].enq_ok;
 
 	return enq_ok_tally;
@@ -561,8 +561,8 @@ dlb2_xstats_init(struct dlb2_eventdev *dlb2)
 
 	/* other vars */
 	const unsigned int count = RTE_DIM(dev_stats) +
-		DLB2_MAX_NUM_PORTS * RTE_DIM(port_stats) +
-		DLB2_MAX_NUM_QUEUES * RTE_DIM(qid_stats);
+		DLB2_MAX_NUM_PORTS(dlb2->version) * RTE_DIM(port_stats) +
+		DLB2_MAX_NUM_QUEUES(dlb2->version) * RTE_DIM(qid_stats);
 	unsigned int i, port, qid, stat_id = 0;
 
 	dlb2->xstats = rte_zmalloc_socket(NULL,
@@ -583,7 +583,7 @@ dlb2_xstats_init(struct dlb2_eventdev *dlb2)
 	}
 	dlb2->xstats_count_mode_dev = stat_id;
 
-	for (port = 0; port < DLB2_MAX_NUM_PORTS; port++) {
+	for (port = 0; port < DLB2_MAX_NUM_PORTS(dlb2->version); port++) {
 		dlb2->xstats_offset_for_port[port] = stat_id;
 
 		uint32_t count_offset = stat_id;
@@ -605,7 +605,7 @@ dlb2_xstats_init(struct dlb2_eventdev *dlb2)
 
 	dlb2->xstats_count_mode_port = stat_id - dlb2->xstats_count_mode_dev;
 
-	for (qid = 0; qid < DLB2_MAX_NUM_QUEUES; qid++) {
+	for (qid = 0; qid < DLB2_MAX_NUM_QUEUES(dlb2->version); qid++) {
 		uint32_t count_offset = stat_id;
 
 		dlb2->xstats_offset_for_qid[qid] = stat_id;
@@ -658,16 +658,15 @@ dlb2_eventdev_xstats_get_names(const struct rte_eventdev *dev,
 		xstats_mode_count = dlb2->xstats_count_mode_dev;
 		break;
 	case RTE_EVENT_DEV_XSTATS_PORT:
-		if (queue_port_id >= DLB2_MAX_NUM_PORTS)
+		if (queue_port_id >= DLB2_MAX_NUM_PORTS(dlb2->version))
 			break;
 		xstats_mode_count = dlb2->xstats_count_per_port[queue_port_id];
 		start_offset = dlb2->xstats_offset_for_port[queue_port_id];
 		break;
 	case RTE_EVENT_DEV_XSTATS_QUEUE:
-#if (DLB2_MAX_NUM_QUEUES <= 255) /* max 8 bit value */
-		if (queue_port_id >= DLB2_MAX_NUM_QUEUES)
+		if (queue_port_id >= DLB2_MAX_NUM_QUEUES(dlb2->version) &&
+		    (DLB2_MAX_NUM_QUEUES(dlb2->version) <= 255))
 			break;
-#endif
 		xstats_mode_count = dlb2->xstats_count_per_qid[queue_port_id];
 		start_offset = dlb2->xstats_offset_for_qid[queue_port_id];
 		break;
@@ -709,13 +708,13 @@ dlb2_xstats_update(struct dlb2_eventdev *dlb2,
 		xstats_mode_count = dlb2->xstats_count_mode_dev;
 		break;
 	case RTE_EVENT_DEV_XSTATS_PORT:
-		if (queue_port_id >= DLB2_MAX_NUM_PORTS)
+		if (queue_port_id >= DLB2_MAX_NUM_PORTS(dlb2->version))
 			goto invalid_value;
 		xstats_mode_count = dlb2->xstats_count_per_port[queue_port_id];
 		break;
 	case RTE_EVENT_DEV_XSTATS_QUEUE:
-#if (DLB2_MAX_NUM_QUEUES <= 255) /* max 8 bit value */
-		if (queue_port_id >= DLB2_MAX_NUM_QUEUES)
+#if (DLB2_MAX_NUM_QUEUES(DLB2_HW_V2_5) <= 255) /* max 8 bit value */
+		if (queue_port_id >= DLB2_MAX_NUM_QUEUES(dlb2->version))
 			goto invalid_value;
 #endif
 		xstats_mode_count = dlb2->xstats_count_per_qid[queue_port_id];
@@ -936,12 +935,13 @@ dlb2_eventdev_xstats_reset(struct rte_eventdev *dev,
 		break;
 	case RTE_EVENT_DEV_XSTATS_PORT:
 		if (queue_port_id == -1) {
-			for (i = 0; i < DLB2_MAX_NUM_PORTS; i++) {
+			for (i = 0; i < DLB2_MAX_NUM_PORTS(dlb2->version);
+					i++) {
 				if (dlb2_xstats_reset_port(dlb2, i,
 							   ids, nb_ids))
 					return -EINVAL;
 			}
-		} else if (queue_port_id < DLB2_MAX_NUM_PORTS) {
+		} else if (queue_port_id < DLB2_MAX_NUM_PORTS(dlb2->version)) {
 			if (dlb2_xstats_reset_port(dlb2, queue_port_id,
 						   ids, nb_ids))
 				return -EINVAL;
@@ -949,12 +949,13 @@ dlb2_eventdev_xstats_reset(struct rte_eventdev *dev,
 		break;
 	case RTE_EVENT_DEV_XSTATS_QUEUE:
 		if (queue_port_id == -1) {
-			for (i = 0; i < DLB2_MAX_NUM_QUEUES; i++) {
+			for (i = 0; i < DLB2_MAX_NUM_QUEUES(dlb2->version);
+					i++) {
 				if (dlb2_xstats_reset_queue(dlb2, i,
 							    ids, nb_ids))
 					return -EINVAL;
 			}
-		} else if (queue_port_id < DLB2_MAX_NUM_QUEUES) {
+		} else if (queue_port_id < DLB2_MAX_NUM_QUEUES(dlb2->version)) {
 			if (dlb2_xstats_reset_queue(dlb2, queue_port_id,
 						    ids, nb_ids))
 				return -EINVAL;
diff --git a/drivers/event/dlb2/pf/base/dlb2_hw_types.h b/drivers/event/dlb2/pf/base/dlb2_hw_types.h
index c7cd41f8b4..b007e1674f 100644
--- a/drivers/event/dlb2/pf/base/dlb2_hw_types.h
+++ b/drivers/event/dlb2/pf/base/dlb2_hw_types.h
@@ -12,18 +12,25 @@
 #include "dlb2_osdep_types.h"
 
 #define DLB2_MAX_NUM_VDEVS 16
-#define DLB2_MAX_NUM_AQED_ENTRIES 2048
 #define DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS 2
-#define DLB2_MAX_NUM_SEQUENCE_NUMBER_MODES 5
-
 #define DLB2_NUM_ARB_WEIGHTS 8
+#define DLB2_MAX_NUM_AQED_ENTRIES 2048
 #define DLB2_MAX_WEIGHT 255
 #define DLB2_NUM_COS_DOMAINS 4
+#define DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS 2
+#define DLB2_MAX_NUM_SEQUENCE_NUMBER_MODES 5
 #define DLB2_MAX_CQ_COMP_CHECK_LOOPS 409600
 #define DLB2_MAX_QID_EMPTY_CHECK_LOOPS (32 * 64 * 1024 * (800 / 30))
+
+#define DLB2_FUNC_BAR 0
+#define DLB2_CSR_BAR 2
+
 #define PCI_DEVICE_ID_INTEL_DLB2_PF 0x2710
 #define PCI_DEVICE_ID_INTEL_DLB2_VF 0x2711
 
+#define PCI_DEVICE_ID_INTEL_DLB2_5_PF 0x2714
+#define PCI_DEVICE_ID_INTEL_DLB2_5_VF 0x2715
+
 #define DLB2_ALARM_HW_SOURCE_SYS 0
 #define DLB2_ALARM_HW_SOURCE_DLB 1
 
@@ -55,7 +62,8 @@
 #define DLB2_DIR_PP_BASE 0x2000000
 #define DLB2_DIR_PP_STRIDE 0x1000
 #define DLB2_DIR_PP_BOUND (DLB2_DIR_PP_BASE + \
-			   DLB2_DIR_PP_STRIDE * DLB2_MAX_NUM_DIR_PORTS)
+			   DLB2_DIR_PP_STRIDE * \
+			   DLB2_MAX_NUM_DIR_PORTS_V2_5)
 #define DLB2_DIR_PP_OFFS(id) (DLB2_DIR_PP_BASE + (id) * DLB2_PP_SIZE)
 
 struct dlb2_resource_id {
@@ -183,7 +191,7 @@ struct dlb2_sn_group {
 
 static inline bool dlb2_sn_group_full(struct dlb2_sn_group *group)
 {
-	u32 mask[] = {
+	const u32 mask[] = {
 		0x0000ffff,  /* 64 SNs per queue */
 		0x000000ff,  /* 128 SNs per queue */
 		0x0000000f,  /* 256 SNs per queue */
@@ -195,7 +203,7 @@ static inline bool dlb2_sn_group_full(struct dlb2_sn_group *group)
 
 static inline int dlb2_sn_group_alloc_slot(struct dlb2_sn_group *group)
 {
-	u32 bound[6] = {16, 8, 4, 2, 1};
+	const u32 bound[] = {16, 8, 4, 2, 1};
 	u32 i;
 
 	for (i = 0; i < bound[group->mode]; i++) {
@@ -285,7 +293,7 @@ struct dlb2_function_resources {
 struct dlb2_hw_resources {
 	struct dlb2_ldb_queue ldb_queues[DLB2_MAX_NUM_LDB_QUEUES];
 	struct dlb2_ldb_port ldb_ports[DLB2_MAX_NUM_LDB_PORTS];
-	struct dlb2_dir_pq_pair dir_pq_pairs[DLB2_MAX_NUM_DIR_PORTS];
+	struct dlb2_dir_pq_pair dir_pq_pairs[DLB2_MAX_NUM_DIR_PORTS_V2_5];
 	struct dlb2_sn_group sn_groups[DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS];
 };
 
@@ -302,11 +310,13 @@ struct dlb2_sw_mbox {
 };
 
 struct dlb2_hw {
+	uint8_t ver;
+
 	/* BAR 0 address */
-	void   *csr_kva;
+	void *csr_kva;
 	unsigned long csr_phys_addr;
 	/* BAR 2 address */
-	void   *func_kva;
+	void *func_kva;
 	unsigned long func_phys_addr;
 
 	/* Resource tracking */
diff --git a/drivers/event/dlb2/pf/base/dlb2_resource.c b/drivers/event/dlb2/pf/base/dlb2_resource.c
index b57157fdc5..1cb0b9f50c 100644
--- a/drivers/event/dlb2/pf/base/dlb2_resource.c
+++ b/drivers/event/dlb2/pf/base/dlb2_resource.c
@@ -211,7 +211,7 @@ int dlb2_resource_init(struct dlb2_hw *hw)
 				    &port->func_list);
 	}
 
-	hw->pf.num_avail_dir_pq_pairs = DLB2_MAX_NUM_DIR_PORTS;
+	hw->pf.num_avail_dir_pq_pairs = DLB2_MAX_NUM_DIR_PORTS(hw->ver);
 
 	for (i = 0; i < hw->pf.num_avail_dir_pq_pairs; i++) {
 		list = &hw->rsrcs.dir_pq_pairs[i].func_list;
@@ -219,7 +219,9 @@ int dlb2_resource_init(struct dlb2_hw *hw)
 	}
 
 	hw->pf.num_avail_qed_entries = DLB2_MAX_NUM_LDB_CREDITS;
-	hw->pf.num_avail_dqed_entries = DLB2_MAX_NUM_DIR_CREDITS;
+	hw->pf.num_avail_dqed_entries =
+		DLB2_MAX_NUM_DIR_CREDITS(hw->ver);
+
 	hw->pf.num_avail_aqed_entries = DLB2_MAX_NUM_AQED_ENTRIES;
 
 	ret = dlb2_bitmap_alloc(&hw->pf.avail_hist_list_entries,
@@ -258,7 +260,7 @@ int dlb2_resource_init(struct dlb2_hw *hw)
 		hw->rsrcs.ldb_ports[i].id.vdev_owned = false;
 	}
 
-	for (i = 0; i < DLB2_MAX_NUM_DIR_PORTS; i++) {
+	for (i = 0; i < DLB2_MAX_NUM_DIR_PORTS(hw->ver); i++) {
 		hw->rsrcs.dir_pq_pairs[i].id.phys_id = i;
 		hw->rsrcs.dir_pq_pairs[i].id.vdev_owned = false;
 	}
@@ -2372,7 +2374,7 @@ static void dlb2_domain_disable_dir_vpps(struct dlb2_hw *hw,
 		else
 			virt_id = port->id.phys_id;
 
-		offs = vdev_id * DLB2_MAX_NUM_DIR_PORTS + virt_id;
+		offs = vdev_id * DLB2_MAX_NUM_DIR_PORTS(hw->ver) + virt_id;
 
 		DLB2_CSR_WR(hw, DLB2_SYS_VF_DIR_VPP_V(offs), r1.val);
 	}
@@ -2505,7 +2507,8 @@ static void
 dlb2_domain_disable_dir_queue_write_perms(struct dlb2_hw *hw,
 					  struct dlb2_hw_domain *domain)
 {
-	int domain_offset = domain->id.phys_id * DLB2_MAX_NUM_DIR_PORTS;
+	int domain_offset = domain->id.phys_id *
+		DLB2_MAX_NUM_DIR_PORTS(hw->ver);
 	struct dlb2_list_entry *iter;
 	struct dlb2_dir_pq_pair *queue;
 	RTE_SET_USED(iter);
@@ -2521,7 +2524,8 @@ dlb2_domain_disable_dir_queue_write_perms(struct dlb2_hw *hw,
 		DLB2_CSR_WR(hw, DLB2_SYS_DIR_VASQID_V(idx), r0.val);
 
 		if (queue->id.vdev_owned) {
-			idx = queue->id.vdev_id * DLB2_MAX_NUM_DIR_PORTS +
+			idx = queue->id.vdev_id *
+				DLB2_MAX_NUM_DIR_PORTS(hw->ver) +
 				queue->id.virt_id;
 
 			DLB2_CSR_WR(hw,
@@ -2960,7 +2964,8 @@ __dlb2_domain_reset_dir_port_registers(struct dlb2_hw *hw,
 		else
 			virt_id = port->id.phys_id;
 
-		offs = port->id.vdev_id * DLB2_MAX_NUM_DIR_PORTS + virt_id;
+		offs = port->id.vdev_id * DLB2_MAX_NUM_DIR_PORTS(hw->ver) +
+			virt_id;
 
 		DLB2_CSR_WR(hw,
 			    DLB2_SYS_VF_DIR_VPP2PP(offs),
@@ -4483,7 +4488,8 @@ dlb2_log_create_dir_port_args(struct dlb2_hw *hw,
 }
 
 static struct dlb2_dir_pq_pair *
-dlb2_get_domain_used_dir_pq(u32 id,
+dlb2_get_domain_used_dir_pq(struct dlb2_hw *hw,
+			    u32 id,
 			    bool vdev_req,
 			    struct dlb2_hw_domain *domain)
 {
@@ -4491,7 +4497,7 @@ dlb2_get_domain_used_dir_pq(u32 id,
 	struct dlb2_dir_pq_pair *port;
 	RTE_SET_USED(iter);
 
-	if (id >= DLB2_MAX_NUM_DIR_PORTS)
+	if (id >= DLB2_MAX_NUM_DIR_PORTS(hw->ver))
 		return NULL;
 
 	DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter)
@@ -4537,7 +4543,8 @@ dlb2_verify_create_dir_port_args(struct dlb2_hw *hw,
 	if (args->queue_id != -1) {
 		struct dlb2_dir_pq_pair *queue;
 
-		queue = dlb2_get_domain_used_dir_pq(args->queue_id,
+		queue = dlb2_get_domain_used_dir_pq(hw,
+						    args->queue_id,
 						    vdev_req,
 						    domain);
 
@@ -4617,7 +4624,7 @@ static void dlb2_dir_port_configure_pp(struct dlb2_hw *hw,
 
 	r1.field.pp = port->id.phys_id;
 
-	offs = vdev_id * DLB2_MAX_NUM_DIR_PORTS + virt_id;
+	offs = vdev_id * DLB2_MAX_NUM_DIR_PORTS(hw->ver) + virt_id;
 
 	DLB2_CSR_WR(hw, DLB2_SYS_VF_DIR_VPP2PP(offs), r1.val);
 
@@ -4856,7 +4863,8 @@ int dlb2_hw_create_dir_port(struct dlb2_hw *hw,
 	domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
 
 	if (args->queue_id != -1)
-		port = dlb2_get_domain_used_dir_pq(args->queue_id,
+		port = dlb2_get_domain_used_dir_pq(hw,
+						   args->queue_id,
 						   vdev_req,
 						   domain);
 	else
@@ -4912,7 +4920,7 @@ static void dlb2_configure_dir_queue(struct dlb2_hw *hw,
 	/* QID write permissions are turned on when the domain is started */
 	r0.field.vasqid_v = 0;
 
-	offs = domain->id.phys_id * DLB2_MAX_NUM_DIR_QUEUES +
+	offs = domain->id.phys_id * DLB2_MAX_NUM_DIR_QUEUES(hw->ver) +
 		queue->id.phys_id;
 
 	DLB2_CSR_WR(hw, DLB2_SYS_DIR_VASQID_V(offs), r0.val);
@@ -4934,7 +4942,8 @@ static void dlb2_configure_dir_queue(struct dlb2_hw *hw,
 		union dlb2_sys_vf_dir_vqid_v r3 = { {0} };
 		union dlb2_sys_vf_dir_vqid2qid r4 = { {0} };
 
-		offs = vdev_id * DLB2_MAX_NUM_DIR_QUEUES + queue->id.virt_id;
+		offs = vdev_id * DLB2_MAX_NUM_DIR_QUEUES(hw->ver) +
+			queue->id.virt_id;
 
 		r3.field.vqid_v = 1;
 
@@ -5000,7 +5009,8 @@ dlb2_verify_create_dir_queue_args(struct dlb2_hw *hw,
 	if (args->port_id != -1) {
 		struct dlb2_dir_pq_pair *port;
 
-		port = dlb2_get_domain_used_dir_pq(args->port_id,
+		port = dlb2_get_domain_used_dir_pq(hw,
+						   args->port_id,
 						   vdev_req,
 						   domain);
 
@@ -5071,7 +5081,8 @@ int dlb2_hw_create_dir_queue(struct dlb2_hw *hw,
 	}
 
 	if (args->port_id != -1)
-		queue = dlb2_get_domain_used_dir_pq(args->port_id,
+		queue = dlb2_get_domain_used_dir_pq(hw,
+						    args->port_id,
 						    vdev_req,
 						    domain);
 	else
@@ -5919,7 +5930,7 @@ dlb2_hw_start_domain(struct dlb2_hw *hw,
 
 		r0.field.vasqid_v = 1;
 
-		offs = domain->id.phys_id * DLB2_MAX_NUM_DIR_PORTS +
+		offs = domain->id.phys_id * DLB2_MAX_NUM_DIR_PORTS(hw->ver) +
 			dir_queue->id.phys_id;
 
 		DLB2_CSR_WR(hw, DLB2_SYS_DIR_VASQID_V(offs), r0.val);
@@ -5971,7 +5982,7 @@ int dlb2_hw_get_dir_queue_depth(struct dlb2_hw *hw,
 
 	id = args->queue_id;
 
-	queue = dlb2_get_domain_used_dir_pq(id, vdev_req, domain);
+	queue = dlb2_get_domain_used_dir_pq(hw, id, vdev_req, domain);
 	if (queue == NULL) {
 		resp->status = DLB2_ST_INVALID_QID;
 		return -EINVAL;
diff --git a/drivers/event/dlb2/pf/dlb2_pf.c b/drivers/event/dlb2/pf/dlb2_pf.c
index cfb22efe8a..f57dc1584e 100644
--- a/drivers/event/dlb2/pf/dlb2_pf.c
+++ b/drivers/event/dlb2/pf/dlb2_pf.c
@@ -47,7 +47,7 @@ dlb2_pf_low_level_io_init(void)
 {
 	int i;
 	/* Addresses will be initialized at port create */
-	for (i = 0; i < DLB2_MAX_NUM_PORTS; i++) {
+	for (i = 0; i < DLB2_MAX_NUM_PORTS(DLB2_HW_V2_5); i++) {
 		/* First directed ports */
 		dlb2_port[i][DLB2_DIR_PORT].pp_addr = NULL;
 		dlb2_port[i][DLB2_DIR_PORT].cq_base = NULL;
@@ -628,6 +628,7 @@ dlb2_eventdev_pci_init(struct rte_eventdev *eventdev)
 
 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
 		dlb2 = dlb2_pmd_priv(eventdev); /* rte_zmalloc_socket mem */
+		dlb2->version = DLB2_HW_DEVICE_FROM_PCI_ID(pci_dev);
 
 		/* Probe the DLB2 PF layer */
 		dlb2->qm_instance.pf_dev = dlb2_probe(pci_dev);
@@ -643,7 +644,8 @@ dlb2_eventdev_pci_init(struct rte_eventdev *eventdev)
 		if (pci_dev->device.devargs) {
 			ret = dlb2_parse_params(pci_dev->device.devargs->args,
 						pci_dev->device.devargs->name,
-						&dlb2_args);
+						&dlb2_args,
+						dlb2->version);
 			if (ret) {
 				DLB2_LOG_ERR("PFPMD failed to parse args ret=%d, errno=%d\n",
 					     ret, rte_errno);
@@ -655,6 +657,8 @@ dlb2_eventdev_pci_init(struct rte_eventdev *eventdev)
 					   event_dlb2_pf_name,
 					   &dlb2_args);
 	} else {
+		dlb2 = dlb2_pmd_priv(eventdev);
+		dlb2->version = DLB2_HW_DEVICE_FROM_PCI_ID(pci_dev);
 		ret = dlb2_secondary_eventdev_probe(eventdev,
 						    event_dlb2_pf_name);
 	}
@@ -684,6 +688,16 @@ static const struct rte_pci_id pci_id_dlb2_map[] = {
 	},
 };
 
+static const struct rte_pci_id pci_id_dlb2_5_map[] = {
+	{
+		RTE_PCI_DEVICE(EVENTDEV_INTEL_VENDOR_ID,
+			       PCI_DEVICE_ID_INTEL_DLB2_5_PF)
+	},
+	{
+		.vendor_id = 0,
+	},
+};
+
 static int
 event_dlb2_pci_probe(struct rte_pci_driver *pci_drv,
 		     struct rte_pci_device *pci_dev)
@@ -718,6 +732,40 @@ event_dlb2_pci_remove(struct rte_pci_device *pci_dev)
 
 }
 
+static int
+event_dlb2_5_pci_probe(struct rte_pci_driver *pci_drv,
+		       struct rte_pci_device *pci_dev)
+{
+	int ret;
+
+	ret = rte_event_pmd_pci_probe_named(pci_drv, pci_dev,
+					    sizeof(struct dlb2_eventdev),
+					    dlb2_eventdev_pci_init,
+					    event_dlb2_pf_name);
+	if (ret) {
+		DLB2_LOG_INFO("rte_event_pmd_pci_probe_named() failed, "
+			      "ret=%d\n", ret);
+	}
+
+	return ret;
+}
+
+static int
+event_dlb2_5_pci_remove(struct rte_pci_device *pci_dev)
+{
+	int ret;
+
+	ret = rte_event_pmd_pci_remove(pci_dev, NULL);
+
+	if (ret) {
+		DLB2_LOG_INFO("rte_event_pmd_pci_remove() failed, "
+			      "ret=%d\n", ret);
+	}
+
+	return ret;
+
+}
+
 static struct rte_pci_driver pci_eventdev_dlb2_pmd = {
 	.id_table = pci_id_dlb2_map,
 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
@@ -725,5 +773,15 @@ static struct rte_pci_driver pci_eventdev_dlb2_pmd = {
 	.remove = event_dlb2_pci_remove,
 };
 
+static struct rte_pci_driver pci_eventdev_dlb2_5_pmd = {
+	.id_table = pci_id_dlb2_5_map,
+	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+	.probe = event_dlb2_5_pci_probe,
+	.remove = event_dlb2_5_pci_remove,
+};
+
 RTE_PMD_REGISTER_PCI(event_dlb2_pf, pci_eventdev_dlb2_pmd);
 RTE_PMD_REGISTER_PCI_TABLE(event_dlb2_pf, pci_id_dlb2_map);
+
+RTE_PMD_REGISTER_PCI(event_dlb2_5_pf, pci_eventdev_dlb2_5_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(event_dlb2_5_pf, pci_id_dlb2_5_map);
-- 
2.20.1
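
Note (illustrative, not part of the patch): the following minimal,
standalone C sketch shows how the version-parameterized macros added in
dlb2_priv.h above are expected to resolve. The constant values are
copied from the hunks in this patch; the demo program itself is
hypothetical and only exercises the macro arithmetic.

#include <stdio.h>

/* Version selectors and per-version counts, as in dlb2_priv.h above */
#define DLB2_HW_V2 0
#define DLB2_HW_V2_5 1
#define DLB2_MAX_NUM_LDB_PORTS 64
#define DLB2_MAX_NUM_DIR_QUEUES_V2 64
#define DLB2_MAX_NUM_DIR_QUEUES_V2_5 96
#define DLB2_MAX_NUM_DIR_PORTS_V2 DLB2_MAX_NUM_DIR_QUEUES_V2
#define DLB2_MAX_NUM_DIR_PORTS_V2_5 DLB2_MAX_NUM_DIR_QUEUES_V2_5

#define DLB2_MAX_NUM_DIR_PORTS(ver) ((ver) == DLB2_HW_V2 ? \
				     DLB2_MAX_NUM_DIR_PORTS_V2 : \
				     DLB2_MAX_NUM_DIR_PORTS_V2_5)
#define DLB2_MAX_NUM_PORTS(ver) \
	(DLB2_MAX_NUM_DIR_PORTS(ver) + DLB2_MAX_NUM_LDB_PORTS)

int main(void)
{
	/* v2:   64 directed + 64 load-balanced = 128 ports
	 * v2.5: 96 directed + 64 load-balanced = 160 ports, matching
	 * DLB2_MAX_NUM_PORTS_ALL, the worst-case bound used above to
	 * size version-independent arrays such as dlb2_port[].
	 */
	printf("v2 max ports:   %d\n", DLB2_MAX_NUM_PORTS(DLB2_HW_V2));
	printf("v2.5 max ports: %d\n", DLB2_MAX_NUM_PORTS(DLB2_HW_V2_5));
	return 0;
}

Compiled and run, this prints 128 for DLB v2 and 160 for DLB v2.5,
which is why the probe path stores dlb2->version early: every
per-version bound (ports, queues, credits) is derived from it at
runtime, while static storage is sized with the larger v2.5 *_ALL
constants.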