/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2020 Intel Corporation
 */

#ifndef _DLB2_PRIV_H_
#define _DLB2_PRIV_H_
#include <stdbool.h>

#include <rte_eventdev.h>
#include <rte_config.h>
#include "dlb2_user.h"

#include "rte_pmd_dlb2.h"
#ifndef RTE_LIBRTE_PMD_DLB2_QUELL_STATS
#define DLB2_INC_STAT(_stat, _incr_val) ((_stat) += (_incr_val))
#else
#define DLB2_INC_STAT(_stat, _incr_val)
#endif
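
/*
 * Illustrative usage (not part of the original header): the fast path
 * bumps counters with, e.g.,
 *
 *   DLB2_INC_STAT(ev_port->stats.traffic.total_polls, 1);
 *
 * When RTE_LIBRTE_PMD_DLB2_QUELL_STATS is defined, the macro expands to
 * nothing, so its arguments must be free of side effects.
 */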

#define EVDEV_DLB2_NAME_PMD dlb2_event

/* command line arg strings */
#define NUMA_NODE_ARG "numa_node"
#define DLB2_MAX_NUM_EVENTS "max_num_events"
#define DLB2_NUM_DIR_CREDITS "num_dir_credits"
#define DEV_ID_ARG "dev_id"
#define DLB2_DEFER_SCHED_ARG "defer_sched"
#define DLB2_QID_DEPTH_THRESH_ARG "qid_depth_thresh"
#define DLB2_COS_ARG "cos"
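
/*
 * For example (illustrative; the exact key syntax is handled by
 * dlb2_parse_params() below), these keys arrive in a vdev argument
 * string such as:
 *
 *   --vdev=dlb2_event,numa_node=0,max_num_events=2048,cos=1
 */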

/* Begin HW related defines and structs */

#define DLB2_MAX_NUM_DOMAINS 32
#define DLB2_MAX_NUM_VFS 16
#define DLB2_MAX_NUM_LDB_QUEUES 32
#define DLB2_MAX_NUM_LDB_PORTS 64
#define DLB2_MAX_NUM_DIR_PORTS 64
#define DLB2_MAX_NUM_DIR_QUEUES 64
#define DLB2_MAX_NUM_FLOWS (64 * 1024)
#define DLB2_MAX_NUM_LDB_CREDITS (8 * 1024)
#define DLB2_MAX_NUM_DIR_CREDITS (2 * 1024)
#define DLB2_MAX_NUM_LDB_CREDIT_POOLS 64
#define DLB2_MAX_NUM_DIR_CREDIT_POOLS 64
#define DLB2_MAX_NUM_HIST_LIST_ENTRIES 2048
#define DLB2_MAX_NUM_AQOS_ENTRIES 2048
#define DLB2_MAX_NUM_QIDS_PER_LDB_CQ 8
#define DLB2_QID_PRIORITIES 8
#define DLB2_MAX_DEVICE_PATH 32
#define DLB2_MIN_DEQUEUE_TIMEOUT_NS 1
/* Note: "- 1" here to support the timeout range check in eventdev_autotest */
#define DLB2_MAX_DEQUEUE_TIMEOUT_NS (UINT32_MAX - 1)
#define DLB2_SW_CREDIT_BATCH_SZ 32
#define DLB2_NUM_SN_GROUPS 2
#define DLB2_MAX_LDB_SN_ALLOC 1024
#define DLB2_MAX_QUEUE_DEPTH_THRESHOLD 8191

/* 2048 total hist list entries and 64 total ldb ports, which
 * makes for 2048/64 == 32 hist list entries per port. However, CQ
 * depth must be a power of 2 and must also be >= HIST LIST entries.
 * As a result we just limit the maximum dequeue depth to 32.
 */
#define DLB2_MIN_CQ_DEPTH 1
#define DLB2_MAX_CQ_DEPTH 32
#define DLB2_MIN_HARDWARE_CQ_DEPTH 8
#define DLB2_NUM_HIST_LIST_ENTRIES_PER_LDB_PORT \
	DLB2_MAX_CQ_DEPTH
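
/*
 * A sketch of a compile-time check (not in the original header) tying the
 * derivation above together: 2048 history list entries spread across 64
 * load-balanced ports leaves 32 per port, which also bounds the CQ depth:
 *
 *   RTE_BUILD_BUG_ON(DLB2_MAX_NUM_HIST_LIST_ENTRIES /
 *                    DLB2_MAX_NUM_LDB_PORTS != DLB2_MAX_CQ_DEPTH);
 */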

/*
 * Static per queue/port provisioning values
 */
#define DLB2_NUM_ATOMIC_INFLIGHTS_PER_QUEUE 64

#define CQ_BASE(is_dir) ((is_dir) ? DLB2_DIR_CQ_BASE : DLB2_LDB_CQ_BASE)
#define CQ_SIZE(is_dir) ((is_dir) ? DLB2_DIR_CQ_MAX_SIZE : \
			 DLB2_LDB_CQ_MAX_SIZE)

#define PP_BASE(is_dir) ((is_dir) ? DLB2_DIR_PP_BASE : DLB2_LDB_PP_BASE)

#define DLB2_NUM_QES_PER_CACHE_LINE 4
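
/*
 * Illustrative note (not in the original header): each QE is 16 bytes, so
 * four of them fill one 64-byte cache line; the enqueue path builds a cache
 * line's worth at a time (see the qe4 member of struct dlb2_port below).
 */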

#define DLB2_MAX_ENQUEUE_DEPTH 64
#define DLB2_MIN_ENQUEUE_DEPTH 4

#define DLB2_NAME_SIZE 64

#define DLB2_1K 1024
#define DLB2_2K (2 * DLB2_1K)
#define DLB2_4K (4 * DLB2_1K)
#define DLB2_16K (16 * DLB2_1K)
#define DLB2_32K (32 * DLB2_1K)
#define DLB2_1MB (DLB2_1K * DLB2_1K)
#define DLB2_16MB (16 * DLB2_1MB)

/* Use the upper 3 bits of the event priority to select the DLB2 priority */
#define EV_TO_DLB2_PRIO(x) ((x) >> 5)
#define DLB2_TO_EV_PRIO(x) ((x) << 5)
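
/*
 * For example (illustrative, not part of the original header): the eventdev
 * normal priority maps to the middle of DLB2's eight levels, and the round
 * trip loses the low five bits:
 *
 *   EV_TO_DLB2_PRIO(RTE_EVENT_DEV_PRIORITY_NORMAL) == 128 >> 5 == 4
 *   DLB2_TO_EV_PRIO(4) == 128
 */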

enum dlb2_hw_port_types {
	DLB2_LDB_PORT,
	DLB2_DIR_PORT,
	DLB2_NUM_PORT_TYPES /* Must be last */
};

enum dlb2_hw_queue_types {
	DLB2_LDB_QUEUE,
	DLB2_DIR_QUEUE,
	DLB2_NUM_QUEUE_TYPES /* Must be last */
};

#define PORT_TYPE(p) ((p)->is_directed ? DLB2_DIR_PORT : DLB2_LDB_PORT)

/* Do not change - must match hardware! */
enum dlb2_hw_sched_type {
	DLB2_SCHED_ATOMIC = 0,
	DLB2_SCHED_UNORDERED,
	DLB2_SCHED_ORDERED,
	DLB2_SCHED_DIRECTED,
	/* DLB2_NUM_HW_SCHED_TYPES must be last */
	DLB2_NUM_HW_SCHED_TYPES
};

struct dlb2_hw_rsrcs {
	int32_t nb_events_limit;
	uint32_t num_queues;		/* Total queues (lb + dir) */
	uint32_t num_ldb_queues;	/* Number of available ldb queues */
	uint32_t num_ldb_ports;		/* Number of load balanced ports */
	uint32_t num_dir_ports;		/* Number of directed ports */
	uint32_t num_ldb_credits;	/* Number of load balanced credits */
	uint32_t num_dir_credits;	/* Number of directed credits */
	uint32_t reorder_window_size;	/* Size of reorder window */
};

struct dlb2_hw_resource_info {
	/** Max resources that can be provided */
	struct dlb2_hw_rsrcs hw_rsrc_max;
	int num_sched_domains;
	uint32_t socket_id;
};

enum dlb2_enqueue_type {
	/**
	 * New : Used to inject a new packet into the QM.
	 */
	DLB2_ENQ_NEW,
	/**
	 * Forward : Enqueues a packet, and
	 *  - if atomic: release any lock it holds in the QM
	 *  - if ordered: release the packet for egress re-ordering
	 */
	DLB2_ENQ_FWD,
	/**
	 * Enqueue Drop : Release an inflight packet. Must be called with
	 * event == NULL. Used to drop a packet.
	 *
	 * Note that all packets dequeued from a load-balanced port must be
	 * released, either with DLB2_ENQ_DROP or DLB2_ENQ_FWD.
	 */
	DLB2_ENQ_DROP,

	/* marker for array sizing etc. */
	_DLB2_NB_ENQ_TYPES
};

/* hw-specific format - do not change */
struct dlb2_event_type {
	uint8_t major:4;
	uint8_t unused:4;
	uint8_t sub;
};

union dlb2_opaque_data {
	uint16_t opaque_data;
	struct dlb2_event_type event_type;
};

struct dlb2_msg_info {
	uint8_t qid;
	uint8_t sched_type:2;
	uint8_t priority:3;
	uint8_t msg_type:3;
};

#define DLB2_NEW_CMD_BYTE 0x08
#define DLB2_FWD_CMD_BYTE 0x0A
#define DLB2_COMP_CMD_BYTE 0x02
#define DLB2_POP_CMD_BYTE 0x01
#define DLB2_NOOP_CMD_BYTE 0x00
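
/*
 * Observation (not upstream documentation): the forward command byte (0x0A)
 * is the new command (0x08) combined with the completion command (0x02),
 * i.e. a forward both injects a QE and completes the QE it was dequeued
 * from, while a pop (0x01) only returns CQ tokens.
 */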

/* hw-specific format - do not change */
struct dlb2_enqueue_qe {
	uint64_t data;
	/* Word 3 */
	union dlb2_opaque_data u;
	uint8_t qid;
	uint8_t sched_type:2;
	uint8_t priority:3;
	uint8_t msg_type:3;
	/* Word 4 */
	uint16_t lock_id;
	uint8_t meas_lat:1;
	uint8_t rsvd1:2;
	uint8_t no_dec:1;
	uint8_t cmp_id:4;
	union {
		uint8_t cmd_byte;
		struct {
			uint8_t cq_token:1;
			uint8_t qe_comp:1;
			uint8_t qe_frag:1;
			uint8_t qe_valid:1;
			uint8_t rsvd3:1;
			uint8_t error:1;
			uint8_t int_arm:1;
			uint8_t rsvd4:1;
		};
	};
};

/* hw-specific format - do not change */
struct dlb2_cq_pop_qe {
	uint64_t data;
	union dlb2_opaque_data u;
	uint8_t qid;
	uint8_t sched_type:2;
	uint8_t priority:3;
	uint8_t msg_type:3;
	uint16_t tokens:10;
	uint16_t rsvd2:6;
	uint8_t meas_lat:1;
	uint8_t rsvd1:2;
	uint8_t no_dec:1;
	uint8_t cmp_id:4;
	union {
		uint8_t cmd_byte;
		struct {
			uint8_t cq_token:1;
			uint8_t qe_comp:1;
			uint8_t qe_frag:1;
			uint8_t qe_valid:1;
			uint8_t rsvd3:1;
			uint8_t error:1;
			uint8_t int_arm:1;
			uint8_t rsvd4:1;
		};
	};
};

/* hw-specific format - do not change */
struct dlb2_dequeue_qe {
	uint64_t data;
	union dlb2_opaque_data u;
	uint8_t qid;
	uint8_t sched_type:2;
	uint8_t priority:3;
	uint8_t msg_type:3;
	uint16_t flow_id:16; /* was pp_id in v1 */
	uint8_t debug;
	uint8_t cq_gen:1;
	uint8_t qid_depth:2; /* 2 bits in v2 */
	uint8_t rsvd1:2;
	uint8_t error:1;
	uint8_t int_arm:1;
	uint8_t rsvd2:1;
};
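
/*
 * Illustrative (an assumption based on the fields above and on
 * cq_depth_mask below): the CQ is a power-of-two ring of these 16-byte
 * QEs, so the next entry to inspect is
 *
 *   &cq_base[cq_idx_unmasked & cq_depth_mask]
 *
 * with cq_gen distinguishing fresh completions from stale ones after the
 * index wraps.
 */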

union dlb2_port_config {
	struct dlb2_create_ldb_port_args ldb;
	struct dlb2_create_dir_port_args dir;
};

enum dlb2_port_state {
	PORT_CLOSED,
	PORT_STARTED,
	PORT_STOPPED
};

enum dlb2_configuration_state {
	/* The resource has not been configured */
	DLB2_NOT_CONFIGURED,
	/* The resource was configured, but the device was stopped */
	DLB2_PREV_CONFIGURED,
	/* The resource is currently configured */
	DLB2_CONFIGURED
};

struct dlb2_port {
	uint32_t id;
	bool is_directed;
	bool gen_bit;
	uint16_t dir_credits;
	uint32_t dequeue_depth;
	enum dlb2_token_pop_mode token_pop_mode;
	union dlb2_port_config cfg;
	uint32_t *credit_pool[DLB2_NUM_QUEUE_TYPES]; /* use __atomic builtins */
	uint16_t cached_ldb_credits;
	uint16_t ldb_credits;
	uint16_t cached_dir_credits;
	bool int_armed;
	uint16_t owed_tokens;
	int16_t issued_releases;
	int16_t token_pop_thresh;
	int cq_depth;
	uint16_t cq_idx;
	uint16_t cq_idx_unmasked;
	uint16_t cq_depth_mask;
	uint16_t gen_bit_shift;
	enum dlb2_port_state state;
	enum dlb2_configuration_state config_state;
	int num_mapped_qids;
	uint8_t *qid_mappings;
	struct dlb2_enqueue_qe *qe4; /* Cache line's worth of QEs (4) */
	struct dlb2_enqueue_qe *int_arm_qe;
	struct dlb2_cq_pop_qe *consume_qe;
	struct dlb2_eventdev *dlb2; /* back ptr */
	struct dlb2_eventdev_port *ev_port; /* back ptr */
};
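
#ifdef DLB2_PRIV_EXAMPLES /* hypothetical guard; sketch, not driver code */
/*
 * A minimal sketch of how the "use __atomic builtins" annotation on
 * credit_pool might be honored, assuming ports pull credits from the
 * shared pool in DLB2_SW_CREDIT_BATCH_SZ chunks and then spend them from
 * the port-local cache without further atomics. The function name and
 * exact policy are illustrative, not the driver's actual fast path.
 */
static inline int
dlb2_example_pull_ldb_credits(struct dlb2_port *qm_port)
{
	uint32_t *pool = qm_port->credit_pool[DLB2_LDB_QUEUE];
	uint32_t batch = DLB2_SW_CREDIT_BATCH_SZ;
	uint32_t avail = __atomic_load_n(pool, __ATOMIC_ACQUIRE);

	do {
		if (avail < batch)
			return -1; /* shared pool exhausted */
	} while (!__atomic_compare_exchange_n(pool, &avail, avail - batch,
					      true, __ATOMIC_SEQ_CST,
					      __ATOMIC_SEQ_CST));

	qm_port->cached_ldb_credits += batch;
	return 0;
}
#endif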

/* Per-process per-port mmio and memory pointers */
struct process_local_port_data {
	uint64_t *pp_addr;
	struct dlb2_dequeue_qe *cq_base;
	const struct rte_memzone *mz;
	bool mmaped;
};

struct dlb2_eventdev;

struct dlb2_config {
	int configured;
	int reserved;
	uint32_t num_ldb_credits;
	uint32_t num_dir_credits;
	struct dlb2_create_sched_domain_args resources;
};

enum dlb2_cos {
	DLB2_COS_DEFAULT = -1,
	DLB2_COS_0 = 0,
	DLB2_COS_1,
	DLB2_COS_2,
	DLB2_COS_3
};

struct dlb2_hw_dev {
	struct dlb2_config cfg;
	struct dlb2_hw_resource_info info;
	void *pf_dev; /* opaque pointer to PF PMD dev (struct dlb2_dev) */
	uint32_t domain_id;
	enum dlb2_cos cos_id;
	rte_spinlock_t resource_lock; /* for MP support */
} __rte_cache_aligned;

/* End HW related defines and structs */

/* Begin DLB2 PMD Eventdev related defines and structs */

#define DLB2_MAX_NUM_QUEUES \
	(DLB2_MAX_NUM_DIR_QUEUES + DLB2_MAX_NUM_LDB_QUEUES)

#define DLB2_MAX_NUM_PORTS (DLB2_MAX_NUM_DIR_PORTS + DLB2_MAX_NUM_LDB_PORTS)
#define DLB2_MAX_INPUT_QUEUE_DEPTH 256

/** Structure to hold the queue to port link establishment attributes */

struct dlb2_event_queue_link {
	uint8_t queue_id;
	uint8_t priority;
	bool mapped;
	bool valid;
};

struct dlb2_traffic_stats {
	uint64_t rx_ok;
	uint64_t rx_drop;
	uint64_t rx_interrupt_wait;
	uint64_t rx_umonitor_umwait;
	uint64_t tx_ok;
	uint64_t total_polls;
	uint64_t zero_polls;
	uint64_t tx_nospc_ldb_hw_credits;
	uint64_t tx_nospc_dir_hw_credits;
	uint64_t tx_nospc_inflight_max;
	uint64_t tx_nospc_new_event_limit;
	uint64_t tx_nospc_inflight_credits;
};

/* DLB2 HW sets the 2-bit qid_depth in rx QEs based on the programmable depth
 * threshold. The global default value in config/common_base (or rte_config.h)
 * can be overridden on a per-qid basis using a vdev command line parameter.
 * 3: depth > threshold
 * 2: threshold >= depth > 3/4 threshold
 * 1: 3/4 threshold >= depth > 1/2 threshold
 * 0: depth <= 1/2 threshold.
 */
#define DLB2_QID_DEPTH_LE50 0
#define DLB2_QID_DEPTH_GT50_LE75 1
#define DLB2_QID_DEPTH_GT75_LE100 2
#define DLB2_QID_DEPTH_GT100 3
#define DLB2_NUM_QID_DEPTH_STAT_VALS 4 /* 2 bits */
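
#ifdef DLB2_PRIV_EXAMPLES /* hypothetical guard; sketch, not driver code */
/*
 * Software restatement of the 2-bit depth class documented above; an
 * assumption for illustration only, since the classification is actually
 * done by hardware when it writes rx QEs.
 */
static inline int
dlb2_example_qid_depth_class(uint32_t depth, uint32_t threshold)
{
	if (depth > threshold)
		return DLB2_QID_DEPTH_GT100;
	if (depth > (3 * threshold) / 4)
		return DLB2_QID_DEPTH_GT75_LE100;
	if (depth > threshold / 2)
		return DLB2_QID_DEPTH_GT50_LE75;
	return DLB2_QID_DEPTH_LE50;
}
#endif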

struct dlb2_queue_stats {
	uint64_t enq_ok;
	uint64_t qid_depth[DLB2_NUM_QID_DEPTH_STAT_VALS];
};

struct dlb2_port_stats {
	struct dlb2_traffic_stats traffic;
	uint64_t tx_op_cnt[4]; /* indexed by rte_event.op */
	uint64_t tx_implicit_rel;
	uint64_t tx_sched_cnt[DLB2_NUM_HW_SCHED_TYPES];
	uint64_t tx_invalid;
	uint64_t rx_sched_cnt[DLB2_NUM_HW_SCHED_TYPES];
	uint64_t rx_sched_invalid;
	struct dlb2_queue_stats queue[DLB2_MAX_NUM_QUEUES];
};

struct dlb2_eventdev_port {
	struct dlb2_port qm_port; /* hw specific data structure */
	struct rte_event_port_conf conf; /* user-supplied configuration */
	uint16_t inflight_credits; /* num credits this port has right now */
	uint16_t credit_update_quanta;
	struct dlb2_eventdev *dlb2; /* backlink optimization */
	struct dlb2_port_stats stats __rte_cache_aligned;
	struct dlb2_event_queue_link link[DLB2_MAX_NUM_QIDS_PER_LDB_CQ];
	int num_links;
	uint32_t id; /* port id */
	/* num releases yet to be completed on this port.
	 * Only applies to load-balanced ports.
	 */
	uint16_t outstanding_releases;
	uint16_t inflight_max; /* app requested max inflights for this port */
	/* setup_done is set when the event port is setup */
	bool setup_done;
	/* enq_configured is set when the qm port is created */
	bool enq_configured;
	uint8_t implicit_release; /* release events before dequeueing */
} __rte_cache_aligned;

struct dlb2_queue {
	uint32_t num_qid_inflights; /* User config */
	uint32_t num_atm_inflights; /* User config */
	enum dlb2_configuration_state config_state;
	int sched_type; /* LB queue only */
	uint32_t id;
	bool is_directed;
};

struct dlb2_eventdev_queue {
	struct dlb2_queue qm_queue;
	struct rte_event_queue_conf conf; /* User config */
	int depth_threshold; /* use default if 0 */
	uint32_t id;
	bool setup_done;
	uint8_t num_links;
};

enum dlb2_run_state {
	DLB2_RUN_STATE_STOPPED = 0,
	DLB2_RUN_STATE_STOPPING,
	DLB2_RUN_STATE_STARTING,
	DLB2_RUN_STATE_STARTED
};

struct dlb2_eventdev {
	struct dlb2_eventdev_port ev_ports[DLB2_MAX_NUM_PORTS];
	struct dlb2_eventdev_queue ev_queues[DLB2_MAX_NUM_QUEUES];
	uint8_t qm_ldb_to_ev_queue_id[DLB2_MAX_NUM_QUEUES];
	uint8_t qm_dir_to_ev_queue_id[DLB2_MAX_NUM_QUEUES];
	/* store num stats and offset of the stats for each queue */
	uint16_t xstats_count_per_qid[DLB2_MAX_NUM_QUEUES];
	uint16_t xstats_offset_for_qid[DLB2_MAX_NUM_QUEUES];
	/* store num stats and offset of the stats for each port */
	uint16_t xstats_count_per_port[DLB2_MAX_NUM_PORTS];
	uint16_t xstats_offset_for_port[DLB2_MAX_NUM_PORTS];
	struct dlb2_get_num_resources_args hw_rsrc_query_results;
	uint32_t xstats_count_mode_queue;
	struct dlb2_hw_dev qm_instance; /* strictly hw related */
	uint64_t global_dequeue_wait_ticks;
	struct dlb2_xstats_entry *xstats;
	struct rte_eventdev *event_dev; /* backlink to dev */
	uint32_t xstats_count_mode_dev;
	uint32_t xstats_count_mode_port;
	uint32_t xstats_count;
	uint32_t inflights; /* use __atomic builtins */
	uint32_t new_event_limit;
	int max_num_events_override;
	int num_dir_credits_override;
	volatile enum dlb2_run_state run_state;
	uint16_t num_dir_queues; /* total num of evdev dir queues requested */
	uint16_t num_dir_credits;
	uint16_t num_ldb_credits;
	uint16_t num_queues; /* total queues */
	uint16_t num_ldb_queues; /* total num of evdev ldb queues requested */
	uint16_t num_ports; /* total num of evdev ports requested */
	uint16_t num_ldb_ports; /* total num of ldb ports requested */
	uint16_t num_dir_ports; /* total num of dir ports requested */
	bool umwait_allowed;
	bool global_dequeue_wait; /* Not using per dequeue wait if true */
	bool defer_sched;
	enum dlb2_cq_poll_modes poll_mode;
	uint8_t revision;
	bool configured;
	uint16_t max_ldb_credits;
	uint16_t max_dir_credits;

	/* force hw credit pool counters into exclusive cache lines */

	/* use __atomic builtins */ /* shared hw cred */
	uint32_t ldb_credit_pool __rte_cache_aligned;
	/* use __atomic builtins */ /* shared hw cred */
	uint32_t dir_credit_pool __rte_cache_aligned;
};
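
#ifdef DLB2_PRIV_EXAMPLES /* hypothetical guard; sketch, not driver code */
/*
 * Assumed interaction between the device-wide atomic inflight counter and
 * a port's local inflight_credits cache: credits are claimed in
 * credit_update_quanta chunks and refused once new_event_limit would be
 * exceeded (cf. the tx_nospc_new_event_limit stat). Illustrative only.
 */
static inline int
dlb2_example_claim_inflights(struct dlb2_eventdev *dlb2,
			     struct dlb2_eventdev_port *ev_port)
{
	uint32_t quanta = ev_port->credit_update_quanta;

	if (__atomic_load_n(&dlb2->inflights, __ATOMIC_ACQUIRE) + quanta >
	    dlb2->new_event_limit)
		return -1; /* would exceed the NEW event limit */

	__atomic_fetch_add(&dlb2->inflights, quanta, __ATOMIC_SEQ_CST);
	ev_port->inflight_credits += quanta;
	return 0;
}
#endif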

/* used for collecting and passing around the dev args */
struct dlb2_qid_depth_thresholds {
	int val[DLB2_MAX_NUM_QUEUES];
};

struct dlb2_devargs {
	int socket_id;
	int max_num_events;
	int num_dir_credits_override;
	int dev_id;
	int defer_sched;
	struct dlb2_qid_depth_thresholds qid_depth_thresholds;
	enum dlb2_cos cos_id;
};

/* End Eventdev related defines and structs */

/* Forwards for non-inlined functions */

void dlb2_eventdev_dump(struct rte_eventdev *dev, FILE *f);

int dlb2_xstats_init(struct dlb2_eventdev *dlb2);

void dlb2_xstats_uninit(struct dlb2_eventdev *dlb2);

int dlb2_eventdev_xstats_get(const struct rte_eventdev *dev,
		enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
		const unsigned int ids[], uint64_t values[], unsigned int n);

int dlb2_eventdev_xstats_get_names(const struct rte_eventdev *dev,
		enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
		struct rte_event_dev_xstats_name *xstat_names,
		unsigned int *ids, unsigned int size);

uint64_t dlb2_eventdev_xstats_get_by_name(const struct rte_eventdev *dev,
					  const char *name, unsigned int *id);

int dlb2_eventdev_xstats_reset(struct rte_eventdev *dev,
		enum rte_event_dev_xstats_mode mode,
		int16_t queue_port_id,
		const uint32_t ids[],
		uint32_t nb_ids);

int test_dlb2_eventdev(void);

int dlb2_primary_eventdev_probe(struct rte_eventdev *dev,
				const char *name,
				struct dlb2_devargs *dlb2_args);

int dlb2_secondary_eventdev_probe(struct rte_eventdev *dev,
				  const char *name);

uint32_t dlb2_get_queue_depth(struct dlb2_eventdev *dlb2,
			      struct dlb2_eventdev_queue *queue);

int dlb2_parse_params(const char *params,
		      const char *name,
		      struct dlb2_devargs *dlb2_args);

extern struct process_local_port_data dlb2_port[][DLB2_NUM_PORT_TYPES];

#endif /* _DLB2_PRIV_H_ */