/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2020 Intel Corporation
 */

#ifndef _DLB_PRIV_H_
#define _DLB_PRIV_H_

#include <rte_bus_pci.h>
#include <rte_eventdev.h>
#include <rte_eventdev_pmd.h>
#include <rte_eventdev_pmd_pci.h>
#ifndef RTE_LIBRTE_PMD_DLB_QUELL_STATS
#define DLB_INC_STAT(_stat, _incr_val) ((_stat) += _incr_val)
#else
#define DLB_INC_STAT(_stat, _incr_val)
#endif
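/* Example (illustrative, not an actual call site in this header):
 * DLB_INC_STAT(ev_port->stats.traffic.total_polls, 1) increments a per-port
 * counter and compiles to nothing when RTE_LIBRTE_PMD_DLB_QUELL_STATS is
 * defined.
 */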
#define EVDEV_DLB_NAME_PMD_STR "dlb_event"

/* command line arg strings */
#define NUMA_NODE_ARG "numa_node"
#define DLB_MAX_NUM_EVENTS "max_num_events"
#define DLB_NUM_DIR_CREDITS "num_dir_credits"
#define DEV_ID_ARG "dev_id"
#define DLB_DEFER_SCHED_ARG "defer_sched"
#define DLB_NUM_ATM_INFLIGHTS_ARG "atm_inflights"
/* Begin HW related defines and structs */

#define DLB_MAX_NUM_DOMAINS 32
#define DLB_MAX_NUM_VFS 16
#define DLB_MAX_NUM_LDB_QUEUES 128
#define DLB_MAX_NUM_LDB_PORTS 64
#define DLB_MAX_NUM_DIR_PORTS 128
#define DLB_MAX_NUM_DIR_QUEUES 128
#define DLB_MAX_NUM_FLOWS (64 * 1024)
#define DLB_MAX_NUM_LDB_CREDITS 16384
#define DLB_MAX_NUM_DIR_CREDITS 4096
#define DLB_MAX_NUM_LDB_CREDIT_POOLS 64
#define DLB_MAX_NUM_DIR_CREDIT_POOLS 64
#define DLB_MAX_NUM_HIST_LIST_ENTRIES 5120
#define DLB_MAX_NUM_ATM_INFLIGHTS 2048
#define DLB_MAX_NUM_QIDS_PER_LDB_CQ 8
#define DLB_QID_PRIORITIES 8
#define DLB_MAX_DEVICE_PATH 32
#define DLB_MIN_DEQUEUE_TIMEOUT_NS 1
#define DLB_NUM_SN_GROUPS 4
#define DLB_MAX_LDB_SN_ALLOC 1024
/* Note: "- 1" here to support the timeout range check in eventdev_autotest */
#define DLB_MAX_DEQUEUE_TIMEOUT_NS (UINT32_MAX - 1)
#define DLB_DEF_UNORDERED_QID_INFLIGHTS 2048
/* There are 5120 total hist list entries and 64 total ldb ports, which
 * makes for 5120/64 == 80 hist list entries per port. However, the CQ
 * depth must be a power of 2 and the hist list entries must be >= the
 * CQ depth, so the maximum dequeue depth is limited to 64.
 */
#define DLB_MIN_LDB_CQ_DEPTH 1
#define DLB_MIN_DIR_CQ_DEPTH 8
#define DLB_MIN_HARDWARE_CQ_DEPTH 8
#define DLB_MAX_CQ_DEPTH 64
#define DLB_NUM_HIST_LIST_ENTRIES_PER_LDB_PORT \
	DLB_MAX_CQ_DEPTH
/* Static per queue/port provisioning values */
#define DLB_NUM_ATOMIC_INFLIGHTS_PER_QUEUE 16

#define PP_BASE(is_dir) ((is_dir) ? DLB_DIR_PP_BASE : DLB_LDB_PP_BASE)

#define PAGE_SIZE (sysconf(_SC_PAGESIZE))

#define DLB_NUM_QES_PER_CACHE_LINE 4

#define DLB_MAX_ENQUEUE_DEPTH 64
#define DLB_MIN_ENQUEUE_DEPTH 4

#define DLB_NAME_SIZE 64
/* Use the upper 3 bits of the event priority to select the DLB priority */
#define EV_TO_DLB_PRIO(x) ((x) >> 5)
#define DLB_TO_EV_PRIO(x) ((x) << 5)
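/* For example, RTE_EVENT_DEV_PRIORITY_HIGHEST (0) maps to DLB priority 0,
 * RTE_EVENT_DEV_PRIORITY_NORMAL (128) to 4, and RTE_EVENT_DEV_PRIORITY_LOWEST
 * (255) to 7; the reverse macro returns the first event priority in each
 * band (e.g. DLB priority 4 maps back to 128).
 */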
enum dlb_hw_port_type {
	DLB_LDB,
	DLB_DIR,

	/* NUM_DLB_PORT_TYPES must be last */
	NUM_DLB_PORT_TYPES
};

#define PORT_TYPE(p) ((p)->is_directed ? DLB_DIR : DLB_LDB)
/* Do not change - must match hardware! */
enum dlb_hw_sched_type {
	DLB_SCHED_ATOMIC = 0,
	DLB_SCHED_UNORDERED,
	DLB_SCHED_ORDERED,
	DLB_SCHED_DIRECTED,

	/* DLB_NUM_HW_SCHED_TYPES must be last */
	DLB_NUM_HW_SCHED_TYPES
};

	int num_dir_credits_override;
	int num_atm_inflights;
struct dlb_hw_rsrcs {
	int32_t nb_events_limit;
	uint32_t num_queues;	/* Total queues (ldb + dir) */
	uint32_t num_ldb_queues;	/* Number of available ldb queues */
	uint32_t num_ldb_ports;	/* Number of load balanced ports */
	uint32_t num_dir_ports;	/* Number of directed ports */
	uint32_t num_ldb_credits;	/* Number of load balanced credits */
	uint32_t num_dir_credits;	/* Number of directed credits */
	uint32_t reorder_window_size;	/* Size of reorder window */
};
struct dlb_hw_resource_info {
	/** Max resources that can be provided */
	struct dlb_hw_rsrcs hw_rsrc_max;
	int num_sched_domains;

	/** EAL flags passed to this DLB instance, allowing the application
	 * to identify whether the PMD backend is hardware or software.
	 */
	const char *eal_flags;
};
/* hw-specific format - do not change */
struct dlb_event_type {

union dlb_opaque_data {
	uint16_t opaque_data;
	struct dlb_event_type event_type;
};

struct dlb_msg_info {
	uint8_t sched_type:2;

#define DLB_NEW_CMD_BYTE 0x08
#define DLB_FWD_CMD_BYTE 0x0A
#define DLB_COMP_CMD_BYTE 0x02
#define DLB_NOOP_CMD_BYTE 0x00
#define DLB_POP_CMD_BYTE 0x01
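/* The command bytes above are written into the QE command field on enqueue:
 * NEW enqueues a new event, FWD forwards a previously dequeued event, COMP
 * releases (completes) an event, POP returns consumed CQ tokens, and NOOP
 * carries no command (informal summary based on the macro names; the exact
 * encoding is defined by the hardware QE format).
 */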
/* hw-specific format - do not change */
struct dlb_enqueue_qe {

	union dlb_opaque_data u;

	uint8_t sched_type:2;

/* hw-specific format - do not change */
struct dlb_cq_pop_qe {

	union dlb_opaque_data u;

	uint8_t sched_type:2;

/* hw-specific format - do not change */
struct dlb_dequeue_qe {

	union dlb_opaque_data u;

	uint8_t sched_type:2;

enum dlb_port_state {

enum dlb_configuration_state {
	/* The resource has not been configured */

	/* The resource was configured, but the device was stopped */

	/* The resource is currently configured */
	uint16_t dir_credits;
	uint32_t dequeue_depth;

	uint16_t cached_ldb_credits;
	uint16_t ldb_pushcount_at_credit_expiry;
	uint16_t ldb_credits;
	uint16_t cached_dir_credits;
	uint16_t dir_pushcount_at_credit_expiry;

	bool use_rsvd_token_scheme;
	uint8_t cq_rsvd_token_deficit;
	uint16_t owed_tokens;
	int16_t issued_releases;

	uint16_t cq_idx_unmasked;
	uint16_t cq_depth_mask;
	uint16_t gen_bit_shift;
	enum dlb_port_state state;
	enum dlb_configuration_state config_state;

	uint8_t *qid_mappings;
	struct dlb_enqueue_qe *qe4; /* Cache line's worth of QEs (4) */
	struct dlb_cq_pop_qe *consume_qe;
	struct dlb_eventdev *dlb; /* back ptr */
	struct dlb_eventdev_port *ev_port; /* back ptr */
/* Per-process per-port mmio and memory pointers */
struct process_local_port_data {
	uint16_t *ldb_popcount;
	uint16_t *dir_popcount;
	struct dlb_dequeue_qe *cq_base;
	const struct rte_memzone *mz;

	uint32_t ldb_credit_pool_id;
	uint32_t dir_credit_pool_id;
	uint32_t num_ldb_credits;
	uint32_t num_dir_credits;
	struct dlb_create_sched_domain_args resources;
	struct dlb_config cfg;
	struct dlb_hw_resource_info info;
	void *pf_dev; /* opaque pointer to PF PMD dev (struct dlb_dev) */

	rte_spinlock_t resource_lock; /* for MP support */
} __rte_cache_aligned;

/* End HW related defines and structs */

/* Begin DLB PMD Eventdev related defines and structs */

#define DLB_MAX_NUM_QUEUES \
	(DLB_MAX_NUM_DIR_QUEUES + DLB_MAX_NUM_LDB_QUEUES)

#define DLB_MAX_NUM_PORTS (DLB_MAX_NUM_DIR_PORTS + DLB_MAX_NUM_LDB_PORTS)
#define DLB_MAX_INPUT_QUEUE_DEPTH 256

/** Structure to hold the queue to port link establishment attributes */

struct dlb_event_queue_link {
struct dlb_traffic_stats {

	uint64_t rx_interrupt_wait;
	uint64_t rx_umonitor_umwait;

	uint64_t total_polls;
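	/* The tx_nospc_* counters below record enqueue attempts rejected for
	 * lack of space: hardware LDB/DIR credits, the port's inflight_max,
	 * the device-wide new_event_limit, and software inflight credits
	 * (summary based on the counter names).
	 */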
	uint64_t tx_nospc_ldb_hw_credits;
	uint64_t tx_nospc_dir_hw_credits;
	uint64_t tx_nospc_inflight_max;
	uint64_t tx_nospc_new_event_limit;
	uint64_t tx_nospc_inflight_credits;
};
struct dlb_port_stats {
	struct dlb_traffic_stats traffic;
	uint64_t tx_op_cnt[4]; /* indexed by rte_event.op */
	uint64_t tx_implicit_rel;
	uint64_t tx_sched_cnt[DLB_NUM_HW_SCHED_TYPES];

	uint64_t rx_sched_cnt[DLB_NUM_HW_SCHED_TYPES];
	uint64_t rx_sched_invalid;
	uint64_t enq_ok[DLB_MAX_NUM_QUEUES]; /* per-queue enq_ok */
};
struct dlb_eventdev_port {
	struct dlb_port qm_port; /* hw specific data structure */
	struct rte_event_port_conf conf; /* user-supplied configuration */
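	/* inflight_credits is the port's local cache of software credits;
	 * credits are replenished from the device-wide new_event_limit pool
	 * in chunks of credit_update_quanta to limit contention on the
	 * shared inflights counter (descriptive note inferred from the
	 * fields below).
	 */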
	uint16_t inflight_credits; /* num credits this port has right now */
	uint16_t credit_update_quanta;
	struct dlb_eventdev *dlb; /* backlink optimization */
	struct dlb_port_stats stats __rte_cache_aligned;
	struct dlb_event_queue_link link[DLB_MAX_NUM_QIDS_PER_LDB_CQ];

	/* num releases yet to be completed on this port.
	 * Only applies to load-balanced ports.
	 */
	uint16_t outstanding_releases;
	uint16_t inflight_max; /* app requested max inflights for this port */
	/* setup_done is set when the event port is setup */
	bool setup_done;
	/* enq_configured is set when the qm port is created */
	bool enq_configured;
	uint8_t implicit_release; /* release events before dequeueing */
} __rte_cache_aligned;
	uint32_t num_qid_inflights; /* User config */
	uint32_t num_atm_inflights; /* User config */
	enum dlb_configuration_state config_state;
	int sched_type; /* LB queue only */

struct dlb_eventdev_queue {
	struct dlb_queue qm_queue;
	struct rte_event_queue_conf conf; /* User config */
enum dlb_run_state {
	DLB_RUN_STATE_STOPPED = 0,
	DLB_RUN_STATE_STOPPING,
	DLB_RUN_STATE_STARTING,
	DLB_RUN_STATE_STARTED
};
struct dlb_eventdev {
	struct dlb_eventdev_port ev_ports[DLB_MAX_NUM_PORTS];
	struct dlb_eventdev_queue ev_queues[DLB_MAX_NUM_QUEUES];
	uint8_t qm_ldb_to_ev_queue_id[DLB_MAX_NUM_QUEUES];
	uint8_t qm_dir_to_ev_queue_id[DLB_MAX_NUM_QUEUES];

	/* store num stats and offset of the stats for each queue */
	uint16_t xstats_count_per_qid[DLB_MAX_NUM_QUEUES];
	uint16_t xstats_offset_for_qid[DLB_MAX_NUM_QUEUES];

	/* store num stats and offset of the stats for each port */
	uint16_t xstats_count_per_port[DLB_MAX_NUM_PORTS];
	uint16_t xstats_offset_for_port[DLB_MAX_NUM_PORTS];
	struct dlb_get_num_resources_args hw_rsrc_query_results;
	uint32_t xstats_count_mode_queue;
	struct dlb_hw_dev qm_instance; /* strictly hw related */
	uint64_t global_dequeue_wait_ticks;
	struct dlb_xstats_entry *xstats;
	struct rte_eventdev *event_dev; /* backlink to dev */
	uint32_t xstats_count_mode_port;
	uint32_t xstats_count_mode_dev;
	uint32_t xstats_count;
	uint32_t inflights; /* use __atomic builtins to access */
	uint32_t new_event_limit;
	int max_num_events_override;
	int num_dir_credits_override;
	volatile enum dlb_run_state run_state;
	uint16_t num_dir_queues; /* total num of evdev dir queues requested */
	uint16_t num_dir_credits;
	uint16_t num_ldb_credits;
	uint16_t num_queues; /* total queues */
	uint16_t num_ldb_queues; /* total num of evdev ldb queues requested */
	uint16_t num_ports; /* total num of evdev ports requested */
	uint16_t num_ldb_ports; /* total num of ldb ports requested */
	uint16_t num_dir_ports; /* total num of dir ports requested */

	bool global_dequeue_wait; /* If true, per-dequeue wait is not used */

	unsigned int num_atm_inflights_per_queue;
	enum dlb_cq_poll_modes poll_mode;

/* End Eventdev related defines and structs */
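/* Per-process table of port MMIO and memory pointers (see struct
 * process_local_port_data above); each process keeps its own copy, indexed
 * by port id and port type (NUM_DLB_PORT_TYPES).
 */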
extern struct process_local_port_data dlb_port[][NUM_DLB_PORT_TYPES];
/* Forwards for non-inlined functions */

void dlb_eventdev_dump(struct rte_eventdev *dev, FILE *f);

int dlb_xstats_init(struct dlb_eventdev *dlb);

void dlb_xstats_uninit(struct dlb_eventdev *dlb);

int dlb_eventdev_xstats_get(const struct rte_eventdev *dev,
	enum rte_event_dev_xstats_mode mode,
	uint8_t queue_port_id, const unsigned int ids[],
	uint64_t values[], unsigned int n);

int dlb_eventdev_xstats_get_names(const struct rte_eventdev *dev,
	enum rte_event_dev_xstats_mode mode,
	uint8_t queue_port_id,
	struct rte_event_dev_xstats_name *xstat_names,
	unsigned int *ids, unsigned int size);

uint64_t dlb_eventdev_xstats_get_by_name(const struct rte_eventdev *dev,
	const char *name, unsigned int *id);
int dlb_eventdev_xstats_reset(struct rte_eventdev *dev,
	enum rte_event_dev_xstats_mode mode,
	int16_t queue_port_id,
	const uint32_t ids[],
	uint32_t nb_ids);
int test_dlb_eventdev(void);

int dlb_primary_eventdev_probe(struct rte_eventdev *dev,
	const char *name,
	struct dlb_devargs *dlb_args);

int dlb_secondary_eventdev_probe(struct rte_eventdev *dev,
	const char *name);

uint32_t dlb_get_queue_depth(struct dlb_eventdev *dlb,
	struct dlb_eventdev_queue *queue);

int dlb_parse_params(const char *params,
	const char *name,
	struct dlb_devargs *dlb_args);
void dlb_entry_points_init(struct rte_eventdev *dev);

#endif /* _DLB_PRIV_H_ */