#define EV_TO_DLB2_PRIO(x) ((x) >> 5)
#define DLB2_TO_EV_PRIO(x) ((x) << 5)
+enum dlb2_hw_ver {
+ DLB2_HW_VER_2,
+ DLB2_HW_VER_2_5,
+};
+
enum dlb2_hw_port_types {
DLB2_LDB_PORT,
DLB2_DIR_PORT,
'pf/dlb2_main.c',
'pf/dlb2_pf.c',
'pf/base/dlb2_resource.c',
+ 'pf/base/dlb2_resource_new.c',
'rte_pmd_dlb2.c',
'dlb2_selftest.c',
)
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2020 Intel Corporation
+ */
+
+#ifndef __DLB2_HW_TYPES_NEW_H
+#define __DLB2_HW_TYPES_NEW_H
+
+#include "../../dlb2_priv.h"
+#include "dlb2_user.h"
+
+#include "dlb2_osdep_list.h"
+#include "dlb2_osdep_types.h"
+#include "dlb2_regs_new.h"
+
+#define DLB2_BITS_SET(x, val, mask) (x = ((x) & ~(mask)) \
+ | (((val) << (mask##_LOC)) & (mask)))
+#define DLB2_BITS_CLR(x, mask) (x &= ~(mask))
+#define DLB2_BIT_SET(x, mask) ((x) |= (mask))
+#define DLB2_BITS_GET(x, mask) (((x) & (mask)) >> (mask##_LOC))
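+
+/*
+ * Example (illustrative only): for a register field defined by a
+ * hypothetical mask/offset pair
+ *
+ *	#define MY_FIELD	0x00000070
+ *	#define MY_FIELD_LOC	4
+ *
+ * DLB2_BITS_SET(reg, 5, MY_FIELD) writes 5 into bits [6:4] of reg, and
+ * DLB2_BITS_GET(reg, MY_FIELD) reads the field back.
+ */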
+
+#define DLB2_MAX_NUM_VDEVS 16
+#define DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS 2
+#define DLB2_NUM_ARB_WEIGHTS 8
+#define DLB2_MAX_NUM_AQED_ENTRIES 2048
+#define DLB2_MAX_WEIGHT 255
+#define DLB2_NUM_COS_DOMAINS 4
+#define DLB2_MAX_NUM_SEQUENCE_NUMBER_MODES 5
+#define DLB2_MAX_CQ_COMP_CHECK_LOOPS 409600
+#define DLB2_MAX_QID_EMPTY_CHECK_LOOPS (32 * 64 * 1024 * (800 / 30))
+
+#define DLB2_FUNC_BAR 0
+#define DLB2_CSR_BAR 2
+
+#define PCI_DEVICE_ID_INTEL_DLB2_PF 0x2710
+#define PCI_DEVICE_ID_INTEL_DLB2_VF 0x2711
+
+#define PCI_DEVICE_ID_INTEL_DLB2_5_PF 0x2714
+#define PCI_DEVICE_ID_INTEL_DLB2_5_VF 0x2715
+
+#define DLB2_ALARM_HW_SOURCE_SYS 0
+#define DLB2_ALARM_HW_SOURCE_DLB 1
+
+#define DLB2_ALARM_HW_UNIT_CHP 4
+
+#define DLB2_ALARM_SYS_AID_ILLEGAL_QID 3
+#define DLB2_ALARM_SYS_AID_DISABLED_QID 4
+#define DLB2_ALARM_SYS_AID_ILLEGAL_HCW 5
+#define DLB2_ALARM_HW_CHP_AID_ILLEGAL_ENQ 1
+#define DLB2_ALARM_HW_CHP_AID_EXCESS_TOKEN_POPS 2
+
+/*
+ * Hardware-defined base addresses. Those prefixed 'DLB2_DRV' are only used by
+ * the PF driver.
+ */
+#define DLB2_DRV_LDB_PP_BASE 0x2300000
+#define DLB2_DRV_LDB_PP_STRIDE 0x1000
+#define DLB2_DRV_LDB_PP_BOUND (DLB2_DRV_LDB_PP_BASE + \
+ DLB2_DRV_LDB_PP_STRIDE * DLB2_MAX_NUM_LDB_PORTS)
+#define DLB2_DRV_DIR_PP_BASE 0x2200000
+#define DLB2_DRV_DIR_PP_STRIDE 0x1000
+#define DLB2_DRV_DIR_PP_BOUND (DLB2_DRV_DIR_PP_BASE + \
+ DLB2_DRV_DIR_PP_STRIDE * \
+ DLB2_MAX_NUM_DIR_PORTS_V2_5)
+#define DLB2_LDB_PP_BASE 0x2100000
+#define DLB2_LDB_PP_STRIDE 0x1000
+#define DLB2_LDB_PP_BOUND (DLB2_LDB_PP_BASE + \
+ DLB2_LDB_PP_STRIDE * DLB2_MAX_NUM_LDB_PORTS)
+#define DLB2_LDB_PP_OFFS(id) (DLB2_LDB_PP_BASE + (id) * DLB2_PP_SIZE)
+#define DLB2_DIR_PP_BASE 0x2000000
+#define DLB2_DIR_PP_STRIDE 0x1000
+#define DLB2_DIR_PP_BOUND (DLB2_DIR_PP_BASE + \
+ DLB2_DIR_PP_STRIDE * \
+ DLB2_MAX_NUM_DIR_PORTS_V2_5)
+#define DLB2_DIR_PP_OFFS(id) (DLB2_DIR_PP_BASE + (id) * DLB2_PP_SIZE)
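+
+/*
+ * Example (illustrative only): assuming DLB2_PP_SIZE equals the 0x1000
+ * stride above, load-balanced producer port 3 is mapped at
+ * DLB2_LDB_PP_OFFS(3) == 0x2100000 + 3 * 0x1000 == 0x2103000.
+ */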
+
+struct dlb2_resource_id {
+ u32 phys_id;
+ u32 virt_id;
+ u8 vdev_owned;
+ u8 vdev_id;
+};
+
+struct dlb2_freelist {
+ u32 base;
+ u32 bound;
+ u32 offset;
+};
+
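+/* Return the number of unallocated entries remaining in the freelist. */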
+static inline u32 dlb2_freelist_count(struct dlb2_freelist *list)
+{
+ return list->bound - list->base - list->offset;
+}
+
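+/*
+ * Hardware control word (HCW): the 16-byte unit exchanged with the device;
+ * enqueue commands and QEs are written to a producer port in this format.
+ */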
+struct dlb2_hcw {
+ u64 data;
+ /* Word 3 */
+ u16 opaque;
+ u8 qid;
+ u8 sched_type:2;
+ u8 priority:3;
+ u8 msg_type:3;
+ /* Word 4 */
+ u16 lock_id;
+ u8 ts_flag:1;
+ u8 rsvd1:2;
+ u8 no_dec:1;
+ u8 cmp_id:4;
+ u8 cq_token:1;
+ u8 qe_comp:1;
+ u8 qe_frag:1;
+ u8 qe_valid:1;
+ u8 int_arm:1;
+ u8 error:1;
+ u8 rsvd:2;
+};
+
+struct dlb2_ldb_queue {
+ struct dlb2_list_entry domain_list;
+ struct dlb2_list_entry func_list;
+ struct dlb2_resource_id id;
+ struct dlb2_resource_id domain_id;
+ u32 num_qid_inflights;
+ u32 aqed_limit;
+ u32 sn_group; /* sn == sequence number */
+ u32 sn_slot;
+ u32 num_mappings;
+ u8 sn_cfg_valid;
+ u8 num_pending_additions;
+ u8 owned;
+ u8 configured;
+};
+
+/*
+ * Directed ports and queues are paired by nature, so the driver tracks them
+ * with a single data structure.
+ */
+struct dlb2_dir_pq_pair {
+ struct dlb2_list_entry domain_list;
+ struct dlb2_list_entry func_list;
+ struct dlb2_resource_id id;
+ struct dlb2_resource_id domain_id;
+ u32 ref_cnt;
+ u8 init_tkn_cnt;
+ u8 queue_configured;
+ u8 port_configured;
+ u8 owned;
+ u8 enabled;
+};
+
+enum dlb2_qid_map_state {
+ /* The slot does not contain a valid queue mapping */
+ DLB2_QUEUE_UNMAPPED,
+ /* The slot contains a valid queue mapping */
+ DLB2_QUEUE_MAPPED,
+ /* The driver is mapping a queue into this slot */
+ DLB2_QUEUE_MAP_IN_PROG,
+ /* The driver is unmapping a queue from this slot */
+ DLB2_QUEUE_UNMAP_IN_PROG,
+ /*
+ * The driver is unmapping a queue from this slot, and once complete
+ * will replace it with another mapping.
+ */
+ DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP,
+};
+
+struct dlb2_ldb_port_qid_map {
+ enum dlb2_qid_map_state state;
+ u16 qid;
+ u16 pending_qid;
+ u8 priority;
+ u8 pending_priority;
+};
+
+struct dlb2_ldb_port {
+ struct dlb2_list_entry domain_list;
+ struct dlb2_list_entry func_list;
+ struct dlb2_resource_id id;
+ struct dlb2_resource_id domain_id;
+ /* The qid_map represents the hardware QID mapping state. */
+ struct dlb2_ldb_port_qid_map qid_map[DLB2_MAX_NUM_QIDS_PER_LDB_CQ];
+ u32 hist_list_entry_base;
+ u32 hist_list_entry_limit;
+ u32 ref_cnt;
+ u8 init_tkn_cnt;
+ u8 num_pending_removals;
+ u8 num_mappings;
+ u8 owned;
+ u8 enabled;
+ u8 configured;
+};
+
+struct dlb2_sn_group {
+ u32 mode;
+ u32 sequence_numbers_per_queue;
+ u32 slot_use_bitmap;
+ u32 id;
+};
+
+static inline bool dlb2_sn_group_full(struct dlb2_sn_group *group)
+{
+ const u32 mask[] = {
+ 0x0000ffff, /* 64 SNs per queue */
+ 0x000000ff, /* 128 SNs per queue */
+ 0x0000000f, /* 256 SNs per queue */
+ 0x00000003, /* 512 SNs per queue */
+ 0x00000001}; /* 1024 SNs per queue */
+
+ return group->slot_use_bitmap == mask[group->mode];
+}
+
+static inline int dlb2_sn_group_alloc_slot(struct dlb2_sn_group *group)
+{
+ const u32 bound[] = {16, 8, 4, 2, 1};
+ u32 i;
+
+ for (i = 0; i < bound[group->mode]; i++) {
+ if (!(group->slot_use_bitmap & (1 << i))) {
+ group->slot_use_bitmap |= 1 << i;
+ return i;
+ }
+ }
+
+ return -1;
+}
+
+static inline void
+dlb2_sn_group_free_slot(struct dlb2_sn_group *group, int slot)
+{
+ group->slot_use_bitmap &= ~(1 << slot);
+}
+
+static inline int dlb2_sn_group_used_slots(struct dlb2_sn_group *group)
+{
+ int i, cnt = 0;
+
+ for (i = 0; i < 32; i++)
+ cnt += !!(group->slot_use_bitmap & (1 << i));
+
+ return cnt;
+}
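+
+/*
+ * Illustrative (not compiled) use of the SN group helpers when configuring
+ * an ordered queue and tearing it down:
+ *
+ *	struct dlb2_sn_group *grp = &hw->rsrcs.sn_groups[0];
+ *	int slot;
+ *
+ *	if (dlb2_sn_group_full(grp))
+ *		return -ENOSPC;
+ *	slot = dlb2_sn_group_alloc_slot(grp);
+ *	...
+ *	dlb2_sn_group_free_slot(grp, slot);
+ */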
+
+struct dlb2_hw_domain {
+ struct dlb2_function_resources *parent_func;
+ struct dlb2_list_entry func_list;
+ struct dlb2_list_head used_ldb_queues;
+ struct dlb2_list_head used_ldb_ports[DLB2_NUM_COS_DOMAINS];
+ struct dlb2_list_head used_dir_pq_pairs;
+ struct dlb2_list_head avail_ldb_queues;
+ struct dlb2_list_head avail_ldb_ports[DLB2_NUM_COS_DOMAINS];
+ struct dlb2_list_head avail_dir_pq_pairs;
+ u32 total_hist_list_entries;
+ u32 avail_hist_list_entries;
+ u32 hist_list_entry_base;
+ u32 hist_list_entry_offset;
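+ /*
+  * DLB v2.0 uses separate load-balanced and directed credit pools, while
+  * v2.5 uses a single combined pool; the two layouts share storage here.
+  */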
+ union {
+ struct {
+ u32 num_ldb_credits;
+ u32 num_dir_credits;
+ };
+ struct {
+ u32 num_credits;
+ };
+ };
+ u32 num_avail_aqed_entries;
+ u32 num_used_aqed_entries;
+ struct dlb2_resource_id id;
+ int num_pending_removals;
+ int num_pending_additions;
+ u8 configured;
+ u8 started;
+};
+
+struct dlb2_bitmap;
+
+struct dlb2_function_resources {
+ struct dlb2_list_head avail_domains;
+ struct dlb2_list_head used_domains;
+ struct dlb2_list_head avail_ldb_queues;
+ struct dlb2_list_head avail_ldb_ports[DLB2_NUM_COS_DOMAINS];
+ struct dlb2_list_head avail_dir_pq_pairs;
+ struct dlb2_bitmap *avail_hist_list_entries;
+ u32 num_avail_domains;
+ u32 num_avail_ldb_queues;
+ u32 num_avail_ldb_ports[DLB2_NUM_COS_DOMAINS];
+ u32 num_avail_dir_pq_pairs;
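+ /*
+  * DLB v2.0: separate QED (load-balanced) and DQED (directed) credit
+  * counts. DLB v2.5: a single combined credit count.
+  */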
+ union {
+ struct {
+ u32 num_avail_qed_entries;
+ u32 num_avail_dqed_entries;
+ };
+ struct {
+ u32 num_avail_entries;
+ };
+ };
+ u32 num_avail_aqed_entries;
+ u8 locked; /* (VDEV only) */
+};
+
+/*
+ * After initialization, each resource in dlb2_hw_resources is located in one
+ * of the following lists:
+ * -- The PF's available resources list. These are unconfigured resources owned
+ * by the PF and not allocated to a dlb2 scheduling domain.
+ * -- A VDEV's available resources list. These are VDEV-owned unconfigured
+ * resources not allocated to a dlb2 scheduling domain.
+ * -- A domain's available resources list. These are domain-owned unconfigured
+ * resources.
+ * -- A domain's used resources list. These are domain-owned configured
+ * resources.
+ *
+ * A resource moves to a new list when a VDEV or domain is created or destroyed,
+ * or when the resource is configured.
+ */
+struct dlb2_hw_resources {
+ struct dlb2_ldb_queue ldb_queues[DLB2_MAX_NUM_LDB_QUEUES];
+ struct dlb2_ldb_port ldb_ports[DLB2_MAX_NUM_LDB_PORTS];
+ struct dlb2_dir_pq_pair dir_pq_pairs[DLB2_MAX_NUM_DIR_PORTS_V2_5];
+ struct dlb2_sn_group sn_groups[DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS];
+};
+
+struct dlb2_mbox {
+ u32 *mbox;
+ u32 *isr_in_progress;
+};
+
+struct dlb2_sw_mbox {
+ struct dlb2_mbox vdev_to_pf;
+ struct dlb2_mbox pf_to_vdev;
+ void (*pf_to_vdev_inject)(void *arg);
+ void *pf_to_vdev_inject_arg;
+};
+
+struct dlb2_hw {
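+ /* Device version: DLB2_HW_VER_2 or DLB2_HW_VER_2_5 */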
+ uint8_t ver;
+
+ /* BAR 2 (DLB2_CSR_BAR) address */
+ void *csr_kva;
+ unsigned long csr_phys_addr;
+ /* BAR 0 (DLB2_FUNC_BAR) address */
+ void *func_kva;
+ unsigned long func_phys_addr;
+
+ /* Resource tracking */
+ struct dlb2_hw_resources rsrcs;
+ struct dlb2_function_resources pf;
+ struct dlb2_function_resources vdev[DLB2_MAX_NUM_VDEVS];
+ struct dlb2_hw_domain domains[DLB2_MAX_NUM_DOMAINS];
+ u8 cos_reservation[DLB2_NUM_COS_DOMAINS];
+
+ /* Virtualization */
+ int virt_mode;
+ struct dlb2_sw_mbox mbox[DLB2_MAX_NUM_VDEVS];
+ unsigned int pasid[DLB2_MAX_NUM_VDEVS];
+};
+
+#endif /* __DLB2_HW_TYPES_NEW_H */
#include <rte_log.h>
#include <rte_spinlock.h>
#include "../dlb2_main.h"
+
+/* TEMPORARY inclusion of both headers for merge */
+#include "dlb2_resource_new.h"
#include "dlb2_resource.h"
+
#include "../../dlb2_log.h"
#include "../../dlb2_user.h"
dlb2_list_init_head(&domain->avail_ldb_ports[i]);
}
-static void dlb2_init_fn_rsrc_lists(struct dlb2_function_resources *rsrc)
-{
- int i;
-
- dlb2_list_init_head(&rsrc->avail_domains);
- dlb2_list_init_head(&rsrc->used_domains);
- dlb2_list_init_head(&rsrc->avail_ldb_queues);
- dlb2_list_init_head(&rsrc->avail_dir_pq_pairs);
-
- for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++)
- dlb2_list_init_head(&rsrc->avail_ldb_ports[i]);
-}
-
void dlb2_hw_enable_sparse_dir_cq_mode(struct dlb2_hw *hw)
{
union dlb2_chp_cfg_chp_csr_ctrl r0;
DLB2_CSR_WR(hw, DLB2_CHP_CFG_CHP_CSR_CTRL, r0.val);
}
-void dlb2_resource_free(struct dlb2_hw *hw)
-{
- int i;
-
- if (hw->pf.avail_hist_list_entries)
- dlb2_bitmap_free(hw->pf.avail_hist_list_entries);
-
- for (i = 0; i < DLB2_MAX_NUM_VDEVS; i++) {
- if (hw->vdev[i].avail_hist_list_entries)
- dlb2_bitmap_free(hw->vdev[i].avail_hist_list_entries);
- }
-}
-
-int dlb2_resource_init(struct dlb2_hw *hw)
-{
- struct dlb2_list_entry *list;
- unsigned int i;
- int ret;
-
- /*
- * For optimal load-balancing, ports that map to one or more QIDs in
- * common should not be in numerical sequence. This is application
- * dependent, but the driver interleaves port IDs as much as possible
- * to reduce the likelihood of this. This initial allocation maximizes
- * the average distance between an ID and its immediate neighbors (i.e.
- * the distance from 1 to 0 and to 2, the distance from 2 to 1 and to
- * 3, etc.).
- */
- u8 init_ldb_port_allocation[DLB2_MAX_NUM_LDB_PORTS] = {
- 0, 7, 14, 5, 12, 3, 10, 1, 8, 15, 6, 13, 4, 11, 2, 9,
- 16, 23, 30, 21, 28, 19, 26, 17, 24, 31, 22, 29, 20, 27, 18, 25,
- 32, 39, 46, 37, 44, 35, 42, 33, 40, 47, 38, 45, 36, 43, 34, 41,
- 48, 55, 62, 53, 60, 51, 58, 49, 56, 63, 54, 61, 52, 59, 50, 57,
- };
-
- /* Zero-out resource tracking data structures */
- memset(&hw->rsrcs, 0, sizeof(hw->rsrcs));
- memset(&hw->pf, 0, sizeof(hw->pf));
-
- dlb2_init_fn_rsrc_lists(&hw->pf);
-
- for (i = 0; i < DLB2_MAX_NUM_VDEVS; i++) {
- memset(&hw->vdev[i], 0, sizeof(hw->vdev[i]));
- dlb2_init_fn_rsrc_lists(&hw->vdev[i]);
- }
-
- for (i = 0; i < DLB2_MAX_NUM_DOMAINS; i++) {
- memset(&hw->domains[i], 0, sizeof(hw->domains[i]));
- dlb2_init_domain_rsrc_lists(&hw->domains[i]);
- hw->domains[i].parent_func = &hw->pf;
- }
-
- /* Give all resources to the PF driver */
- hw->pf.num_avail_domains = DLB2_MAX_NUM_DOMAINS;
- for (i = 0; i < hw->pf.num_avail_domains; i++) {
- list = &hw->domains[i].func_list;
-
- dlb2_list_add(&hw->pf.avail_domains, list);
- }
-
- hw->pf.num_avail_ldb_queues = DLB2_MAX_NUM_LDB_QUEUES;
- for (i = 0; i < hw->pf.num_avail_ldb_queues; i++) {
- list = &hw->rsrcs.ldb_queues[i].func_list;
-
- dlb2_list_add(&hw->pf.avail_ldb_queues, list);
- }
-
- for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++)
- hw->pf.num_avail_ldb_ports[i] =
- DLB2_MAX_NUM_LDB_PORTS / DLB2_NUM_COS_DOMAINS;
-
- for (i = 0; i < DLB2_MAX_NUM_LDB_PORTS; i++) {
- int cos_id = i >> DLB2_NUM_COS_DOMAINS;
- struct dlb2_ldb_port *port;
-
- port = &hw->rsrcs.ldb_ports[init_ldb_port_allocation[i]];
-
- dlb2_list_add(&hw->pf.avail_ldb_ports[cos_id],
- &port->func_list);
- }
-
- hw->pf.num_avail_dir_pq_pairs = DLB2_MAX_NUM_DIR_PORTS(hw->ver);
- for (i = 0; i < hw->pf.num_avail_dir_pq_pairs; i++) {
- list = &hw->rsrcs.dir_pq_pairs[i].func_list;
-
- dlb2_list_add(&hw->pf.avail_dir_pq_pairs, list);
- }
-
- hw->pf.num_avail_qed_entries = DLB2_MAX_NUM_LDB_CREDITS;
- hw->pf.num_avail_dqed_entries =
- DLB2_MAX_NUM_DIR_CREDITS(hw->ver);
-
- hw->pf.num_avail_aqed_entries = DLB2_MAX_NUM_AQED_ENTRIES;
-
- ret = dlb2_bitmap_alloc(&hw->pf.avail_hist_list_entries,
- DLB2_MAX_NUM_HIST_LIST_ENTRIES);
- if (ret)
- goto unwind;
-
- ret = dlb2_bitmap_fill(hw->pf.avail_hist_list_entries);
- if (ret)
- goto unwind;
-
- for (i = 0; i < DLB2_MAX_NUM_VDEVS; i++) {
- ret = dlb2_bitmap_alloc(&hw->vdev[i].avail_hist_list_entries,
- DLB2_MAX_NUM_HIST_LIST_ENTRIES);
- if (ret)
- goto unwind;
-
- ret = dlb2_bitmap_zero(hw->vdev[i].avail_hist_list_entries);
- if (ret)
- goto unwind;
- }
-
- /* Initialize the hardware resource IDs */
- for (i = 0; i < DLB2_MAX_NUM_DOMAINS; i++) {
- hw->domains[i].id.phys_id = i;
- hw->domains[i].id.vdev_owned = false;
- }
-
- for (i = 0; i < DLB2_MAX_NUM_LDB_QUEUES; i++) {
- hw->rsrcs.ldb_queues[i].id.phys_id = i;
- hw->rsrcs.ldb_queues[i].id.vdev_owned = false;
- }
-
- for (i = 0; i < DLB2_MAX_NUM_LDB_PORTS; i++) {
- hw->rsrcs.ldb_ports[i].id.phys_id = i;
- hw->rsrcs.ldb_ports[i].id.vdev_owned = false;
- }
-
- for (i = 0; i < DLB2_MAX_NUM_DIR_PORTS(hw->ver); i++) {
- hw->rsrcs.dir_pq_pairs[i].id.phys_id = i;
- hw->rsrcs.dir_pq_pairs[i].id.vdev_owned = false;
- }
-
- for (i = 0; i < DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS; i++) {
- hw->rsrcs.sn_groups[i].id = i;
- /* Default mode (0) is 64 sequence numbers per queue */
- hw->rsrcs.sn_groups[i].mode = 0;
- hw->rsrcs.sn_groups[i].sequence_numbers_per_queue = 64;
- hw->rsrcs.sn_groups[i].slot_use_bitmap = 0;
- }
-
- for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++)
- hw->cos_reservation[i] = 100 / DLB2_NUM_COS_DOMAINS;
-
- return 0;
-
-unwind:
- dlb2_resource_free(hw);
-
- return ret;
-}
-
-void dlb2_clr_pmcsr_disable(struct dlb2_hw *hw)
-{
- union dlb2_cfg_mstr_cfg_pm_pmcsr_disable r0;
-
- r0.val = DLB2_CSR_RD(hw, DLB2_CFG_MSTR_CFG_PM_PMCSR_DISABLE);
-
- r0.field.disable = 0;
-
- DLB2_CSR_WR(hw, DLB2_CFG_MSTR_CFG_PM_PMCSR_DISABLE, r0.val);
-}
-
static void dlb2_configure_domain_credits(struct dlb2_hw *hw,
struct dlb2_hw_domain *domain)
{
int
dlb2_hw_start_domain(struct dlb2_hw *hw,
u32 domain_id,
- __attribute((unused)) struct dlb2_start_domain_args *arg,
+ struct dlb2_start_domain_args *arg,
struct dlb2_cmd_response *resp,
bool vdev_req,
unsigned int vdev_id)
#define __DLB2_RESOURCE_H
#include "dlb2_user.h"
-
-#include "dlb2_hw_types.h"
#include "dlb2_osdep_types.h"
-/**
- * dlb2_resource_init() - initialize the device
- * @hw: pointer to struct dlb2_hw.
- *
- * This function initializes the device's software state (pointed to by the hw
- * argument) and programs global scheduling QoS registers. This function should
- * be called during driver initialization.
- *
- * The dlb2_hw struct must be unique per DLB 2.0 device and persist until the
- * device is reset.
- *
- * Return:
- * Returns 0 upon success, <0 otherwise.
- */
-int dlb2_resource_init(struct dlb2_hw *hw);
-
-/**
- * dlb2_resource_free() - free device state memory
- * @hw: dlb2_hw handle for a particular device.
- *
- * This function frees software state pointed to by dlb2_hw. This function
- * should be called when resetting the device or unloading the driver.
- */
-void dlb2_resource_free(struct dlb2_hw *hw);
-
/**
* dlb2_resource_reset() - reset in-use resources to their initial state
* @hw: dlb2_hw handle for a particular device.
*/
int dlb2_vdev_in_use(struct dlb2_hw *hw, unsigned int id);
-/**
- * dlb2_clr_pmcsr_disable() - power on bulk of DLB 2.0 logic
- * @hw: dlb2_hw handle for a particular device.
- *
- * Clearing the PMCSR must be done at initialization to make the device fully
- * operational.
- */
-void dlb2_clr_pmcsr_disable(struct dlb2_hw *hw);
-
/**
* dlb2_hw_get_ldb_queue_depth() - returns the depth of a load-balanced queue
* @hw: dlb2_hw handle for a particular device.
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2020 Intel Corporation
+ */
+
+#define DLB2_USE_NEW_HEADERS /* TEMPORARY FOR MERGE */
+
+#include "dlb2_user.h"
+
+#include "dlb2_hw_types_new.h"
+#include "dlb2_osdep.h"
+#include "dlb2_osdep_bitmap.h"
+#include "dlb2_osdep_types.h"
+#include "dlb2_regs_new.h"
+#include "dlb2_resource_new.h" /* TEMP FOR UPSTREAMPATCHES */
+
+#include "../../dlb2_priv.h"
+#include "../../dlb2_inline_fns.h"
+
+#define DLB2_DOM_LIST_HEAD(head, type) \
+ DLB2_LIST_HEAD((head), type, domain_list)
+
+#define DLB2_FUNC_LIST_HEAD(head, type) \
+ DLB2_LIST_HEAD((head), type, func_list)
+
+#define DLB2_DOM_LIST_FOR(head, ptr, iter) \
+ DLB2_LIST_FOR_EACH(head, ptr, domain_list, iter)
+
+#define DLB2_FUNC_LIST_FOR(head, ptr, iter) \
+ DLB2_LIST_FOR_EACH(head, ptr, func_list, iter)
+
+#define DLB2_DOM_LIST_FOR_SAFE(head, ptr, ptr_tmp, it, it_tmp) \
+ DLB2_LIST_FOR_EACH_SAFE((head), ptr, ptr_tmp, domain_list, it, it_tmp)
+
+#define DLB2_FUNC_LIST_FOR_SAFE(head, ptr, ptr_tmp, it, it_tmp) \
+ DLB2_LIST_FOR_EACH_SAFE((head), ptr, ptr_tmp, func_list, it, it_tmp)
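+
+/*
+ * Example (illustrative only): walking a domain's configured load-balanced
+ * queues with the list-iteration helpers:
+ *
+ *	struct dlb2_list_entry *iter;
+ *	struct dlb2_ldb_queue *queue;
+ *
+ *	DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter)
+ *		...;
+ */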
+
+static void dlb2_init_domain_rsrc_lists(struct dlb2_hw_domain *domain)
+{
+ int i;
+
+ dlb2_list_init_head(&domain->used_ldb_queues);
+ dlb2_list_init_head(&domain->used_dir_pq_pairs);
+ dlb2_list_init_head(&domain->avail_ldb_queues);
+ dlb2_list_init_head(&domain->avail_dir_pq_pairs);
+
+ for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++)
+ dlb2_list_init_head(&domain->used_ldb_ports[i]);
+ for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++)
+ dlb2_list_init_head(&domain->avail_ldb_ports[i]);
+}
+
+static void dlb2_init_fn_rsrc_lists(struct dlb2_function_resources *rsrc)
+{
+ int i;
+
+ dlb2_list_init_head(&rsrc->avail_domains);
+ dlb2_list_init_head(&rsrc->used_domains);
+ dlb2_list_init_head(&rsrc->avail_ldb_queues);
+ dlb2_list_init_head(&rsrc->avail_dir_pq_pairs);
+
+ for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++)
+ dlb2_list_init_head(&rsrc->avail_ldb_ports[i]);
+}
+
+/**
+ * dlb2_resource_free() - free device state memory
+ * @hw: dlb2_hw handle for a particular device.
+ *
+ * This function frees software state pointed to by dlb2_hw. It should be
+ * called when resetting the device or unloading the driver.
+ */
+void dlb2_resource_free(struct dlb2_hw *hw)
+{
+ int i;
+
+ if (hw->pf.avail_hist_list_entries)
+ dlb2_bitmap_free(hw->pf.avail_hist_list_entries);
+
+ for (i = 0; i < DLB2_MAX_NUM_VDEVS; i++) {
+ if (hw->vdev[i].avail_hist_list_entries)
+ dlb2_bitmap_free(hw->vdev[i].avail_hist_list_entries);
+ }
+}
+
+/**
+ * dlb2_resource_init() - initialize the device
+ * @hw: pointer to struct dlb2_hw.
+ * @ver: device version.
+ *
+ * This function initializes the device's software state (pointed to by the hw
+ * argument) and programs global scheduling QoS registers. This function should
+ * be called during driver initialization, and the dlb2_hw structure must
+ * be zero-initialized before the call.
+ *
+ * The dlb2_hw struct must be unique per DLB 2.0 device and persist until the
+ * device is reset.
+ *
+ * Return:
+ * Returns 0 upon success, <0 otherwise.
+ */
+int dlb2_resource_init(struct dlb2_hw *hw, enum dlb2_hw_ver ver)
+{
+ struct dlb2_list_entry *list;
+ unsigned int i;
+ int ret;
+
+ /*
+ * For optimal load-balancing, ports that map to one or more QIDs in
+ * common should not be in numerical sequence. The port->QID mapping is
+ * application dependent, but the driver interleaves port IDs as much
+ * as possible to reduce the likelihood of sequential ports mapping to
+ * the same QID(s). This initial allocation of port IDs maximizes the
+ * average distance between an ID and its immediate neighbors (i.e.
+ * the distance from 1 to 0 and to 2, the distance from 2 to 1 and to
+ * 3, etc.).
+ */
+ const u8 init_ldb_port_allocation[DLB2_MAX_NUM_LDB_PORTS] = {
+ 0, 7, 14, 5, 12, 3, 10, 1, 8, 15, 6, 13, 4, 11, 2, 9,
+ 16, 23, 30, 21, 28, 19, 26, 17, 24, 31, 22, 29, 20, 27, 18, 25,
+ 32, 39, 46, 37, 44, 35, 42, 33, 40, 47, 38, 45, 36, 43, 34, 41,
+ 48, 55, 62, 53, 60, 51, 58, 49, 56, 63, 54, 61, 52, 59, 50, 57,
+ };
+
+ hw->ver = ver;
+
+ dlb2_init_fn_rsrc_lists(&hw->pf);
+
+ for (i = 0; i < DLB2_MAX_NUM_VDEVS; i++)
+ dlb2_init_fn_rsrc_lists(&hw->vdev[i]);
+
+ for (i = 0; i < DLB2_MAX_NUM_DOMAINS; i++) {
+ dlb2_init_domain_rsrc_lists(&hw->domains[i]);
+ hw->domains[i].parent_func = &hw->pf;
+ }
+
+ /* Give all resources to the PF driver */
+ hw->pf.num_avail_domains = DLB2_MAX_NUM_DOMAINS;
+ for (i = 0; i < hw->pf.num_avail_domains; i++) {
+ list = &hw->domains[i].func_list;
+
+ dlb2_list_add(&hw->pf.avail_domains, list);
+ }
+
+ hw->pf.num_avail_ldb_queues = DLB2_MAX_NUM_LDB_QUEUES;
+ for (i = 0; i < hw->pf.num_avail_ldb_queues; i++) {
+ list = &hw->rsrcs.ldb_queues[i].func_list;
+
+ dlb2_list_add(&hw->pf.avail_ldb_queues, list);
+ }
+
+ for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++)
+ hw->pf.num_avail_ldb_ports[i] =
+ DLB2_MAX_NUM_LDB_PORTS / DLB2_NUM_COS_DOMAINS;
+
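+ /*
+  * Distribute the interleaved port IDs evenly across the classes of
+  * service. The shift works because DLB2_NUM_COS_DOMAINS (4) equals
+  * log2(DLB2_MAX_NUM_LDB_PORTS / DLB2_NUM_COS_DOMAINS) = log2(16).
+  */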
+ for (i = 0; i < DLB2_MAX_NUM_LDB_PORTS; i++) {
+ int cos_id = i >> DLB2_NUM_COS_DOMAINS;
+ struct dlb2_ldb_port *port;
+
+ port = &hw->rsrcs.ldb_ports[init_ldb_port_allocation[i]];
+
+ dlb2_list_add(&hw->pf.avail_ldb_ports[cos_id],
+ &port->func_list);
+ }
+
+ hw->pf.num_avail_dir_pq_pairs = DLB2_MAX_NUM_DIR_PORTS(hw->ver);
+ for (i = 0; i < hw->pf.num_avail_dir_pq_pairs; i++) {
+ list = &hw->rsrcs.dir_pq_pairs[i].func_list;
+
+ dlb2_list_add(&hw->pf.avail_dir_pq_pairs, list);
+ }
+
+ if (hw->ver == DLB2_HW_V2) {
+ hw->pf.num_avail_qed_entries = DLB2_MAX_NUM_LDB_CREDITS;
+ hw->pf.num_avail_dqed_entries =
+ DLB2_MAX_NUM_DIR_CREDITS(hw->ver);
+ } else {
+ hw->pf.num_avail_entries = DLB2_MAX_NUM_CREDITS(hw->ver);
+ }
+
+ hw->pf.num_avail_aqed_entries = DLB2_MAX_NUM_AQED_ENTRIES;
+
+ ret = dlb2_bitmap_alloc(&hw->pf.avail_hist_list_entries,
+ DLB2_MAX_NUM_HIST_LIST_ENTRIES);
+ if (ret)
+ goto unwind;
+
+ ret = dlb2_bitmap_fill(hw->pf.avail_hist_list_entries);
+ if (ret)
+ goto unwind;
+
+ for (i = 0; i < DLB2_MAX_NUM_VDEVS; i++) {
+ ret = dlb2_bitmap_alloc(&hw->vdev[i].avail_hist_list_entries,
+ DLB2_MAX_NUM_HIST_LIST_ENTRIES);
+ if (ret)
+ goto unwind;
+
+ ret = dlb2_bitmap_zero(hw->vdev[i].avail_hist_list_entries);
+ if (ret)
+ goto unwind;
+ }
+
+ /* Initialize the hardware resource IDs */
+ for (i = 0; i < DLB2_MAX_NUM_DOMAINS; i++) {
+ hw->domains[i].id.phys_id = i;
+ hw->domains[i].id.vdev_owned = false;
+ }
+
+ for (i = 0; i < DLB2_MAX_NUM_LDB_QUEUES; i++) {
+ hw->rsrcs.ldb_queues[i].id.phys_id = i;
+ hw->rsrcs.ldb_queues[i].id.vdev_owned = false;
+ }
+
+ for (i = 0; i < DLB2_MAX_NUM_LDB_PORTS; i++) {
+ hw->rsrcs.ldb_ports[i].id.phys_id = i;
+ hw->rsrcs.ldb_ports[i].id.vdev_owned = false;
+ }
+
+ for (i = 0; i < DLB2_MAX_NUM_DIR_PORTS(hw->ver); i++) {
+ hw->rsrcs.dir_pq_pairs[i].id.phys_id = i;
+ hw->rsrcs.dir_pq_pairs[i].id.vdev_owned = false;
+ }
+
+ for (i = 0; i < DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS; i++) {
+ hw->rsrcs.sn_groups[i].id = i;
+ /* Default mode (0) is 64 sequence numbers per queue */
+ hw->rsrcs.sn_groups[i].mode = 0;
+ hw->rsrcs.sn_groups[i].sequence_numbers_per_queue = 64;
+ hw->rsrcs.sn_groups[i].slot_use_bitmap = 0;
+ }
+
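+ /* Reserve an equal share (100/4 = 25%) of bandwidth per class of service */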
+ for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++)
+ hw->cos_reservation[i] = 100 / DLB2_NUM_COS_DOMAINS;
+
+ return 0;
+
+unwind:
+ dlb2_resource_free(hw);
+
+ return ret;
+}
+
+/**
+ * dlb2_clr_pmcsr_disable() - power on bulk of DLB 2.0 logic
+ * @hw: dlb2_hw handle for a particular device.
+ * @ver: device version.
+ *
+ * Clearing the PMCSR must be done at initialization to make the device fully
+ * operational.
+ */
+void dlb2_clr_pmcsr_disable(struct dlb2_hw *hw, enum dlb2_hw_ver ver)
+{
+ u32 pmcsr_dis;
+
+ pmcsr_dis = DLB2_CSR_RD(hw, DLB2_CM_CFG_PM_PMCSR_DISABLE(ver));
+
+ DLB2_BITS_CLR(pmcsr_dis, DLB2_CM_CFG_PM_PMCSR_DISABLE_DISABLE);
+
+ DLB2_CSR_WR(hw, DLB2_CM_CFG_PM_PMCSR_DISABLE(ver), pmcsr_dis);
+}
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2020 Intel Corporation
+ */
+
+#ifndef __DLB2_RESOURCE_NEW_H
+#define __DLB2_RESOURCE_NEW_H
+
+#include "dlb2_user.h"
+#include "dlb2_osdep_types.h"
+
+/**
+ * dlb2_resource_init() - initialize the device
+ * @hw: pointer to struct dlb2_hw.
+ * @ver: device version.
+ *
+ * This function initializes the device's software state (pointed to by the hw
+ * argument) and programs global scheduling QoS registers. This function should
+ * be called during driver initialization.
+ *
+ * The dlb2_hw struct must be unique per DLB 2.0 device and persist until the
+ * device is reset.
+ *
+ * Return:
+ * Returns 0 upon success, <0 otherwise.
+ */
+int dlb2_resource_init(struct dlb2_hw *hw, enum dlb2_hw_ver ver);
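+
+/*
+ * Illustrative (not compiled) init-path usage, mirroring the PF probe flow
+ * in dlb2_pf.c:
+ *
+ *	ver = DLB2_HW_DEVICE_FROM_PCI_ID(pdev);
+ *	dlb2_clr_pmcsr_disable(&dlb2_dev->hw, ver);
+ *	...
+ *	ret = dlb2_resource_init(&dlb2_dev->hw, ver);
+ *	if (ret)
+ *		goto resource_init_fail;
+ *	...
+ *	dlb2_resource_free(&dlb2_dev->hw);	/- on reset or teardown -/
+ */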
+
+/**
+ * dlb2_clr_pmcsr_disable() - power on bulk of DLB 2.0 logic
+ * @hw: dlb2_hw handle for a particular device.
+ * @ver: device version.
+ *
+ * Clearing the PMCSR must be done at initialization to make the device fully
+ * operational.
+ */
+void dlb2_clr_pmcsr_disable(struct dlb2_hw *hw, enum dlb2_hw_ver ver);
+
+/**
+ * dlb2_finish_unmap_qid_procedures() - finish any pending unmap procedures
+ * @hw: dlb2_hw handle for a particular device.
+ *
+ * This function attempts to finish any outstanding unmap procedures.
+ * It should be called by the kernel thread responsible for finishing
+ * map/unmap procedures.
+ *
+ * Return:
+ * Returns the number of procedures that weren't completed.
+ */
+unsigned int dlb2_finish_unmap_qid_procedures(struct dlb2_hw *hw);
+
+/**
+ * dlb2_finish_map_qid_procedures() - finish any pending map procedures
+ * @hw: dlb2_hw handle for a particular device.
+ *
+ * This function attempts to finish any outstanding map procedures.
+ * It should be called by the kernel thread responsible for finishing
+ * map/unmap procedures.
+ *
+ * Return:
+ * Returns the number of procedures that weren't completed.
+ */
+unsigned int dlb2_finish_map_qid_procedures(struct dlb2_hw *hw);
+
+/**
+ * dlb2_resource_free() - free device state memory
+ * @hw: dlb2_hw handle for a particular device.
+ *
+ * This function frees software state pointed to by dlb2_hw. It should be
+ * called when resetting the device or unloading the driver.
+ */
+void dlb2_resource_free(struct dlb2_hw *hw);
+
+#endif /* __DLB2_RESOURCE_NEW_H */
#include <rte_malloc.h>
#include <rte_errno.h>
-#include "base/dlb2_resource.h"
+#define DLB2_USE_NEW_HEADERS /* TEMPORARY FOR MERGE */
+
+#include "base/dlb2_regs_new.h"
+#include "base/dlb2_hw_types_new.h"
+#include "base/dlb2_resource_new.h"
#include "base/dlb2_osdep.h"
-#include "base/dlb2_regs.h"
#include "dlb2_main.h"
#include "../dlb2_user.h"
#include "../dlb2_priv.h"
static void dlb2_pf_enable_pm(struct dlb2_dev *dlb2_dev)
{
- dlb2_clr_pmcsr_disable(&dlb2_dev->hw);
+ int version;
+
+ version = DLB2_HW_DEVICE_FROM_PCI_ID(dlb2_dev->pdev);
+
+ dlb2_clr_pmcsr_disable(&dlb2_dev->hw, version);
}
#define DLB2_READY_RETRY_LIMIT 1000
-static int dlb2_pf_wait_for_device_ready(struct dlb2_dev *dlb2_dev)
+static int dlb2_pf_wait_for_device_ready(struct dlb2_dev *dlb2_dev,
+ int dlb_version)
{
u32 retries = 0;
/* Allow at least 1s for the device to become active after power-on */
for (retries = 0; retries < DLB2_READY_RETRY_LIMIT; retries++) {
- union dlb2_cfg_mstr_cfg_diagnostic_idle_status idle;
- union dlb2_cfg_mstr_cfg_pm_status pm_st;
+ u32 idle_val;
+ u32 idle_dlb_func_idle;
+ u32 pm_st_val;
+ u32 pm_st_pmsm;
u32 addr;
- addr = DLB2_CFG_MSTR_CFG_PM_STATUS;
- pm_st.val = DLB2_CSR_RD(&dlb2_dev->hw, addr);
- addr = DLB2_CFG_MSTR_CFG_DIAGNOSTIC_IDLE_STATUS;
- idle.val = DLB2_CSR_RD(&dlb2_dev->hw, addr);
- if (pm_st.field.pmsm == 1 && idle.field.dlb_func_idle == 1)
+ addr = DLB2_CM_CFG_PM_STATUS(dlb_version);
+ pm_st_val = DLB2_CSR_RD(&dlb2_dev->hw, addr);
+ addr = DLB2_CM_CFG_DIAGNOSTIC_IDLE_STATUS(dlb_version);
+ idle_val = DLB2_CSR_RD(&dlb2_dev->hw, addr);
+ idle_dlb_func_idle = idle_val &
+ DLB2_CM_CFG_DIAGNOSTIC_IDLE_STATUS_DLB_FUNC_IDLE;
+ pm_st_pmsm = pm_st_val & DLB2_CM_CFG_PM_STATUS_PMSM;
+ if (pm_st_pmsm && idle_dlb_func_idle)
break;
rte_delay_ms(1);
{
struct dlb2_dev *dlb2_dev;
int ret = 0;
+ int dlb_version = 0;
DLB2_INFO(dlb2_dev, "probe\n");
goto dlb2_dev_malloc_fail;
}
+ dlb_version = DLB2_HW_DEVICE_FROM_PCI_ID(pdev);
+
/* PCI Bus driver has already mapped bar space into process.
* Save off our IO register and FUNC addresses.
*/
*/
dlb2_pf_enable_pm(dlb2_dev);
- ret = dlb2_pf_wait_for_device_ready(dlb2_dev);
+ ret = dlb2_pf_wait_for_device_ready(dlb2_dev, dlb_version);
if (ret)
goto wait_for_device_ready_fail;
if (ret)
goto init_driver_state_fail;
- ret = dlb2_resource_init(&dlb2_dev->hw);
+ ret = dlb2_resource_init(&dlb2_dev->hw, dlb_version);
if (ret)
goto resource_init_fail;
#include <rte_bus_pci.h>
#include <rte_eal_paging.h>
+#ifdef DLB2_USE_NEW_HEADERS
+#include "base/dlb2_hw_types_new.h"
+#else
#include "base/dlb2_hw_types.h"
+#endif
#include "../dlb2_user.h"
#define DLB2_DEFAULT_UNREGISTER_TIMEOUT_S 5
#include <rte_memory.h>
#include <rte_string_fns.h>
+#define DLB2_USE_NEW_HEADERS /* TEMPORARY FOR MERGE */
+
#include "../dlb2_priv.h"
#include "../dlb2_iface.h"
#include "../dlb2_inline_fns.h"
#include "dlb2_main.h"
-#include "base/dlb2_hw_types.h"
+#include "base/dlb2_hw_types_new.h"
#include "base/dlb2_osdep.h"
-#include "base/dlb2_resource.h"
+#include "base/dlb2_resource_new.h"
static const char *event_dlb2_pf_name = RTE_STR(EVDEV_DLB2_NAME_PMD);