/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020-2021 Xilinx, Inc.
 */
11 #include <rte_bus_pci.h>
14 #include "sfc_efx_mcdi.h"
15 #include "sfc_vdpa_debug.h"
16 #include "sfc_vdpa_log.h"
17 #include "sfc_vdpa_ops.h"
19 #define SFC_VDPA_MAC_ADDR "mac"
20 #define SFC_VDPA_DEFAULT_MCDI_IOVA 0x200000000000
22 /* Broadcast & Unicast MAC filters are supported */
23 #define SFC_MAX_SUPPORTED_FILTERS 2
/*
 * Get function-local index of the associated VI from the
 * virtqueue number. Queue 0 is reserved for MCDI.
 */
29 #define SFC_VDPA_GET_VI_INDEX(vq_num) (((vq_num) / 2) + 1)
/*
 * Types of MAC filters the driver installs; one of each kind may be
 * programmed (see SFC_MAX_SUPPORTED_FILTERS == 2).
 */
enum sfc_vdpa_filter_type {
	SFC_VDPA_BCAST_MAC_FILTER = 0,
	SFC_VDPA_UCAST_MAC_FILTER = 1,

	/* Number of supported filter types, not a real filter */
	SFC_VDPA_FILTER_NTYPE
};
37 typedef struct sfc_vdpa_filter_s {
39 efx_filter_spec_t spec[SFC_MAX_SUPPORTED_FILTERS];
42 /* Adapter private data */
43 struct sfc_vdpa_adapter {
44 TAILQ_ENTRY(sfc_vdpa_adapter) next;
46 * PMD setup and configuration is not thread safe. Since it is not
47 * performance sensitive, it is better to guarantee thread-safety
48 * and add device level lock. vDPA control operations which
49 * change its state should acquire the lock.
52 struct rte_pci_device *pdev;
54 struct rte_kvargs *kvargs;
58 rte_spinlock_t nic_lock;
62 struct sfc_efx_mcdi mcdi;
63 size_t mcdi_buff_size;
65 uint32_t max_queue_count;
67 char log_prefix[SFC_VDPA_LOG_PREFIX_MAX];
68 uint32_t logtype_main;
70 sfc_vdpa_filter_t filters;
74 int vfio_container_fd;
76 struct sfc_vdpa_ops_data *ops_data;
80 sfc_vdpa_register_logtype(const struct rte_pci_addr *pci_addr,
81 const char *lt_prefix_str,
84 struct sfc_vdpa_adapter *
85 sfc_vdpa_get_adapter_by_dev(struct rte_pci_device *pdev);
86 struct sfc_vdpa_ops_data *
87 sfc_vdpa_get_data_by_dev(struct rte_vdpa_device *vdpa_dev);
90 sfc_vdpa_hw_init(struct sfc_vdpa_adapter *sva);
92 sfc_vdpa_hw_fini(struct sfc_vdpa_adapter *sva);
95 sfc_vdpa_mcdi_init(struct sfc_vdpa_adapter *sva);
97 sfc_vdpa_mcdi_fini(struct sfc_vdpa_adapter *sva);
100 sfc_vdpa_dma_alloc(struct sfc_vdpa_adapter *sva, const char *name,
101 size_t len, efsys_mem_t *esmp);
104 sfc_vdpa_dma_free(struct sfc_vdpa_adapter *sva, efsys_mem_t *esmp);
107 sfc_vdpa_dma_map(struct sfc_vdpa_ops_data *vdpa_data, bool do_map);
110 sfc_vdpa_filter_remove(struct sfc_vdpa_ops_data *ops_data);
112 sfc_vdpa_filter_config(struct sfc_vdpa_ops_data *ops_data);
/*
 * Convert an opaque device handle back to the adapter pointer.
 * The handle is the adapter pointer itself, so this is a plain cast.
 */
static inline struct sfc_vdpa_adapter *
sfc_vdpa_adapter_by_dev_handle(void *dev_handle)
{
	return (struct sfc_vdpa_adapter *)dev_handle;
}
/*
 * Add wrapper functions to acquire/release lock to be able to remove or
 * change the lock in one place.
 */
125 sfc_vdpa_adapter_lock_init(struct sfc_vdpa_adapter *sva)
127 rte_spinlock_init(&sva->lock);
131 sfc_vdpa_adapter_is_locked(struct sfc_vdpa_adapter *sva)
133 return rte_spinlock_is_locked(&sva->lock);
137 sfc_vdpa_adapter_lock(struct sfc_vdpa_adapter *sva)
139 rte_spinlock_lock(&sva->lock);
143 sfc_vdpa_adapter_trylock(struct sfc_vdpa_adapter *sva)
145 return rte_spinlock_trylock(&sva->lock);
149 sfc_vdpa_adapter_unlock(struct sfc_vdpa_adapter *sva)
151 rte_spinlock_unlock(&sva->lock);
155 sfc_vdpa_adapter_lock_fini(__rte_unused struct sfc_vdpa_adapter *sva)
157 /* Just for symmetry of the API */
160 #endif /* _SFC_VDPA_H */