1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2020-2021 Xilinx, Inc.
11 #include <rte_bus_pci.h>
14 #include "sfc_efx_mcdi.h"
15 #include "sfc_vdpa_debug.h"
16 #include "sfc_vdpa_log.h"
17 #include "sfc_vdpa_ops.h"
/* Device-argument (kvargs) key used to supply a MAC address */
#define SFC_VDPA_MAC_ADDR "mac"
/*
 * IOVA at which the MCDI DMA buffer is mapped by default.
 * NOTE(review): presumably chosen to sit above typical guest memory
 * ranges — confirm against the VFIO mapping code.
 */
#define SFC_VDPA_DEFAULT_MCDI_IOVA 0x200000000000
/*
 * Broadcast, unicast and multicast-destination MAC filters are
 * supported — one slot per sfc_vdpa_filter_type value below.
 */
#define SFC_MAX_SUPPORTED_FILTERS 3
/*
 * Get function-local index of the associated VI from the
 * virtqueue number. Queue 0 is reserved for MCDI, and each VI
 * hosts one RX/TX virtqueue pair (hence the divide by 2).
 */
#define SFC_VDPA_GET_VI_INDEX(vq_num) (((vq_num) / 2) + 1)
/*
 * Indices into the installed-filter array; one slot per supported
 * filter (must stay consistent with SFC_MAX_SUPPORTED_FILTERS).
 */
enum sfc_vdpa_filter_type {
	SFC_VDPA_BCAST_MAC_FILTER = 0,	/* broadcast MAC filter */
	SFC_VDPA_UCAST_MAC_FILTER = 1,	/* unicast MAC filter */
	SFC_VDPA_MCAST_DST_FILTER = 2,	/* multicast-destination filter */
/* Set of HW MAC filters installed for one adapter */
typedef struct sfc_vdpa_filter_s {
	/* One spec per filter slot, indexed by enum sfc_vdpa_filter_type */
	efx_filter_spec_t spec[SFC_MAX_SUPPORTED_FILTERS];
/* Adapter private data */
struct sfc_vdpa_adapter {
	/* Linkage on the driver-global list of vDPA adapters */
	TAILQ_ENTRY(sfc_vdpa_adapter) next;
	/*
	 * PMD setup and configuration is not thread safe. Since it is not
	 * performance sensitive, it is better to guarantee thread-safety
	 * and add device level lock. vDPA control operations which
	 * change its state should acquire the lock.
	 */
	/* Underlying PCI device driven by this adapter */
	struct rte_pci_device *pdev;

	/* Parsed device arguments (e.g. SFC_VDPA_MAC_ADDR) */
	struct rte_kvargs *kvargs;

	/* Lock serializing direct NIC access within this driver */
	rte_spinlock_t nic_lock;

	/* MCDI (management controller) channel state */
	struct sfc_efx_mcdi mcdi;
	/* Size of the DMA buffer backing the MCDI channel */
	size_t mcdi_buff_size;

	/* Maximum virtqueue count — presumably HW-reported; verify at init */
	uint32_t max_queue_count;

	/* Prefix prepended to this adapter's log messages */
	char log_prefix[SFC_VDPA_LOG_PREFIX_MAX];
	/* Log type id for the adapter's main log messages */
	uint32_t logtype_main;

	/* MAC filters currently installed for this adapter */
	sfc_vdpa_filter_t filters;

	/* VFIO container fd — assumed used for DMA mapping; see dma_map */
	int vfio_container_fd;

	/* Per-device vDPA operations state */
	struct sfc_vdpa_ops_data *ops_data;
/*
 * Register a log type for the device identified by pci_addr
 * (return-type line not visible in this view).
 */
sfc_vdpa_register_logtype(const struct rte_pci_addr *pci_addr,
			  const char *lt_prefix_str,

/* Look up adapter private data by its PCI device */
struct sfc_vdpa_adapter *
sfc_vdpa_get_adapter_by_dev(struct rte_pci_device *pdev);

/* Look up per-device vDPA ops data by its vDPA device handle */
struct sfc_vdpa_ops_data *
sfc_vdpa_get_data_by_dev(struct rte_vdpa_device *vdpa_dev);

/* Bring up NIC hardware state for the adapter */
sfc_vdpa_hw_init(struct sfc_vdpa_adapter *sva);

/* Tear down NIC hardware state; inverse of sfc_vdpa_hw_init() */
sfc_vdpa_hw_fini(struct sfc_vdpa_adapter *sva);

/* Set up the MCDI channel (see sva->mcdi) */
sfc_vdpa_mcdi_init(struct sfc_vdpa_adapter *sva);

/* Tear down the MCDI channel; inverse of sfc_vdpa_mcdi_init() */
sfc_vdpa_mcdi_fini(struct sfc_vdpa_adapter *sva);

/* Allocate a DMA buffer of len bytes, described on return by esmp */
sfc_vdpa_dma_alloc(struct sfc_vdpa_adapter *sva, const char *name,
		   size_t len, efsys_mem_t *esmp);

/* Release a buffer obtained from sfc_vdpa_dma_alloc() */
sfc_vdpa_dma_free(struct sfc_vdpa_adapter *sva, efsys_mem_t *esmp);

/* Map (do_map == true) or unmap memory for device DMA */
sfc_vdpa_dma_map(struct sfc_vdpa_ops_data *vdpa_data, bool do_map);

/* Remove MAC filters installed by sfc_vdpa_filter_config() */
sfc_vdpa_filter_remove(struct sfc_vdpa_ops_data *ops_data);

/* Install the supported MAC filters (see enum sfc_vdpa_filter_type) */
sfc_vdpa_filter_config(struct sfc_vdpa_ops_data *ops_data);
/* Convert an opaque device handle back to the adapter pointer */
static inline struct sfc_vdpa_adapter *
sfc_vdpa_adapter_by_dev_handle(void *dev_handle)
	return (struct sfc_vdpa_adapter *)dev_handle;
 * Add wrapper functions to acquire/release lock to be able to remove or
 * change the lock in one place.

/* Initialize the adapter's device-level lock; called once at setup */
sfc_vdpa_adapter_lock_init(struct sfc_vdpa_adapter *sva)
	rte_spinlock_init(&sva->lock);
/* Return nonzero when the adapter lock is currently held */
sfc_vdpa_adapter_is_locked(struct sfc_vdpa_adapter *sva)
	return rte_spinlock_is_locked(&sva->lock);
/* Acquire the adapter lock, spinning until it is available */
sfc_vdpa_adapter_lock(struct sfc_vdpa_adapter *sva)
	rte_spinlock_lock(&sva->lock);
/* Try to acquire the adapter lock without blocking */
sfc_vdpa_adapter_trylock(struct sfc_vdpa_adapter *sva)
	return rte_spinlock_trylock(&sva->lock);
/* Release the adapter lock taken by lock()/trylock() */
sfc_vdpa_adapter_unlock(struct sfc_vdpa_adapter *sva)
	rte_spinlock_unlock(&sva->lock);
/* No-op teardown: spinlocks need no destruction; kept for symmetry */
sfc_vdpa_adapter_lock_fini(__rte_unused struct sfc_vdpa_adapter *sva)
	/* Just for symmetry of the API */
161 #endif /* _SFC_VDPA_H */