/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2021 Xilinx, Inc.
 * Copyright(c) 2016-2019 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#ifndef _SFC_EV_H_
#define _SFC_EV_H_

#include <ethdev_driver.h>

#include "efx.h"

#include "sfc_dp_rx.h"
#include "sfc_dp_tx.h"

#ifdef __cplusplus
extern "C" {
#endif

struct sfc_adapter;

enum sfc_evq_state {
	SFC_EVQ_UNINITIALIZED = 0,
	SFC_EVQ_INITIALIZED,
	SFC_EVQ_STARTING,
	SFC_EVQ_STARTED,

	SFC_EVQ_NSTATES
};

enum sfc_evq_type {
	SFC_EVQ_TYPE_MGMT = 0,
	SFC_EVQ_TYPE_RX,
	SFC_EVQ_TYPE_TX,

	SFC_EVQ_NTYPES
};

struct sfc_evq {
	/* Used on datapath */
	efx_evq_t			*common;
	const efx_ev_callbacks_t	*callbacks;
	unsigned int			read_ptr;
	unsigned int			read_ptr_primed;
	boolean_t			exception;
	efsys_mem_t			mem;
	struct sfc_dp_rxq		*dp_rxq;
	struct sfc_dp_txq		*dp_txq;

	/* Not used on datapath */
	struct sfc_adapter		*sa;
	unsigned int			evq_index;
	enum sfc_evq_state		init_state;
	enum sfc_evq_type		type;
	unsigned int			entries;
};

static inline sfc_sw_index_t
sfc_mgmt_evq_sw_index(__rte_unused const struct sfc_adapter_shared *sas)
{
	/* The management EvQ is always the very first one (SW index 0) */
	return 0;
}

/* Return the number of Rx queues reserved for driver's internal use */
static inline unsigned int
sfc_nb_reserved_rxq(const struct sfc_adapter_shared *sas)
{
	return sfc_nb_counter_rxq(sas) + sfc_repr_nb_rxq(sas);
}

/* Return the number of Tx queues reserved for driver's internal use */
static inline unsigned int
sfc_nb_txq_reserved(const struct sfc_adapter_shared *sas)
{
	return sfc_repr_nb_txq(sas);
}

static inline unsigned int
sfc_nb_reserved_evq(const struct sfc_adapter_shared *sas)
{
	/* An EvQ is required for each reserved Rx/Tx queue */
	return 1 + sfc_nb_reserved_rxq(sas) + sfc_nb_txq_reserved(sas);
}

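/*
 * Illustrative example (an assumed configuration, not part of the API):
 * with the counters RxQ allocated plus one representor RxQ and one
 * representor TxQ in use,
 *   sfc_nb_reserved_evq() == 1 (mgmt) + 2 (reserved RxQs) + 1 (reserved TxQ)
 *                         == 4,
 * so the first ethdev Rx queue gets EvQ SW index 4 (see the mapping
 * functions below).
 */
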
/*
 * The mapping functions that return the SW index of a specific reserved
 * queue rely on the relative order of reserved queues. Some reserved
 * queues are optional, and if they are disabled or not supported, then
 * the function for that specific reserved queue returns the previous
 * valid index of a reserved queue in the dependency chain, or
 * SFC_SW_INDEX_INVALID if it is the first reserved queue in the chain.
 * If at least one of the reserved queues in the chain is enabled, then
 * the corresponding function will give a valid SW index, even if previous
 * functions in the chain returned SFC_SW_INDEX_INVALID, since this value
 * is one less than the first valid SW index.
 *
 * The dependency mechanism is utilized to avoid rigid defines for the SW
 * indices of reserved queues and to allow these indices to shrink and make
 * space for ethdev queue indices when some of the reserved queues are
 * disabled.
 */

static inline sfc_sw_index_t
sfc_counters_rxq_sw_index(const struct sfc_adapter_shared *sas)
{
	return sas->counters_rxq_allocated ? 0 : SFC_SW_INDEX_INVALID;
}

static inline sfc_sw_index_t
sfc_repr_rxq_sw_index(const struct sfc_adapter_shared *sas,
		      unsigned int repr_queue_id)
{
	return sfc_counters_rxq_sw_index(sas) + sfc_repr_nb_rxq(sas) +
		repr_queue_id;
}

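/*
 * Worked example of the dependency chain above (illustrative only; it
 * assumes sfc_repr_nb_rxq() == 1 when representors are in use and
 * SFC_SW_INDEX_INVALID == -1, per the comment above): with the counters
 * RxQ allocated, sfc_counters_rxq_sw_index() == 0 and the first
 * representor RxQ gets SW index 0 + 1 + 0 == 1. With the counters RxQ
 * disabled, sfc_counters_rxq_sw_index() == -1, so the representor RxQ
 * shrinks to SW index -1 + 1 + 0 == 0.
 */
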
static inline sfc_sw_index_t
sfc_repr_txq_sw_index(const struct sfc_adapter_shared *sas,
		      unsigned int repr_queue_id)
{
	/* Reserved TxQ for representors is the first reserved TxQ */
	return sfc_repr_available(sas) ? repr_queue_id : SFC_SW_INDEX_INVALID;
}

/*
 * Functions below define the event queue to transmit/receive queue
 * mapping and vice versa.
 * SFC_ETHDEV_QID_INVALID is returned when sw_index is converted to
 * ethdev_qid, but sw_index represents a queue reserved for driver's
 * internal use.
 * A dedicated event queue is allocated for management and for each Rx
 * and each Tx queue.
 * Event queue 0 is used for management events.
 * When counters are supported, one Rx event queue is reserved.
 * When representors are supported, Rx and Tx event queues are reserved.
 * Rx event queues follow the reserved event queues.
 * Tx event queues follow the Rx event queues.
 */

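/*
 * Resulting EvQ SW index layout for an illustrative configuration
 * (assumed here only for the example) with the counters RxQ, one
 * representor RxQ and one representor TxQ enabled:
 *   EvQ 0			- management
 *   EvQ 1			- counters RxQ
 *   EvQ 2			- representor RxQ
 *   EvQ 3			- representor TxQ
 *   EvQ 4 .. 4 + nb_rxq - 1	- ethdev Rx queues
 *   EvQ 4 + nb_rxq ..		- ethdev Tx queues
 */
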
static inline sfc_ethdev_qid_t
sfc_ethdev_rx_qid_by_rxq_sw_index(struct sfc_adapter_shared *sas,
				  sfc_sw_index_t rxq_sw_index)
{
	if (rxq_sw_index < sfc_nb_reserved_rxq(sas))
		return SFC_ETHDEV_QID_INVALID;

	return rxq_sw_index - sfc_nb_reserved_rxq(sas);
}

static inline sfc_sw_index_t
sfc_rxq_sw_index_by_ethdev_rx_qid(struct sfc_adapter_shared *sas,
				  sfc_ethdev_qid_t ethdev_qid)
{
	return sfc_nb_reserved_rxq(sas) + ethdev_qid;
}

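/*
 * For instance, with two reserved RxQs (the counters + representor
 * configuration assumed above), ethdev Rx queue 0 maps to RxQ SW index 2,
 * while RxQ SW indices 0 and 1 map to SFC_ETHDEV_QID_INVALID.
 */
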
static inline sfc_sw_index_t
sfc_evq_sw_index_by_rxq_sw_index(struct sfc_adapter *sa,
				 sfc_sw_index_t rxq_sw_index)
{
	struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
	sfc_ethdev_qid_t ethdev_qid;

	ethdev_qid = sfc_ethdev_rx_qid_by_rxq_sw_index(sas, rxq_sw_index);
	if (ethdev_qid == SFC_ETHDEV_QID_INVALID) {
		/* One EvQ is reserved for management */
		return 1 + rxq_sw_index;
	}

	return sfc_nb_reserved_evq(sas) + ethdev_qid;
}

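/*
 * Example (same assumed configuration as above): the counters RxQ
 * (SW index 0) uses EvQ 1 + 0 == 1, while ethdev Rx queue 0 uses
 * EvQ sfc_nb_reserved_evq() + 0 == 4.
 */
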
static inline sfc_ethdev_qid_t
sfc_ethdev_tx_qid_by_txq_sw_index(struct sfc_adapter_shared *sas,
				  sfc_sw_index_t txq_sw_index)
{
	if (txq_sw_index < sfc_nb_txq_reserved(sas))
		return SFC_ETHDEV_QID_INVALID;

	return txq_sw_index - sfc_nb_txq_reserved(sas);
}

static inline sfc_sw_index_t
sfc_txq_sw_index_by_ethdev_tx_qid(struct sfc_adapter_shared *sas,
				  sfc_ethdev_qid_t ethdev_qid)
{
	return sfc_nb_txq_reserved(sas) + ethdev_qid;
}

static inline sfc_sw_index_t
sfc_evq_sw_index_by_txq_sw_index(struct sfc_adapter *sa,
				 sfc_sw_index_t txq_sw_index)
{
	struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
	sfc_ethdev_qid_t ethdev_qid;

	ethdev_qid = sfc_ethdev_tx_qid_by_txq_sw_index(sas, txq_sw_index);
	if (ethdev_qid == SFC_ETHDEV_QID_INVALID) {
		/* Reserved Tx EvQs are the last of the reserved EvQs */
		return sfc_nb_reserved_evq(sas) - sfc_nb_txq_reserved(sas) +
			txq_sw_index;
	}

	return sfc_nb_reserved_evq(sas) + sa->eth_dev->data->nb_rx_queues +
		ethdev_qid;
}

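/*
 * Example (same assumed configuration): the reserved representor TxQ
 * (SW index 0) uses EvQ 4 - 1 + 0 == 3, and ethdev Tx queue 0 with,
 * say, two ethdev Rx queues uses EvQ 4 + 2 + 0 == 6.
 */
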
int sfc_ev_attach(struct sfc_adapter *sa);
void sfc_ev_detach(struct sfc_adapter *sa);
int sfc_ev_start(struct sfc_adapter *sa);
void sfc_ev_stop(struct sfc_adapter *sa);

int sfc_ev_qinit(struct sfc_adapter *sa,
		 enum sfc_evq_type type, unsigned int type_index,
		 unsigned int entries, int socket_id, struct sfc_evq **evqp);
void sfc_ev_qfini(struct sfc_evq *evq);
int sfc_ev_qstart(struct sfc_evq *evq, unsigned int hw_index);
void sfc_ev_qstop(struct sfc_evq *evq);

int sfc_ev_qprime(struct sfc_evq *evq);
void sfc_ev_qpoll(struct sfc_evq *evq);

void sfc_ev_mgmt_qpoll(struct sfc_adapter *sa);

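/*
 * Typical EvQ lifecycle implied by the prototypes above (an illustrative
 * sketch, not code from the driver sources; the entry count and hw_index
 * values are assumptions, and error handling is elided):
 *
 *	struct sfc_evq *evq;
 *	int rc;
 *
 *	rc = sfc_ev_qinit(sa, SFC_EVQ_TYPE_RX, 0, 1024, SOCKET_ID_ANY, &evq);
 *	rc = sfc_ev_qstart(evq, hw_index);
 *	sfc_ev_qpoll(evq);	(process pending events)
 *	sfc_ev_qstop(evq);
 *	sfc_ev_qfini(evq);
 */
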
#ifdef __cplusplus
}
#endif

#endif /* _SFC_EV_H_ */