/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2021 Xilinx, Inc.
 * Copyright(c) 2016-2019 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */
13 #include <ethdev_driver.h>
28 SFC_EVQ_UNINITIALIZED = 0,
37 SFC_EVQ_TYPE_MGMT = 0,
45 /* Used on datapath */
47 const efx_ev_callbacks_t *callbacks;
48 unsigned int read_ptr;
49 unsigned int read_ptr_primed;
52 struct sfc_dp_rxq *dp_rxq;
53 struct sfc_dp_txq *dp_txq;
55 /* Not used on datapath */
56 struct sfc_adapter *sa;
57 unsigned int evq_index;
58 enum sfc_evq_state init_state;
59 enum sfc_evq_type type;
63 static inline sfc_sw_index_t
64 sfc_mgmt_evq_sw_index(__rte_unused const struct sfc_adapter_shared *sas)
/*
 * Number of Rx queues set aside for the driver's internal use.
 * Currently only the counters Rx queue (if allocated) falls into
 * this category.
 */
static inline unsigned int
sfc_nb_reserved_rxq(const struct sfc_adapter_shared *sas)
{
	return sfc_nb_counter_rxq(sas);
}
/*
 * Number of event queues set aside for the driver's internal use:
 * one for management events plus one per reserved Rx queue.
 */
static inline unsigned int
sfc_nb_reserved_evq(const struct sfc_adapter_shared *sas)
{
	/* The management EvQ plus an EvQ for each reserved RxQ */
	return 1 + sfc_nb_reserved_rxq(sas);
}
84 * The mapping functions that return SW index of a specific reserved
85 * queue rely on the relative order of reserved queues. Some reserved
86 * queues are optional, and if they are disabled or not supported, then
87 * the function for that specific reserved queue will return previous
88 * valid index of a reserved queue in the dependency chain or
89 * SFC_SW_INDEX_INVALID if it is the first reserved queue in the chain.
90 * If at least one of the reserved queues in the chain is enabled, then
91 * the corresponding function will give valid SW index, even if previous
92 * functions in the chain returned SFC_SW_INDEX_INVALID, since this value
93 * is one less than the first valid SW index.
 * The dependency mechanism is utilized to avoid rigid defines for SW indices
96 * for reserved queues and to allow these indices to shrink and make space
97 * for ethdev queue indices when some of the reserved queues are disabled.
100 static inline sfc_sw_index_t
101 sfc_counters_rxq_sw_index(const struct sfc_adapter_shared *sas)
103 return sas->counters_rxq_allocated ? 0 : SFC_SW_INDEX_INVALID;
 * Functions below define event queue to transmit/receive queue and vice
 * versa mapping.
109 * SFC_ETHDEV_QID_INVALID is returned when sw_index is converted to
 * ethdev_qid, but sw_index represents a reserved queue for driver's
 * internal use.
112 * Own event queue is allocated for management, each Rx and each Tx queue.
113 * Zero event queue is used for management events.
114 * When counters are supported, one Rx event queue is reserved.
115 * Rx event queues follow reserved event queues.
116 * Tx event queues follow Rx event queues.
119 static inline sfc_ethdev_qid_t
120 sfc_ethdev_rx_qid_by_rxq_sw_index(struct sfc_adapter_shared *sas,
121 sfc_sw_index_t rxq_sw_index)
123 if (rxq_sw_index < sfc_nb_reserved_rxq(sas))
124 return SFC_ETHDEV_QID_INVALID;
126 return rxq_sw_index - sfc_nb_reserved_rxq(sas);
129 static inline sfc_sw_index_t
130 sfc_rxq_sw_index_by_ethdev_rx_qid(struct sfc_adapter_shared *sas,
131 sfc_ethdev_qid_t ethdev_qid)
133 return sfc_nb_reserved_rxq(sas) + ethdev_qid;
136 static inline sfc_sw_index_t
137 sfc_evq_sw_index_by_rxq_sw_index(struct sfc_adapter *sa,
138 sfc_sw_index_t rxq_sw_index)
140 struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
141 sfc_ethdev_qid_t ethdev_qid;
143 ethdev_qid = sfc_ethdev_rx_qid_by_rxq_sw_index(sas, rxq_sw_index);
144 if (ethdev_qid == SFC_ETHDEV_QID_INVALID) {
145 /* One EvQ is reserved for management */
146 return 1 + rxq_sw_index;
149 return sfc_nb_reserved_evq(sas) + ethdev_qid;
152 static inline sfc_ethdev_qid_t
153 sfc_ethdev_tx_qid_by_txq_sw_index(__rte_unused struct sfc_adapter_shared *sas,
154 sfc_sw_index_t txq_sw_index)
156 /* Only ethdev queues are present for now */
160 static inline sfc_sw_index_t
161 sfc_txq_sw_index_by_ethdev_tx_qid(__rte_unused struct sfc_adapter_shared *sas,
162 sfc_ethdev_qid_t ethdev_qid)
164 /* Only ethdev queues are present for now */
168 static inline sfc_sw_index_t
169 sfc_evq_sw_index_by_txq_sw_index(struct sfc_adapter *sa,
170 sfc_sw_index_t txq_sw_index)
172 return sfc_nb_reserved_evq(sfc_sa2shared(sa)) +
173 sa->eth_dev->data->nb_rx_queues + txq_sw_index;
/* Adapter-level event module lifecycle */
int sfc_ev_attach(struct sfc_adapter *sa);
void sfc_ev_detach(struct sfc_adapter *sa);
int sfc_ev_start(struct sfc_adapter *sa);
void sfc_ev_stop(struct sfc_adapter *sa);

/* Per-queue initialization/shutdown and start/stop */
int sfc_ev_qinit(struct sfc_adapter *sa,
		 enum sfc_evq_type type, unsigned int type_index,
		 unsigned int entries, int socket_id, struct sfc_evq **evqp);
void sfc_ev_qfini(struct sfc_evq *evq);
int sfc_ev_qstart(struct sfc_evq *evq, unsigned int hw_index);
void sfc_ev_qstop(struct sfc_evq *evq);

/* Event queue arming and polling */
int sfc_ev_qprime(struct sfc_evq *evq);
void sfc_ev_qpoll(struct sfc_evq *evq);

void sfc_ev_mgmt_qpoll(struct sfc_adapter *sa);
196 #endif /* _SFC_EV_H_ */