/*
 * Copyright (c) 2016 QLogic Corporation.
 *
 * See LICENSE.qede_pmd for copyright and licensing details.
 */

#include "qede_ethdev.h"
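/*
 * Start the L2 vport on every hwfn of the adapter and reset its statistics.
 * TPA (GRO) mode, inner-VLAN stripping, PTP handling and the MTU are taken
 * from the caller-supplied qed_start_vport_params.
 */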
static int
qed_start_vport(struct ecore_dev *edev, struct qed_start_vport_params *p_params)
{
	int rc, i;

	for_each_hwfn(edev, i) {
		struct ecore_hwfn *p_hwfn = &edev->hwfns[i];
		uint8_t tx_switching = 0;
		struct ecore_sp_vport_start_params start = { 0 };

		start.tpa_mode = p_params->gro_enable ? ECORE_TPA_MODE_GRO :
				 ECORE_TPA_MODE_NONE;
		start.remove_inner_vlan = p_params->remove_inner_vlan;
		start.tx_switching = tx_switching;
		start.only_untagged = false; /* tagged frames are accepted */
		start.drop_ttl0 = p_params->drop_ttl0;
		start.concrete_fid = p_hwfn->hw_info.concrete_fid;
		start.opaque_fid = p_hwfn->hw_info.opaque_fid;
		start.handle_ptp_pkts = p_params->handle_ptp_pkts;
		start.vport_id = p_params->vport_id;
		start.max_buffers_per_cqe = 16; /* TODO - is this right? */
		start.mtu = p_params->mtu;
		/* @DPDK - Disable FW placement */
		start.zero_placement_offset = 1;

		rc = ecore_sp_vport_start(p_hwfn, &start);
		if (rc) {
			DP_ERR(edev, "Failed to start VPORT\n");
			return rc;
		}

		DP_VERBOSE(edev, ECORE_MSG_SPQ,
			   "Started V-PORT %d with MTU %d\n",
			   p_params->vport_id, p_params->mtu);
	}

	ecore_reset_vport_stats(edev);

	return 0;
}
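/* Tear the vport down on every hwfn; bail out on the first failure. */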
static int qed_stop_vport(struct ecore_dev *edev, uint8_t vport_id)
{
	int rc, i;

	for_each_hwfn(edev, i) {
		struct ecore_hwfn *p_hwfn = &edev->hwfns[i];

		rc = ecore_sp_vport_stop(p_hwfn,
					 p_hwfn->hw_info.opaque_fid, vport_id);
		if (rc) {
			DP_ERR(edev, "Failed to stop VPORT\n");
			return rc;
		}
	}

	return 0;
}
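/*
 * On CMT devices (more than one hwfn) the RSS indirection table may refer to
 * more queues than a single hwfn owns.  Remap each entry modulo the per-hwfn
 * queue count; return true when the remapped table keeps RSS usable, false
 * when RSS has to be disabled (only one queue per hwfn).
 */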
bool qed_update_rss_parm_cmt(struct ecore_dev *edev, uint16_t *p_tbl)
{
	bool rss_mode = 0; /* disable */
	int max = 0, divisor, k;

	/* Find largest entry, since it's possible RSS needs to
	 * be disabled [in case only 1 queue per-hwfn]
	 */
	for (k = 0; k < ECORE_RSS_IND_TABLE_SIZE; k++)
		max = (max > p_tbl[k]) ? max : p_tbl[k];

	/* Either fix RSS values or disable RSS */
	if (edev->num_hwfns < max + 1) {
		divisor = (max + edev->num_hwfns - 1) / edev->num_hwfns;
		DP_VERBOSE(edev, ECORE_MSG_SPQ,
			   "CMT - fixing RSS values (modulo %02x)\n",
			   divisor);
		for (k = 0; k < ECORE_RSS_IND_TABLE_SIZE; k++)
			p_tbl[k] = p_tbl[k] % divisor;

		rss_mode = 1;
	}

	return rss_mode;
}
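/*
 * Translate a PMD-level vport update request into ecore slow-path update
 * parameters and apply it on every hwfn.
 */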
static int
qed_update_vport(struct ecore_dev *edev, struct qed_update_vport_params *params)
{
	struct ecore_sp_vport_update_params sp_params;
	struct ecore_rss_params sp_rss_params;
	int rc, i;

	memset(&sp_params, 0, sizeof(sp_params));
	memset(&sp_rss_params, 0, sizeof(sp_rss_params));

	/* Translate protocol params into sp params */
	sp_params.vport_id = params->vport_id;
	sp_params.update_vport_active_rx_flg = params->update_vport_active_flg;
	sp_params.update_vport_active_tx_flg = params->update_vport_active_flg;
	sp_params.vport_active_rx_flg = params->vport_active_flg;
	sp_params.vport_active_tx_flg = params->vport_active_flg;
	sp_params.update_inner_vlan_removal_flg =
	    params->update_inner_vlan_removal_flg;
	sp_params.inner_vlan_removal_flg = params->inner_vlan_removal_flg;
	sp_params.update_tx_switching_flg = params->update_tx_switching_flg;
	sp_params.tx_switching_flg = params->tx_switching_flg;
	sp_params.accept_any_vlan = params->accept_any_vlan;
	sp_params.update_accept_any_vlan_flg =
	    params->update_accept_any_vlan_flg;
	sp_params.mtu = params->mtu;

	for_each_hwfn(edev, i) {
		struct ecore_hwfn *p_hwfn = &edev->hwfns[i];

		sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = ecore_sp_vport_update(p_hwfn, &sp_params,
					   ECORE_SPQ_MODE_EBLOCK, NULL);
		if (rc) {
			DP_ERR(edev, "Failed to update VPORT\n");
			return rc;
		}

		DP_VERBOSE(edev, ECORE_MSG_SPQ,
			   "Updated V-PORT %d: active_flag %d [update %d]\n",
			   params->vport_id, params->vport_active_flg,
			   params->update_vport_active_flg);
	}

	return 0;
}
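/*
 * Start an Rx queue.  rss_num selects the owning hwfn and the global queue id
 * is converted to a per-hwfn id before the slow-path request is issued.
 */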
static int
qed_start_rxq(struct ecore_dev *edev,
	      uint8_t rss_num,
	      struct ecore_queue_start_common_params *p_params,
	      uint16_t bd_max_bytes,
	      dma_addr_t bd_chain_phys_addr,
	      dma_addr_t cqe_pbl_addr,
	      uint16_t cqe_pbl_size, void OSAL_IOMEM * *pp_prod)
{
	struct ecore_hwfn *p_hwfn;
	int rc, hwfn_index;

	hwfn_index = rss_num % edev->num_hwfns;
	p_hwfn = &edev->hwfns[hwfn_index];

	p_params->queue_id = p_params->queue_id / edev->num_hwfns;
	p_params->stats_id = p_params->vport_id;

	rc = ecore_sp_eth_rx_queue_start(p_hwfn,
					 p_hwfn->hw_info.opaque_fid,
					 p_params,
					 bd_max_bytes,
					 bd_chain_phys_addr,
					 cqe_pbl_addr, cqe_pbl_size, pp_prod);
	if (rc) {
		DP_ERR(edev, "Failed to start RXQ#%d\n", p_params->queue_id);
		return rc;
	}

	DP_VERBOSE(edev, ECORE_MSG_SPQ,
		   "Started RX-Q %d [rss_num %d] on V-PORT %d and SB %d\n",
		   p_params->queue_id, rss_num, p_params->vport_id,
		   p_params->sb);

	return 0;
}
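/* Stop an Rx queue on the hwfn selected by params->rss_id. */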
static int
qed_stop_rxq(struct ecore_dev *edev, struct qed_stop_rxq_params *params)
{
	struct ecore_hwfn *p_hwfn;
	int rc, hwfn_index;

	hwfn_index = params->rss_id % edev->num_hwfns;
	p_hwfn = &edev->hwfns[hwfn_index];

	rc = ecore_sp_eth_rx_queue_stop(p_hwfn,
					params->rx_queue_id / edev->num_hwfns,
					params->eq_completion_only, false);
	if (rc) {
		DP_ERR(edev, "Failed to stop RXQ#%d\n", params->rx_queue_id);
		return rc;
	}

	return 0;
}
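/*
 * Start a Tx queue on the hwfn selected by rss_num, converting the global
 * queue id to a per-hwfn queue id and queue-zone id first.
 */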
static int
qed_start_txq(struct ecore_dev *edev,
	      uint8_t rss_num,
	      struct ecore_queue_start_common_params *p_params,
	      dma_addr_t pbl_addr,
	      uint16_t pbl_size, void OSAL_IOMEM * *pp_doorbell)
{
	struct ecore_hwfn *p_hwfn;
	int rc, hwfn_index;

	hwfn_index = rss_num % edev->num_hwfns;
	p_hwfn = &edev->hwfns[hwfn_index];

	p_params->queue_id = p_params->queue_id / edev->num_hwfns;
	p_params->qzone_id = p_params->queue_id;
	p_params->stats_id = p_params->vport_id;

	rc = ecore_sp_eth_tx_queue_start(p_hwfn,
					 p_hwfn->hw_info.opaque_fid,
					 p_params,
					 0 /* tc */,
					 pbl_addr, pbl_size, pp_doorbell);
	if (rc) {
		DP_ERR(edev, "Failed to start TXQ#%d\n", p_params->queue_id);
		return rc;
	}

	DP_VERBOSE(edev, ECORE_MSG_SPQ,
		   "Started TX-Q %d [rss_num %d] on V-PORT %d and SB %d\n",
		   p_params->queue_id, rss_num, p_params->vport_id,
		   p_params->sb);

	return 0;
}
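/* Stop a Tx queue on the hwfn selected by params->rss_id. */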
static int
qed_stop_txq(struct ecore_dev *edev, struct qed_stop_txq_params *params)
{
	struct ecore_hwfn *p_hwfn;
	int rc, hwfn_index;

	hwfn_index = params->rss_id % edev->num_hwfns;
	p_hwfn = &edev->hwfns[hwfn_index];

	rc = ecore_sp_eth_tx_queue_stop(p_hwfn,
					params->tx_queue_id / edev->num_hwfns);
	if (rc) {
		DP_ERR(edev, "Failed to stop TXQ#%d\n", params->tx_queue_id);
		return rc;
	}

	return 0;
}
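/* Hand a slow-path Rx CQE back to the ecore layer for completion. */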
static int
qed_fp_cqe_completion(struct ecore_dev *edev,
		      uint8_t rss_id, struct eth_slow_path_rx_cqe *cqe)
{
	return ecore_eth_cqe_completion(&edev->hwfns[rss_id % edev->num_hwfns],
					cqe);
}
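/* Quiesce and (re)start the fast path on the device's hwfns. */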
static int qed_fastpath_stop(struct ecore_dev *edev)
{
	ecore_hw_stop_fastpath(edev);

	return 0;
}

static void qed_fastpath_start(struct ecore_dev *edev)
{
	struct ecore_hwfn *p_hwfn;
	int i;

	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		ecore_hw_start_fastpath(p_hwfn);
	}
}
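/* Thin wrapper that forwards statistics collection to the ecore layer. */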
static void
qed_get_vport_stats(struct ecore_dev *edev, struct ecore_eth_stats *stats)
{
	ecore_get_vport_stats(edev, stats);
}
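/*
 * Program the Rx/Tx accept filters for the requested Rx mode.  Matched
 * unicast, matched multicast and broadcast are always accepted; promiscuous
 * and multicast-promiscuous modes additionally accept unmatched frames.
 */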
int qed_configure_filter_rx_mode(struct rte_eth_dev *eth_dev,
				 enum qed_filter_rx_mode_type type)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_filter_accept_flags flags;

	memset(&flags, 0, sizeof(flags));

	flags.update_rx_mode_config = 1;
	flags.update_tx_mode_config = 1;
	flags.rx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
				 ECORE_ACCEPT_MCAST_MATCHED |
				 ECORE_ACCEPT_BCAST;

	flags.tx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
				 ECORE_ACCEPT_MCAST_MATCHED |
				 ECORE_ACCEPT_BCAST;

	if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) {
		flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
		if (IS_VF(edev)) {
			flags.tx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
			DP_INFO(edev, "Enabling Tx unmatched flag for VF\n");
		}
	} else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) {
		flags.rx_accept_filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
	} else if (type == (QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC |
			    QED_FILTER_RX_MODE_TYPE_PROMISC)) {
		flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED |
					  ECORE_ACCEPT_MCAST_UNMATCHED;
	}

	return ecore_filter_accept_cmd(edev, 0, flags, false, false,
				       ECORE_SPQ_MODE_CB, NULL);
}
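/* L2 callbacks exported to the rest of the PMD via qed_get_eth_ops(). */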
static const struct qed_eth_ops qed_eth_ops_pass = {
	INIT_STRUCT_FIELD(common, &qed_common_ops_pass),
	INIT_STRUCT_FIELD(fill_dev_info, &qed_fill_eth_dev_info),
	INIT_STRUCT_FIELD(vport_start, &qed_start_vport),
	INIT_STRUCT_FIELD(vport_stop, &qed_stop_vport),
	INIT_STRUCT_FIELD(vport_update, &qed_update_vport),
	INIT_STRUCT_FIELD(q_rx_start, &qed_start_rxq),
	INIT_STRUCT_FIELD(q_tx_start, &qed_start_txq),
	INIT_STRUCT_FIELD(q_rx_stop, &qed_stop_rxq),
	INIT_STRUCT_FIELD(q_tx_stop, &qed_stop_txq),
	INIT_STRUCT_FIELD(eth_cqe_completion, &qed_fp_cqe_completion),
	INIT_STRUCT_FIELD(fastpath_stop, &qed_fastpath_stop),
	INIT_STRUCT_FIELD(fastpath_start, &qed_fastpath_start),
	INIT_STRUCT_FIELD(get_vport_stats, &qed_get_vport_stats),
};
const struct qed_eth_ops *qed_get_eth_ops(void)
{
	return &qed_eth_ops_pass;
}