/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Intel Corporation
 */

#include <errno.h>

#include <rte_ethdev.h>

#include "ice_dcf_ethdev.h"
#include "ice_rxtx.h"
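
/*
 * A VF representor has no Rx/Tx data path of its own, so the burst
 * callbacks are stubs that always report zero packets.
 */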
static uint16_t
ice_dcf_vf_repr_rx_burst(__rte_unused void *rxq,
			 __rte_unused struct rte_mbuf **rx_pkts,
			 __rte_unused uint16_t nb_pkts)
{
	return 0;
}

static uint16_t
ice_dcf_vf_repr_tx_burst(__rte_unused void *txq,
			 __rte_unused struct rte_mbuf **tx_pkts,
			 __rte_unused uint16_t nb_pkts)
{
	return 0;
}
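
/*
 * Minimal device lifecycle handlers: configure (re)applies the cached VLAN
 * settings, start/stop only toggle the software link status, and close
 * releases the representor again.
 */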
static int
ice_dcf_vf_repr_dev_configure(struct rte_eth_dev *dev)
{
	ice_dcf_vf_repr_init_vlan(dev);

	return 0;
}

static int
ice_dcf_vf_repr_dev_start(struct rte_eth_dev *dev)
{
	dev->data->dev_link.link_status = ETH_LINK_UP;

	return 0;
}

static int
ice_dcf_vf_repr_dev_stop(struct rte_eth_dev *dev)
{
	dev->data->dev_link.link_status = ETH_LINK_DOWN;

	return 0;
}

static int
ice_dcf_vf_repr_dev_close(struct rte_eth_dev *dev)
{
	return ice_dcf_vf_repr_uninit(dev);
}
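
/*
 * The representor owns no hardware queues, so queue setup and the
 * promiscuous/allmulticast/link-update callbacks below accept the request
 * and silently succeed.
 */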
static int
ice_dcf_vf_repr_rx_queue_setup(__rte_unused struct rte_eth_dev *dev,
			       __rte_unused uint16_t queue_id,
			       __rte_unused uint16_t nb_desc,
			       __rte_unused unsigned int socket_id,
			       __rte_unused const struct rte_eth_rxconf *conf,
			       __rte_unused struct rte_mempool *pool)
{
	return 0;
}

static int
ice_dcf_vf_repr_tx_queue_setup(__rte_unused struct rte_eth_dev *dev,
			       __rte_unused uint16_t queue_id,
			       __rte_unused uint16_t nb_desc,
			       __rte_unused unsigned int socket_id,
			       __rte_unused const struct rte_eth_txconf *conf)
{
	return 0;
}

static int
ice_dcf_vf_repr_promiscuous_enable(__rte_unused struct rte_eth_dev *ethdev)
{
	return 0;
}

static int
ice_dcf_vf_repr_promiscuous_disable(__rte_unused struct rte_eth_dev *ethdev)
{
	return 0;
}

static int
ice_dcf_vf_repr_allmulticast_enable(__rte_unused struct rte_eth_dev *dev)
{
	return 0;
}

static int
ice_dcf_vf_repr_allmulticast_disable(__rte_unused struct rte_eth_dev *dev)
{
	return 0;
}

static int
ice_dcf_vf_repr_link_update(__rte_unused struct rte_eth_dev *ethdev,
			    __rte_unused int wait_to_complete)
{
	return 0;
}
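
/*
 * Look up the DCF hardware context through the parent DCF ethdev backing
 * this representor; returns NULL if the DCF port has already been released.
 */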
static __rte_always_inline struct ice_dcf_hw *
ice_dcf_vf_repr_hw(struct ice_dcf_vf_repr *repr)
{
	struct ice_dcf_adapter *dcf_adapter =
			repr->dcf_eth_dev->data->dev_private;

	if (!dcf_adapter) {
		PMD_DRV_LOG(ERR, "DCF for VF representor has been released\n");
		return NULL;
	}

	return &dcf_adapter->real_hw;
}
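
/*
 * Report capabilities on behalf of the VF: queue counts and RSS sizes come
 * from the resources cached by the DCF, while switch_info identifies the
 * representor by switch domain and VF id.
 */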
static int
ice_dcf_vf_repr_dev_info_get(struct rte_eth_dev *dev,
			     struct rte_eth_dev_info *dev_info)
{
	struct ice_dcf_vf_repr *repr = dev->data->dev_private;
	struct ice_dcf_hw *dcf_hw = ice_dcf_vf_repr_hw(repr);

	if (!dcf_hw)
		return -EIO;

	dev_info->device = dev->device;
	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_queues = dcf_hw->vsi_res->num_queue_pairs;
	dev_info->max_tx_queues = dcf_hw->vsi_res->num_queue_pairs;
	dev_info->min_rx_bufsize = ICE_BUF_SIZE_MIN;
	dev_info->max_rx_pktlen = ICE_FRAME_SIZE_MAX;
	dev_info->hash_key_size = dcf_hw->vf_res->rss_key_size;
	dev_info->reta_size = dcf_hw->vf_res->rss_lut_size;
	dev_info->flow_type_rss_offloads = ICE_RSS_OFFLOAD_ALL;

	dev_info->rx_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_RX_OFFLOAD_SCATTER |
		DEV_RX_OFFLOAD_JUMBO_FRAME |
		DEV_RX_OFFLOAD_VLAN_FILTER |
		DEV_RX_OFFLOAD_VLAN_EXTEND |
		DEV_RX_OFFLOAD_RSS_HASH;
	dev_info->tx_offload_capa =
		DEV_TX_OFFLOAD_VLAN_INSERT |
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM |
		DEV_TX_OFFLOAD_SCTP_CKSUM |
		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_TX_OFFLOAD_TCP_TSO |
		DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
		DEV_TX_OFFLOAD_GRE_TNL_TSO |
		DEV_TX_OFFLOAD_IPIP_TNL_TSO |
		DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
		DEV_TX_OFFLOAD_MULTI_SEGS;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = ICE_DEFAULT_RX_PTHRESH,
			.hthresh = ICE_DEFAULT_RX_HTHRESH,
			.wthresh = ICE_DEFAULT_RX_WTHRESH,
		},
		.rx_free_thresh = ICE_DEFAULT_RX_FREE_THRESH,
		.rx_drop_en = 0,
		.offloads = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = ICE_DEFAULT_TX_PTHRESH,
			.hthresh = ICE_DEFAULT_TX_HTHRESH,
			.wthresh = ICE_DEFAULT_TX_WTHRESH,
		},
		.tx_free_thresh = ICE_DEFAULT_TX_FREE_THRESH,
		.tx_rs_thresh = ICE_DEFAULT_TX_RSBIT_THRESH,
		.offloads = 0,
	};

	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = ICE_MAX_RING_DESC,
		.nb_min = ICE_MIN_RING_DESC,
		.nb_align = ICE_ALIGN_RING_DESC,
	};

	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = ICE_MAX_RING_DESC,
		.nb_min = ICE_MIN_RING_DESC,
		.nb_align = ICE_ALIGN_RING_DESC,
	};

	dev_info->switch_info.name = dcf_hw->eth_dev->device->name;
	dev_info->switch_info.domain_id = repr->switch_domain_id;
	dev_info->switch_info.port_id = repr->vf_id;

	return 0;
}
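
/* VLAN offloads require the VIRTCHNL_VF_OFFLOAD_VLAN_V2 capability. */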
static __rte_always_inline bool
ice_dcf_vlan_offload_ena(struct ice_dcf_vf_repr *repr)
{
	return !!(ice_dcf_vf_repr_hw(repr)->vf_res->vf_cap_flags &
		  VIRTCHNL_VF_OFFLOAD_VLAN_V2);
}
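
/*
 * Build and send a VIRTCHNL_OP_DCF_VLAN_OFFLOAD request for this VF over
 * the DCF virtchnl channel.
 */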
static int
ice_dcf_vlan_offload_config(struct ice_dcf_vf_repr *repr,
			    struct virtchnl_dcf_vlan_offload *vlan_offload)
{
	struct dcf_virtchnl_cmd args;
	int err;

	memset(&args, 0, sizeof(args));
	args.v_op = VIRTCHNL_OP_DCF_VLAN_OFFLOAD;
	args.req_msg = (uint8_t *)vlan_offload;
	args.req_msglen = sizeof(*vlan_offload);

	err = ice_dcf_execute_virtchnl_cmd(ice_dcf_vf_repr_hw(repr), &args);
	if (err)
		PMD_DRV_LOG(ERR,
			    "Failed to execute VIRTCHNL_OP_DCF_VLAN_OFFLOAD command");

	return err;
}
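
/*
 * vlan_offload_set handler: only outer VLAN stripping is supported, and it
 * cannot be enabled while a port VLAN is active.
 */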
static int
ice_dcf_vf_repr_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	struct ice_dcf_vf_repr *repr = dev->data->dev_private;
	struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
	struct virtchnl_dcf_vlan_offload vlan_offload;
	int err;

	if (!ice_dcf_vlan_offload_ena(repr))
		return -ENOTSUP;

	/* VLAN stripping setting */
	if (mask & ETH_VLAN_STRIP_MASK) {
		bool enable = !!(dev_conf->rxmode.offloads &
				 DEV_RX_OFFLOAD_VLAN_STRIP);

		/* Port VLAN insertion and stripping are mutually exclusive */
		if (enable && repr->outer_vlan_info.port_vlan_ena) {
			PMD_DRV_LOG(ERR,
				    "Disable the port VLAN first\n");
			return -EINVAL;
		}

		memset(&vlan_offload, 0, sizeof(vlan_offload));

		if (enable)
			vlan_offload.vlan_flags =
					VIRTCHNL_DCF_VLAN_STRIP_INTO_RX_DESC <<
					VIRTCHNL_DCF_VLAN_STRIP_MODE_S;
		else if (repr->outer_vlan_info.stripping_ena && !enable)
			vlan_offload.vlan_flags =
					VIRTCHNL_DCF_VLAN_STRIP_DISABLE <<
					VIRTCHNL_DCF_VLAN_STRIP_MODE_S;

		/* Only issue the virtchnl command when the state changes */
		if (vlan_offload.vlan_flags) {
			vlan_offload.vf_id = repr->vf_id;
			vlan_offload.tpid = repr->outer_vlan_info.tpid;
			vlan_offload.vlan_flags |=
					VIRTCHNL_DCF_VLAN_TYPE_OUTER <<
					VIRTCHNL_DCF_VLAN_TYPE_S;

			err = ice_dcf_vlan_offload_config(repr, &vlan_offload);
			if (err)
				return err;

			repr->outer_vlan_info.stripping_ena = enable;
		}
	}

	return 0;
}
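
/*
 * vlan_pvid_set handler: enables or disables port-based outer VLAN
 * insertion for the VF and caches the resulting state.
 */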
static int
ice_dcf_vf_repr_vlan_pvid_set(struct rte_eth_dev *dev,
			      uint16_t pvid, int on)
{
	struct ice_dcf_vf_repr *repr = dev->data->dev_private;
	struct virtchnl_dcf_vlan_offload vlan_offload;
	int err;

	if (!ice_dcf_vlan_offload_ena(repr))
		return -ENOTSUP;

	/* Port VLAN insertion cannot coexist with VLAN stripping */
	if (repr->outer_vlan_info.stripping_ena) {
		PMD_DRV_LOG(ERR,
			    "Disable VLAN stripping first\n");
		return -EINVAL;
	}

	if (pvid > RTE_ETHER_MAX_VLAN_ID)
		return -EINVAL;

	memset(&vlan_offload, 0, sizeof(vlan_offload));

	if (on)
		vlan_offload.vlan_flags =
				(VIRTCHNL_DCF_VLAN_INSERT_PORT_BASED <<
				 VIRTCHNL_DCF_VLAN_INSERT_MODE_S);
	else
		vlan_offload.vlan_flags =
				(VIRTCHNL_DCF_VLAN_INSERT_DISABLE <<
				 VIRTCHNL_DCF_VLAN_INSERT_MODE_S);

	vlan_offload.vf_id = repr->vf_id;
	vlan_offload.tpid = repr->outer_vlan_info.tpid;
	vlan_offload.vlan_flags |= (VIRTCHNL_DCF_VLAN_TYPE_OUTER <<
				    VIRTCHNL_DCF_VLAN_TYPE_S);
	vlan_offload.vlan_id = pvid;

	err = ice_dcf_vlan_offload_config(repr, &vlan_offload);
	if (!err) {
		/* Cache the new port VLAN state on success */
		if (on) {
			repr->outer_vlan_info.port_vlan_ena = true;
			repr->outer_vlan_info.vid = pvid;
		} else {
			repr->outer_vlan_info.port_vlan_ena = false;
		}
	}

	return err;
}
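
/*
 * vlan_tpid_set handler: only the outer TPID may be changed; any active
 * port VLAN or stripping configuration is re-applied with the new TPID.
 */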
static int
ice_dcf_vf_repr_vlan_tpid_set(struct rte_eth_dev *dev,
			      enum rte_vlan_type vlan_type, uint16_t tpid)
{
	struct ice_dcf_vf_repr *repr = dev->data->dev_private;
	int err = 0;

	if (!ice_dcf_vlan_offload_ena(repr))
		return -ENOTSUP;

	if (vlan_type != ETH_VLAN_TYPE_OUTER) {
		PMD_DRV_LOG(ERR,
			    "Can accelerate only outer VLAN in QinQ\n");
		return -EINVAL;
	}

	if (tpid != RTE_ETHER_TYPE_QINQ &&
	    tpid != RTE_ETHER_TYPE_VLAN &&
	    tpid != RTE_ETHER_TYPE_QINQ1) {
		PMD_DRV_LOG(ERR,
			    "Invalid TPID: 0x%04x\n", tpid);
		return -EINVAL;
	}

	repr->outer_vlan_info.tpid = tpid;

	/* Re-apply the port VLAN and stripping settings with the new TPID */
	if (repr->outer_vlan_info.port_vlan_ena) {
		err = ice_dcf_vf_repr_vlan_pvid_set(dev,
						    repr->outer_vlan_info.vid,
						    true);
		if (err) {
			PMD_DRV_LOG(ERR,
				    "Failed to reset port VLAN: %d\n",
				    err);
			return err;
		}
	}

	if (repr->outer_vlan_info.stripping_ena) {
		err = ice_dcf_vf_repr_vlan_offload_set(dev,
						       ETH_VLAN_STRIP_MASK);
		if (err) {
			PMD_DRV_LOG(ERR,
				    "Failed to reset VLAN stripping: %d\n",
				    err);
			return err;
		}
	}

	return 0;
}
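
/* eth_dev ops exposed by a DCF VF representor port */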
static const struct eth_dev_ops ice_dcf_vf_repr_dev_ops = {
	.dev_configure = ice_dcf_vf_repr_dev_configure,
	.dev_start = ice_dcf_vf_repr_dev_start,
	.dev_stop = ice_dcf_vf_repr_dev_stop,
	.dev_close = ice_dcf_vf_repr_dev_close,
	.dev_infos_get = ice_dcf_vf_repr_dev_info_get,
	.rx_queue_setup = ice_dcf_vf_repr_rx_queue_setup,
	.tx_queue_setup = ice_dcf_vf_repr_tx_queue_setup,
	.promiscuous_enable = ice_dcf_vf_repr_promiscuous_enable,
	.promiscuous_disable = ice_dcf_vf_repr_promiscuous_disable,
	.allmulticast_enable = ice_dcf_vf_repr_allmulticast_enable,
	.allmulticast_disable = ice_dcf_vf_repr_allmulticast_disable,
	.link_update = ice_dcf_vf_repr_link_update,
	.vlan_offload_set = ice_dcf_vf_repr_vlan_offload_set,
	.vlan_pvid_set = ice_dcf_vf_repr_vlan_pvid_set,
	.vlan_tpid_set = ice_dcf_vf_repr_vlan_tpid_set,
};
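
/*
 * ethdev init callback for a VF representor: record the backing DCF port,
 * switch domain and VF id, install the stub ops/burst functions and assign
 * a random MAC address.
 */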
int
ice_dcf_vf_repr_init(struct rte_eth_dev *vf_rep_eth_dev, void *init_param)
{
	struct ice_dcf_vf_repr *repr = vf_rep_eth_dev->data->dev_private;
	struct ice_dcf_vf_repr_param *param = init_param;

	repr->dcf_eth_dev = param->dcf_eth_dev;
	repr->switch_domain_id = param->switch_domain_id;
	repr->vf_id = param->vf_id;
	repr->outer_vlan_info.port_vlan_ena = false;
	repr->outer_vlan_info.stripping_ena = false;
	repr->outer_vlan_info.tpid = RTE_ETHER_TYPE_VLAN;

	vf_rep_eth_dev->dev_ops = &ice_dcf_vf_repr_dev_ops;

	vf_rep_eth_dev->rx_pkt_burst = ice_dcf_vf_repr_rx_burst;
	vf_rep_eth_dev->tx_pkt_burst = ice_dcf_vf_repr_tx_burst;

	vf_rep_eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
	vf_rep_eth_dev->data->representor_id = repr->vf_id;
	vf_rep_eth_dev->data->backer_port_id = repr->dcf_eth_dev->data->port_id;

	vf_rep_eth_dev->data->mac_addrs = &repr->mac_addr;

	rte_eth_random_addr(repr->mac_addr.addr_bytes);

	return 0;
}
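
/*
 * The MAC address array points into the representor's private data, so it
 * is detached here to keep generic ethdev cleanup from freeing it.
 */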
int
ice_dcf_vf_repr_uninit(struct rte_eth_dev *vf_rep_eth_dev)
{
	vf_rep_eth_dev->data->mac_addrs = NULL;

	return 0;
}
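
/*
 * Re-apply the cached VLAN state (stripping and, if set, the port VLAN);
 * called from the representor's dev_configure handler.
 */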
int
ice_dcf_vf_repr_init_vlan(struct rte_eth_dev *vf_rep_eth_dev)
{
	struct ice_dcf_vf_repr *repr = vf_rep_eth_dev->data->dev_private;
	int err;

	err = ice_dcf_vf_repr_vlan_offload_set(vf_rep_eth_dev,
					       ETH_VLAN_STRIP_MASK);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to set VLAN offload");
		return err;
	}

	if (repr->outer_vlan_info.port_vlan_ena) {
		err = ice_dcf_vf_repr_vlan_pvid_set(vf_rep_eth_dev,
						    repr->outer_vlan_info.vid,
						    true);
		if (err) {
			PMD_DRV_LOG(ERR, "Failed to enable port VLAN");
			return err;
		}
	}

	return 0;
}
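
/*
 * Stop all started VF representors bound to the given DCF adapter,
 * typically when the DCF port itself is stopped or reset.
 */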
void
ice_dcf_vf_repr_stop_all(struct ice_dcf_adapter *dcf_adapter)
{
	uint16_t vf_id;
	int ret;

	if (!dcf_adapter->repr_infos)
		return;

	for (vf_id = 0; vf_id < dcf_adapter->real_hw.num_vfs; vf_id++) {
		struct rte_eth_dev *vf_rep_eth_dev =
				dcf_adapter->repr_infos[vf_id].vf_rep_eth_dev;
		if (!vf_rep_eth_dev || vf_rep_eth_dev->data->dev_started == 0)
			continue;

		ret = ice_dcf_vf_repr_dev_stop(vf_rep_eth_dev);
		if (!ret)
			vf_rep_eth_dev->data->dev_started = 0;
	}
}