/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Intel Corporation
 */
8 #include <rte_ethdev.h>
10 #include "ice_dcf_ethdev.h"
14 ice_dcf_vf_repr_rx_burst(__rte_unused void *rxq,
15 __rte_unused struct rte_mbuf **rx_pkts,
16 __rte_unused uint16_t nb_pkts)
22 ice_dcf_vf_repr_tx_burst(__rte_unused void *txq,
23 __rte_unused struct rte_mbuf **tx_pkts,
24 __rte_unused uint16_t nb_pkts)
30 ice_dcf_vf_repr_dev_configure(__rte_unused struct rte_eth_dev *dev)
36 ice_dcf_vf_repr_dev_start(struct rte_eth_dev *dev)
38 dev->data->dev_link.link_status = ETH_LINK_UP;
44 ice_dcf_vf_repr_dev_stop(struct rte_eth_dev *dev)
46 dev->data->dev_link.link_status = ETH_LINK_DOWN;
/* dev_close callback: tear down the representor private state.
 * Returns the uninit result so close failures propagate to the caller.
 */
static int
ice_dcf_vf_repr_dev_close(struct rte_eth_dev *dev)
{
	return ice_dcf_vf_repr_uninit(dev);
}
58 ice_dcf_vf_repr_rx_queue_setup(__rte_unused struct rte_eth_dev *dev,
59 __rte_unused uint16_t queue_id,
60 __rte_unused uint16_t nb_desc,
61 __rte_unused unsigned int socket_id,
62 __rte_unused const struct rte_eth_rxconf *conf,
63 __rte_unused struct rte_mempool *pool)
69 ice_dcf_vf_repr_tx_queue_setup(__rte_unused struct rte_eth_dev *dev,
70 __rte_unused uint16_t queue_id,
71 __rte_unused uint16_t nb_desc,
72 __rte_unused unsigned int socket_id,
73 __rte_unused const struct rte_eth_txconf *conf)
79 ice_dcf_vf_repr_promiscuous_enable(__rte_unused struct rte_eth_dev *ethdev)
85 ice_dcf_vf_repr_promiscuous_disable(__rte_unused struct rte_eth_dev *ethdev)
91 ice_dcf_vf_repr_allmulticast_enable(__rte_unused struct rte_eth_dev *dev)
97 ice_dcf_vf_repr_allmulticast_disable(__rte_unused struct rte_eth_dev *dev)
103 ice_dcf_vf_repr_link_update(__rte_unused struct rte_eth_dev *ethdev,
104 __rte_unused int wait_to_complete)
110 ice_dcf_vf_repr_dev_info_get(struct rte_eth_dev *dev,
111 struct rte_eth_dev_info *dev_info)
113 struct ice_dcf_vf_repr *repr = dev->data->dev_private;
114 struct ice_dcf_hw *dcf_hw =
115 &repr->dcf_adapter->real_hw;
117 dev_info->device = dev->device;
118 dev_info->max_mac_addrs = 1;
119 dev_info->max_rx_queues = dcf_hw->vsi_res->num_queue_pairs;
120 dev_info->max_tx_queues = dcf_hw->vsi_res->num_queue_pairs;
121 dev_info->min_rx_bufsize = ICE_BUF_SIZE_MIN;
122 dev_info->max_rx_pktlen = ICE_FRAME_SIZE_MAX;
123 dev_info->hash_key_size = dcf_hw->vf_res->rss_key_size;
124 dev_info->reta_size = dcf_hw->vf_res->rss_lut_size;
125 dev_info->flow_type_rss_offloads = ICE_RSS_OFFLOAD_ALL;
127 dev_info->rx_offload_capa =
128 DEV_RX_OFFLOAD_VLAN_STRIP |
129 DEV_RX_OFFLOAD_IPV4_CKSUM |
130 DEV_RX_OFFLOAD_UDP_CKSUM |
131 DEV_RX_OFFLOAD_TCP_CKSUM |
132 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
133 DEV_RX_OFFLOAD_SCATTER |
134 DEV_RX_OFFLOAD_JUMBO_FRAME |
135 DEV_RX_OFFLOAD_VLAN_FILTER |
136 DEV_RX_OFFLOAD_VLAN_EXTEND |
137 DEV_RX_OFFLOAD_RSS_HASH;
138 dev_info->tx_offload_capa =
139 DEV_TX_OFFLOAD_VLAN_INSERT |
140 DEV_TX_OFFLOAD_IPV4_CKSUM |
141 DEV_TX_OFFLOAD_UDP_CKSUM |
142 DEV_TX_OFFLOAD_TCP_CKSUM |
143 DEV_TX_OFFLOAD_SCTP_CKSUM |
144 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
145 DEV_TX_OFFLOAD_TCP_TSO |
146 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
147 DEV_TX_OFFLOAD_GRE_TNL_TSO |
148 DEV_TX_OFFLOAD_IPIP_TNL_TSO |
149 DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
150 DEV_TX_OFFLOAD_MULTI_SEGS;
152 dev_info->default_rxconf = (struct rte_eth_rxconf) {
154 .pthresh = ICE_DEFAULT_RX_PTHRESH,
155 .hthresh = ICE_DEFAULT_RX_HTHRESH,
156 .wthresh = ICE_DEFAULT_RX_WTHRESH,
158 .rx_free_thresh = ICE_DEFAULT_RX_FREE_THRESH,
163 dev_info->default_txconf = (struct rte_eth_txconf) {
165 .pthresh = ICE_DEFAULT_TX_PTHRESH,
166 .hthresh = ICE_DEFAULT_TX_HTHRESH,
167 .wthresh = ICE_DEFAULT_TX_WTHRESH,
169 .tx_free_thresh = ICE_DEFAULT_TX_FREE_THRESH,
170 .tx_rs_thresh = ICE_DEFAULT_TX_RSBIT_THRESH,
174 dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
175 .nb_max = ICE_MAX_RING_DESC,
176 .nb_min = ICE_MIN_RING_DESC,
177 .nb_align = ICE_ALIGN_RING_DESC,
180 dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
181 .nb_max = ICE_MAX_RING_DESC,
182 .nb_min = ICE_MIN_RING_DESC,
183 .nb_align = ICE_ALIGN_RING_DESC,
186 dev_info->switch_info.name = dcf_hw->eth_dev->device->name;
187 dev_info->switch_info.domain_id = repr->switch_domain_id;
188 dev_info->switch_info.port_id = repr->vf_id;
194 ice_dcf_vlan_offload_config(struct ice_dcf_vf_repr *repr,
195 struct virtchnl_dcf_vlan_offload *vlan_offload)
197 struct dcf_virtchnl_cmd args;
199 memset(&args, 0, sizeof(args));
200 args.v_op = VIRTCHNL_OP_DCF_VLAN_OFFLOAD;
201 args.req_msg = (uint8_t *)vlan_offload;
202 args.req_msglen = sizeof(*vlan_offload);
204 return ice_dcf_execute_virtchnl_cmd(&repr->dcf_adapter->real_hw, &args);
207 static __rte_always_inline bool
208 ice_dcf_vlan_offload_ena(struct ice_dcf_vf_repr *repr)
210 return !!(repr->dcf_adapter->real_hw.vf_res->vf_cap_flags &
211 VIRTCHNL_VF_OFFLOAD_VLAN_V2);
215 ice_dcf_vf_repr_vlan_pvid_set(struct rte_eth_dev *dev,
216 uint16_t pvid, int on)
218 struct ice_dcf_vf_repr *repr = dev->data->dev_private;
219 struct virtchnl_dcf_vlan_offload vlan_offload;
222 if (!ice_dcf_vlan_offload_ena(repr))
225 if (on && (pvid == 0 || pvid > RTE_ETHER_MAX_VLAN_ID))
228 memset(&vlan_offload, 0, sizeof(vlan_offload));
230 vlan_offload.vf_id = repr->vf_id;
231 vlan_offload.tpid = repr->port_vlan_info.tpid;
232 vlan_offload.vlan_flags = (VIRTCHNL_DCF_VLAN_TYPE_OUTER <<
233 VIRTCHNL_DCF_VLAN_TYPE_S) |
234 (VIRTCHNL_DCF_VLAN_INSERT_PORT_BASED <<
235 VIRTCHNL_DCF_VLAN_INSERT_MODE_S);
236 vlan_offload.vlan_id = on ? pvid : 0;
238 err = ice_dcf_vlan_offload_config(repr, &vlan_offload);
241 repr->port_vlan_ena = true;
242 repr->port_vlan_info.vid = pvid;
244 repr->port_vlan_ena = false;
252 ice_dcf_vf_repr_vlan_tpid_set(struct rte_eth_dev *dev,
253 enum rte_vlan_type vlan_type, uint16_t tpid)
255 struct ice_dcf_vf_repr *repr = dev->data->dev_private;
258 if (!ice_dcf_vlan_offload_ena(repr))
261 if (vlan_type != ETH_VLAN_TYPE_OUTER) {
263 "Can accelerate only outer VLAN in QinQ\n");
267 if (tpid != RTE_ETHER_TYPE_QINQ &&
268 tpid != RTE_ETHER_TYPE_VLAN &&
269 tpid != RTE_ETHER_TYPE_QINQ1) {
271 "Invalid TPID: 0x%04x\n", tpid);
275 repr->port_vlan_info.tpid = tpid;
277 if (repr->port_vlan_ena)
278 err = ice_dcf_vf_repr_vlan_pvid_set(dev,
279 repr->port_vlan_info.vid,
284 static const struct eth_dev_ops ice_dcf_vf_repr_dev_ops = {
285 .dev_configure = ice_dcf_vf_repr_dev_configure,
286 .dev_start = ice_dcf_vf_repr_dev_start,
287 .dev_stop = ice_dcf_vf_repr_dev_stop,
288 .dev_close = ice_dcf_vf_repr_dev_close,
289 .dev_infos_get = ice_dcf_vf_repr_dev_info_get,
290 .rx_queue_setup = ice_dcf_vf_repr_rx_queue_setup,
291 .tx_queue_setup = ice_dcf_vf_repr_tx_queue_setup,
292 .promiscuous_enable = ice_dcf_vf_repr_promiscuous_enable,
293 .promiscuous_disable = ice_dcf_vf_repr_promiscuous_disable,
294 .allmulticast_enable = ice_dcf_vf_repr_allmulticast_enable,
295 .allmulticast_disable = ice_dcf_vf_repr_allmulticast_disable,
296 .link_update = ice_dcf_vf_repr_link_update,
297 .vlan_pvid_set = ice_dcf_vf_repr_vlan_pvid_set,
298 .vlan_tpid_set = ice_dcf_vf_repr_vlan_tpid_set,
302 ice_dcf_vf_repr_init(struct rte_eth_dev *ethdev, void *init_param)
304 struct ice_dcf_vf_repr *repr = ethdev->data->dev_private;
305 struct ice_dcf_vf_repr_param *param = init_param;
307 repr->dcf_adapter = param->adapter;
308 repr->switch_domain_id = param->switch_domain_id;
309 repr->vf_id = param->vf_id;
310 repr->port_vlan_ena = false;
311 repr->port_vlan_info.tpid = RTE_ETHER_TYPE_VLAN;
313 ethdev->dev_ops = &ice_dcf_vf_repr_dev_ops;
315 ethdev->rx_pkt_burst = ice_dcf_vf_repr_rx_burst;
316 ethdev->tx_pkt_burst = ice_dcf_vf_repr_tx_burst;
318 ethdev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
319 ethdev->data->representor_id = repr->vf_id;
321 ethdev->data->mac_addrs = &repr->mac_addr;
323 rte_eth_random_addr(repr->mac_addr.addr_bytes);
329 ice_dcf_vf_repr_uninit(struct rte_eth_dev *ethdev)
331 ethdev->data->mac_addrs = NULL;