net/ice: add DCF VLAN handling
[dpdk.git] / drivers / net / ice / ice_dcf_vf_representor.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2020 Intel Corporation
3  */
4
5 #include <errno.h>
6 #include <sys/types.h>
7
8 #include <rte_ethdev.h>
9
10 #include "ice_dcf_ethdev.h"
11 #include "ice_rxtx.h"
12
13 static uint16_t
14 ice_dcf_vf_repr_rx_burst(__rte_unused void *rxq,
15                          __rte_unused struct rte_mbuf **rx_pkts,
16                          __rte_unused uint16_t nb_pkts)
17 {
18         return 0;
19 }
20
21 static uint16_t
22 ice_dcf_vf_repr_tx_burst(__rte_unused void *txq,
23                          __rte_unused struct rte_mbuf **tx_pkts,
24                          __rte_unused uint16_t nb_pkts)
25 {
26         return 0;
27 }
28
29 static int
30 ice_dcf_vf_repr_dev_configure(__rte_unused struct rte_eth_dev *dev)
31 {
32         return 0;
33 }
34
35 static int
36 ice_dcf_vf_repr_dev_start(struct rte_eth_dev *dev)
37 {
38         dev->data->dev_link.link_status = ETH_LINK_UP;
39
40         return 0;
41 }
42
43 static int
44 ice_dcf_vf_repr_dev_stop(struct rte_eth_dev *dev)
45 {
46         dev->data->dev_link.link_status = ETH_LINK_DOWN;
47
48         return 0;
49 }
50
/* Close tears the representor down entirely by delegating to the
 * uninit path (which detaches the MAC address storage).
 */
static int
ice_dcf_vf_repr_dev_close(struct rte_eth_dev *dev)
{
	return ice_dcf_vf_repr_uninit(dev);
}
56
57 static int
58 ice_dcf_vf_repr_rx_queue_setup(__rte_unused struct rte_eth_dev *dev,
59                                __rte_unused uint16_t queue_id,
60                                __rte_unused uint16_t nb_desc,
61                                __rte_unused unsigned int socket_id,
62                                __rte_unused const struct rte_eth_rxconf *conf,
63                                __rte_unused struct rte_mempool *pool)
64 {
65         return 0;
66 }
67
68 static int
69 ice_dcf_vf_repr_tx_queue_setup(__rte_unused struct rte_eth_dev *dev,
70                                __rte_unused uint16_t queue_id,
71                                __rte_unused uint16_t nb_desc,
72                                __rte_unused unsigned int socket_id,
73                                __rte_unused const struct rte_eth_txconf *conf)
74 {
75         return 0;
76 }
77
78 static int
79 ice_dcf_vf_repr_promiscuous_enable(__rte_unused struct rte_eth_dev *ethdev)
80 {
81         return 0;
82 }
83
84 static int
85 ice_dcf_vf_repr_promiscuous_disable(__rte_unused struct rte_eth_dev *ethdev)
86 {
87         return 0;
88 }
89
90 static int
91 ice_dcf_vf_repr_allmulticast_enable(__rte_unused struct rte_eth_dev *dev)
92 {
93         return 0;
94 }
95
96 static int
97 ice_dcf_vf_repr_allmulticast_disable(__rte_unused struct rte_eth_dev *dev)
98 {
99         return 0;
100 }
101
102 static int
103 ice_dcf_vf_repr_link_update(__rte_unused struct rte_eth_dev *ethdev,
104                             __rte_unused int wait_to_complete)
105 {
106         return 0;
107 }
108
/* Report device capabilities for a VF representor port.
 *
 * Limits (queue counts, RSS key/LUT sizes) are taken from the DCF's
 * cached view of the represented VF's resources rather than from any
 * hardware owned by the representor itself.
 *
 * @param dev      representor ethdev
 * @param dev_info filled in by this function
 * @return 0 always
 */
static int
ice_dcf_vf_repr_dev_info_get(struct rte_eth_dev *dev,
			     struct rte_eth_dev_info *dev_info)
{
	struct ice_dcf_vf_repr *repr = dev->data->dev_private;
	/* Real DCF hardware state shared by all representors. */
	struct ice_dcf_hw *dcf_hw =
				&repr->dcf_adapter->real_hw;

	dev_info->device = dev->device;
	dev_info->max_mac_addrs = 1;
	/* Queue limits mirror the represented VF's queue-pair allocation. */
	dev_info->max_rx_queues = dcf_hw->vsi_res->num_queue_pairs;
	dev_info->max_tx_queues = dcf_hw->vsi_res->num_queue_pairs;
	dev_info->min_rx_bufsize = ICE_BUF_SIZE_MIN;
	dev_info->max_rx_pktlen = ICE_FRAME_SIZE_MAX;
	dev_info->hash_key_size = dcf_hw->vf_res->rss_key_size;
	dev_info->reta_size = dcf_hw->vf_res->rss_lut_size;
	dev_info->flow_type_rss_offloads = ICE_RSS_OFFLOAD_ALL;

	/* Advertised offloads match what the ice VF datapath supports. */
	dev_info->rx_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_RX_OFFLOAD_SCATTER |
		DEV_RX_OFFLOAD_JUMBO_FRAME |
		DEV_RX_OFFLOAD_VLAN_FILTER |
		DEV_RX_OFFLOAD_VLAN_EXTEND |
		DEV_RX_OFFLOAD_RSS_HASH;
	dev_info->tx_offload_capa =
		DEV_TX_OFFLOAD_VLAN_INSERT |
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM |
		DEV_TX_OFFLOAD_SCTP_CKSUM |
		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_TX_OFFLOAD_TCP_TSO |
		DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
		DEV_TX_OFFLOAD_GRE_TNL_TSO |
		DEV_TX_OFFLOAD_IPIP_TNL_TSO |
		DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
		DEV_TX_OFFLOAD_MULTI_SEGS;

	/* Defaults below use the same thresholds as the regular ice PMD. */
	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = ICE_DEFAULT_RX_PTHRESH,
			.hthresh = ICE_DEFAULT_RX_HTHRESH,
			.wthresh = ICE_DEFAULT_RX_WTHRESH,
		},
		.rx_free_thresh = ICE_DEFAULT_RX_FREE_THRESH,
		.rx_drop_en = 0,
		.offloads = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = ICE_DEFAULT_TX_PTHRESH,
			.hthresh = ICE_DEFAULT_TX_HTHRESH,
			.wthresh = ICE_DEFAULT_TX_WTHRESH,
		},
		.tx_free_thresh = ICE_DEFAULT_TX_FREE_THRESH,
		.tx_rs_thresh = ICE_DEFAULT_TX_RSBIT_THRESH,
		.offloads = 0,
	};

	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = ICE_MAX_RING_DESC,
		.nb_min = ICE_MIN_RING_DESC,
		.nb_align = ICE_ALIGN_RING_DESC,
	};

	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = ICE_MAX_RING_DESC,
		.nb_min = ICE_MIN_RING_DESC,
		.nb_align = ICE_ALIGN_RING_DESC,
	};

	/* Switch info ties this port to the DCF's switch domain and the
	 * VF id it represents (used by rte_flow transfer rules).
	 */
	dev_info->switch_info.name = dcf_hw->eth_dev->device->name;
	dev_info->switch_info.domain_id = repr->switch_domain_id;
	dev_info->switch_info.port_id = repr->vf_id;

	return 0;
}
192
193 static int
194 ice_dcf_vlan_offload_config(struct ice_dcf_vf_repr *repr,
195                             struct virtchnl_dcf_vlan_offload *vlan_offload)
196 {
197         struct dcf_virtchnl_cmd args;
198
199         memset(&args, 0, sizeof(args));
200         args.v_op = VIRTCHNL_OP_DCF_VLAN_OFFLOAD;
201         args.req_msg = (uint8_t *)vlan_offload;
202         args.req_msglen = sizeof(*vlan_offload);
203
204         return ice_dcf_execute_virtchnl_cmd(&repr->dcf_adapter->real_hw, &args);
205 }
206
207 static __rte_always_inline bool
208 ice_dcf_vlan_offload_ena(struct ice_dcf_vf_repr *repr)
209 {
210         return !!(repr->dcf_adapter->real_hw.vf_res->vf_cap_flags &
211                   VIRTCHNL_VF_OFFLOAD_VLAN_V2);
212 }
213
214 static int
215 ice_dcf_vf_repr_vlan_pvid_set(struct rte_eth_dev *dev,
216                               uint16_t pvid, int on)
217 {
218         struct ice_dcf_vf_repr *repr = dev->data->dev_private;
219         struct virtchnl_dcf_vlan_offload vlan_offload;
220         int err;
221
222         if (!ice_dcf_vlan_offload_ena(repr))
223                 return -ENOTSUP;
224
225         if (on && (pvid == 0 || pvid > RTE_ETHER_MAX_VLAN_ID))
226                 return -EINVAL;
227
228         memset(&vlan_offload, 0, sizeof(vlan_offload));
229
230         vlan_offload.vf_id = repr->vf_id;
231         vlan_offload.tpid = repr->port_vlan_info.tpid;
232         vlan_offload.vlan_flags = (VIRTCHNL_DCF_VLAN_TYPE_OUTER <<
233                                    VIRTCHNL_DCF_VLAN_TYPE_S) |
234                                   (VIRTCHNL_DCF_VLAN_INSERT_PORT_BASED <<
235                                    VIRTCHNL_DCF_VLAN_INSERT_MODE_S);
236         vlan_offload.vlan_id = on ? pvid : 0;
237
238         err = ice_dcf_vlan_offload_config(repr, &vlan_offload);
239         if (!err) {
240                 if (on) {
241                         repr->port_vlan_ena = true;
242                         repr->port_vlan_info.vid = pvid;
243                 } else {
244                         repr->port_vlan_ena = false;
245                 }
246         }
247
248         return err;
249 }
250
251 static int
252 ice_dcf_vf_repr_vlan_tpid_set(struct rte_eth_dev *dev,
253                               enum rte_vlan_type vlan_type, uint16_t tpid)
254 {
255         struct ice_dcf_vf_repr *repr = dev->data->dev_private;
256         int err = 0;
257
258         if (!ice_dcf_vlan_offload_ena(repr))
259                 return -ENOTSUP;
260
261         if (vlan_type != ETH_VLAN_TYPE_OUTER) {
262                 PMD_DRV_LOG(ERR,
263                             "Can accelerate only outer VLAN in QinQ\n");
264                 return -EINVAL;
265         }
266
267         if (tpid != RTE_ETHER_TYPE_QINQ &&
268             tpid != RTE_ETHER_TYPE_VLAN &&
269             tpid != RTE_ETHER_TYPE_QINQ1) {
270                 PMD_DRV_LOG(ERR,
271                             "Invalid TPID: 0x%04x\n", tpid);
272                 return -EINVAL;
273         }
274
275         repr->port_vlan_info.tpid = tpid;
276
277         if (repr->port_vlan_ena)
278                 err = ice_dcf_vf_repr_vlan_pvid_set(dev,
279                                                     repr->port_vlan_info.vid,
280                                                     true);
281         return err;
282 }
283
/* ethdev ops table for DCF VF representor ports. Queue/promisc/link ops
 * are no-op stubs; only the VLAN ops actually reach the hardware (via
 * the DCF virtchnl channel).
 */
static const struct eth_dev_ops ice_dcf_vf_repr_dev_ops = {
	.dev_configure        = ice_dcf_vf_repr_dev_configure,
	.dev_start            = ice_dcf_vf_repr_dev_start,
	.dev_stop             = ice_dcf_vf_repr_dev_stop,
	.dev_close            = ice_dcf_vf_repr_dev_close,
	.dev_infos_get        = ice_dcf_vf_repr_dev_info_get,
	.rx_queue_setup       = ice_dcf_vf_repr_rx_queue_setup,
	.tx_queue_setup       = ice_dcf_vf_repr_tx_queue_setup,
	.promiscuous_enable   = ice_dcf_vf_repr_promiscuous_enable,
	.promiscuous_disable  = ice_dcf_vf_repr_promiscuous_disable,
	.allmulticast_enable  = ice_dcf_vf_repr_allmulticast_enable,
	.allmulticast_disable = ice_dcf_vf_repr_allmulticast_disable,
	.link_update          = ice_dcf_vf_repr_link_update,
	.vlan_pvid_set        = ice_dcf_vf_repr_vlan_pvid_set,
	.vlan_tpid_set        = ice_dcf_vf_repr_vlan_tpid_set,
};
300
301 int
302 ice_dcf_vf_repr_init(struct rte_eth_dev *ethdev, void *init_param)
303 {
304         struct ice_dcf_vf_repr *repr = ethdev->data->dev_private;
305         struct ice_dcf_vf_repr_param *param = init_param;
306
307         repr->dcf_adapter = param->adapter;
308         repr->switch_domain_id = param->switch_domain_id;
309         repr->vf_id = param->vf_id;
310         repr->port_vlan_ena = false;
311         repr->port_vlan_info.tpid = RTE_ETHER_TYPE_VLAN;
312
313         ethdev->dev_ops = &ice_dcf_vf_repr_dev_ops;
314
315         ethdev->rx_pkt_burst = ice_dcf_vf_repr_rx_burst;
316         ethdev->tx_pkt_burst = ice_dcf_vf_repr_tx_burst;
317
318         ethdev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
319         ethdev->data->representor_id = repr->vf_id;
320
321         ethdev->data->mac_addrs = &repr->mac_addr;
322
323         rte_eth_random_addr(repr->mac_addr.addr_bytes);
324
325         return 0;
326 }
327
328 int
329 ice_dcf_vf_repr_uninit(struct rte_eth_dev *ethdev)
330 {
331         ethdev->data->mac_addrs = NULL;
332
333         return 0;
334 }