1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
14 #include "vnic_intr.h"
15 #include "vnic_stats.h"
19 #include "cq_enet_desc.h"
21 #include <sys/queue.h>
22 #include <rte_spinlock.h>
24 #define DRV_NAME "enic_pmd"
25 #define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Poll-mode Driver"
26 #define DRV_COPYRIGHT "Copyright 2008-2015 Cisco Systems, Inc"
/* Ethernet header (14 bytes) plus one 802.1Q VLAN tag (4 bytes) */
28 #define VLAN_ETH_HLEN 18
/* Evaluates to 1 when feature flag VENETF_<f> is set in the NIC-provided config */
30 #define ENICPMD_SETTING(enic, f) ((enic->config.flags & VENETF_##f) ? 1 : 0)
/* Length of a PCI BDF string "dddd:bb:dd.f" including the terminating NUL */
32 #define ENICPMD_BDF_LENGTH 13 /* 0000:00:00.0'\0' */
/* Checksum-offload request levels */
33 #define ENIC_CALC_IP_CKSUM 1
34 #define ENIC_CALC_TCP_UDP_CKSUM 2
35 #define ENIC_MAX_MTU 9000
36 #define ENIC_PAGE_SIZE 4096
/* Round x up to the next multiple of ENIC_PAGE_SIZE (power-of-two mask trick;
 * valid because ENIC_PAGE_SIZE is a power of two).
 */
37 #define PAGE_ROUND_UP(x) \
38 ((((unsigned long)(x)) + ENIC_PAGE_SIZE-1) & (~(ENIC_PAGE_SIZE-1)))
40 /* must be >= VNIC_COUNTER_DMA_MIN_PERIOD */
41 #define VNIC_FLOW_COUNTER_UPDATE_MSECS 500
43 #define ENICPMD_VFIO_PATH "/dev/vfio/vfio"
/* NOTE(review): dead commented-out macro below — candidate for removal */
44 /*#define ENIC_DESC_COUNT_MAKE_ODD (x) do{if ((~(x)) & 1) { (x)--; } }while(0)*/
46 #define PCI_DEVICE_ID_CISCO_VIC_ENET 0x0043 /* ethernet vnic */
47 #define PCI_DEVICE_ID_CISCO_VIC_ENET_VF 0x0071 /* enet SRIOV VF */
49 /* Special Filter id for non-specific packet flagging. Don't change value */
50 #define ENIC_MAGIC_FILTER_ID 0xffff
/* Max number of flow-director filter nodes tracked (sizes struct enic_fdir::nodes) */
52 #define ENICPMD_FDIR_MAX 64
54 /* HW default VXLAN port */
55 #define ENIC_DEFAULT_VXLAN_PORT 4789
58 * Interrupt 0: LSC and errors
59 * Interrupt 1: rx queue 0
60 * Interrupt 2: rx queue 1
63 #define ENICPMD_LSC_INTR_OFFSET 0
64 #define ENICPMD_RXQ_INTR_OFFSET 1
/* One tracked flow-director filter entry */
66 struct enic_fdir_node {
67 struct rte_eth_fdir_filter filter; /* the ethdev-level filter spec this node mirrors */
/* Flow-director bookkeeping fields below (enclosing struct's opening line is
 * elided in this view — presumably struct enic_fdir; verify against full file).
 */
73 struct rte_eth_fdir_stats stats;
74 struct rte_hash *hash; /* lookup of existing filter nodes */
75 struct enic_fdir_node *nodes[ENICPMD_FDIR_MAX];
/* Filter-version-specific translator: convert an ethdev fdir input + masks
 * into the HW filter_v2 representation (copy_fltr_v1/copy_fltr_v2 below).
 */
78 void (*copy_fltr_fn)(struct filter_v2 *filt,
79 struct rte_eth_fdir_input *input,
80 struct rte_eth_fdir_masks *masks);
/* Driver-maintained (software) counters, kept as atomics since they are
 * updated from the data path; see also enic_dev_stats_get/clear below.
 */
83 struct enic_soft_stats {
84 rte_atomic64_t rx_nombuf; /* presumably rx drops on mbuf alloc failure — confirm in enic_main.c */
85 rte_atomic64_t rx_packet_errors;
86 rte_atomic64_t tx_oversized; /* presumably tx packets rejected as too large — confirm */
/* List node wrapping an rte_memzone so allocations can be tracked on
 * struct enic::memzone_list (and presumably freed on teardown — confirm).
 */
89 struct enic_memzone_entry {
90 const struct rte_memzone *rz; /* the tracked memzone */
91 LIST_ENTRY(enic_memzone_entry) entries;
/* Fields of the driver's rte_flow object (struct opening elided in this view);
 * flows are linked on struct enic::flows below.
 */
95 LIST_ENTRY(rte_flow) next;
97 struct filter_v2 enic_filter; /* HW filter programmed for this flow */
98 int counter_idx; /* NIC allocated counter index (-1 = invalid) */
101 /* Per-instance private data structure */
104 struct rte_pci_device *pdev; /* underlying PCI device */
105 struct vnic_enet_config config; /* config read from the NIC */
106 struct vnic_dev_bar bar0; /* mapped BAR 0 registers */
107 struct vnic_dev *vdev; /* vNIC device handle */
110 * mbuf_initializer contains 64 bits of mbuf rearm_data, used by
111 * the avx2 handler at this time.
113 uint64_t mbuf_initializer;
114 unsigned int port_id; /* ethdev port id */
115 bool overlay_offload;
116 struct rte_eth_dev *rte_dev; /* back-pointer to the generic ethdev */
117 struct enic_fdir fdir;
118 char bdf_name[ENICPMD_BDF_LENGTH]; /* PCI address string, e.g. "0000:00:00.0" */
123 uint8_t mac_addr[ETH_ALEN]; /* primary MAC address */
124 pthread_t err_intr_thread; /* presumably services error interrupts — confirm */
132 u32 flow_filter_mode; /* HW filter API level (filter_v1/v2) — confirm */
133 u8 filter_actions; /* HW supported actions */
135 bool disable_overlay; /* devargs disable_overlay=1 */
136 uint8_t enable_avx2_rx; /* devargs enable-avx2-rx=1 */
137 bool nic_cfg_chk; /* NIC_CFG_CHK available */
138 bool udp_rss_weak; /* Bodega style UDP RSS */
139 uint8_t ig_vlan_rewrite_mode; /* devargs ig-vlan-rewrite */
140 uint16_t vxlan_port; /* current vxlan port pushed to NIC */
143 unsigned int priv_flags;
145 /* work queue (len = conf_wq_count) */
147 unsigned int wq_count; /* equals eth_dev nb_tx_queues */
149 /* receive queue (len = conf_rq_count) */
151 unsigned int rq_count; /* equals eth_dev nb_rx_queues */
153 /* completion queue (len = conf_cq_count) */
155 unsigned int cq_count; /* equals rq_count + wq_count */
157 /* interrupt vectors (len = conf_intr_count) */
158 struct vnic_intr *intr;
159 unsigned int intr_count; /* equals enabled interrupts (lsc + rxqs) */
161 /* software counters */
162 struct enic_soft_stats soft_stats;
164 /* configured resources on vic */
165 unsigned int conf_rq_count;
166 unsigned int conf_wq_count;
167 unsigned int conf_cq_count;
168 unsigned int conf_intr_count;
170 /* linked list storing memory allocations */
171 LIST_HEAD(enic_memzone_list, enic_memzone_entry) memzone_list;
172 rte_spinlock_t memzone_list_lock; /* protects memzone_list */
173 rte_spinlock_t mtu_lock;
175 LIST_HEAD(enic_flows, rte_flow) flows; /* all rte_flow rules on this port */
176 int max_flow_counter;
177 rte_spinlock_t flows_lock; /* protects the flows list */
181 uint8_t hash_key_size;
182 uint64_t flow_type_rss_offloads; /* 0 indicates RSS not supported */
184 * Keep a copy of current RSS config for queries, as we cannot retrieve
187 uint8_t rss_hash_type; /* NIC_CFG_RSS_HASH_TYPE flags */
189 uint64_t rss_hf; /* ETH_RSS flags */
190 union vnic_rss_key rss_key;
191 union vnic_rss_cpu rss_cpu;
193 uint64_t rx_offload_capa; /* DEV_RX_OFFLOAD flags */
194 uint64_t tx_offload_capa; /* DEV_TX_OFFLOAD flags */
195 uint64_t tx_queue_offload_capa; /* DEV_TX_OFFLOAD flags */
196 uint64_t tx_offload_mask; /* PKT_TX flags accepted */
198 /* Multicast MAC addresses added to the NIC */
200 struct ether_addr mc_addrs[ENIC_MULTICAST_PERFECT_FILTERS];
203 /* Compute ethdev's max packet size from MTU */
204 static inline uint32_t enic_mtu_to_max_rx_pktlen(uint32_t mtu)
206 /* ethdev max size includes eth and crc whereas NIC MTU does not */
207 return mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
210 /* Get the CQ index from a Start of Packet(SOP) RQ index */
211 static inline unsigned int enic_sop_rq_idx_to_cq_idx(unsigned int sop_idx)
216 /* Get the RTE RQ index from a Start of Packet(SOP) RQ index */
217 static inline unsigned int enic_sop_rq_idx_to_rte_idx(unsigned int sop_idx)
222 /* Get the Start of Packet(SOP) RQ index from a RTE RQ index */
223 static inline unsigned int enic_rte_rq_idx_to_sop_idx(unsigned int rte_idx)
228 /* Get the Data RQ index from a RTE RQ index */
229 static inline unsigned int enic_rte_rq_idx_to_data_idx(unsigned int rte_idx)
/* Data RQ occupies the odd slot of each SOP/data VNIC RQ pair */
231 return rte_idx * 2 + 1;
/* Each RTE RQ is backed by two VNIC RQs (SOP + data), hence the doubling */
234 static inline unsigned int enic_vnic_rq_count(struct enic *enic)
236 return enic->rq_count * 2;
239 static inline unsigned int enic_cq_rq(__rte_unused struct enic *enic, unsigned int rq)
241 /* Scatter rx uses two receive queues together with one
242 * completion queue, so the completion queue number is no
243 * longer the same as the rq number.
/* WQ completion queues are numbered after all RQ completion queues */
248 static inline unsigned int enic_cq_wq(struct enic *enic, unsigned int wq)
250 return enic->rq_count + wq;
/* Fetch the driver-private struct enic stored in the generic ethdev */
253 static inline struct enic *pmd_priv(struct rte_eth_dev *eth_dev)
255 return (struct enic *)eth_dev->data->dev_private;
/* Ring-index arithmetic helpers: add/subtract/increment modulo the
 * descriptor count, avoiding a division.
 */
258 static inline uint32_t
259 enic_ring_add(uint32_t n_descriptors, uint32_t i0, uint32_t i1)
261 uint32_t d = i0 + i1;
262 d -= (d >= n_descriptors) ? n_descriptors : 0;
266 static inline uint32_t
267 enic_ring_sub(uint32_t n_descriptors, uint32_t i0, uint32_t i1)
270 return (uint32_t)((d < 0) ? ((int32_t)n_descriptors + d) : d);
273 static inline uint32_t
274 enic_ring_incr(uint32_t n_descriptors, uint32_t idx)
277 if (unlikely(idx == n_descriptors))
/* ---- Flow director (fdir) API ---- */
282 void enic_fdir_stats_get(struct enic *enic,
283 struct rte_eth_fdir_stats *stats);
284 int enic_fdir_add_fltr(struct enic *enic,
285 struct rte_eth_fdir_filter *params);
286 int enic_fdir_del_fltr(struct enic *enic,
287 struct rte_eth_fdir_filter *params);
/* ---- Queue and device resource setup / teardown ---- */
288 void enic_free_wq(void *txq);
289 int enic_alloc_intr_resources(struct enic *enic);
290 int enic_setup_finish(struct enic *enic);
291 int enic_alloc_wq(struct enic *enic, uint16_t queue_idx,
292 unsigned int socket_id, uint16_t nb_desc);
293 void enic_start_wq(struct enic *enic, uint16_t queue_idx);
294 int enic_stop_wq(struct enic *enic, uint16_t queue_idx);
295 void enic_start_rq(struct enic *enic, uint16_t queue_idx);
296 int enic_stop_rq(struct enic *enic, uint16_t queue_idx);
297 void enic_free_rq(void *rxq);
298 int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
299 unsigned int socket_id, struct rte_mempool *mp,
300 uint16_t nb_desc, uint16_t free_thresh);
301 int enic_set_vnic_res(struct enic *enic);
/* ---- RSS configuration ---- */
302 int enic_init_rss_nic_cfg(struct enic *enic);
303 int enic_set_rss_conf(struct enic *enic,
304 struct rte_eth_rss_conf *rss_conf);
305 int enic_set_rss_reta(struct enic *enic, union vnic_rss_cpu *rss_cpu);
/* ---- Device control, stats, and MAC management ---- */
306 int enic_set_vlan_strip(struct enic *enic);
307 int enic_enable(struct enic *enic);
308 int enic_disable(struct enic *enic);
309 void enic_remove(struct enic *enic);
310 int enic_get_link_status(struct enic *enic);
311 int enic_dev_stats_get(struct enic *enic,
312 struct rte_eth_stats *r_stats);
313 void enic_dev_stats_clear(struct enic *enic);
314 void enic_add_packet_filter(struct enic *enic);
315 int enic_set_mac_address(struct enic *enic, uint8_t *mac_addr);
316 int enic_del_mac_address(struct enic *enic, int mac_index);
317 unsigned int enic_cleanup_wq(struct enic *enic, struct vnic_wq *wq);
318 void enic_send_pkt(struct enic *enic, struct vnic_wq *wq,
319 struct rte_mbuf *tx_pkt, unsigned short len,
320 uint8_t sop, uint8_t eop, uint8_t cq_entry,
321 uint16_t ol_flags, uint16_t vlan_tag);
323 void enic_post_wq_index(struct vnic_wq *wq);
324 int enic_probe(struct enic *enic);
325 int enic_clsf_init(struct enic *enic);
326 void enic_clsf_destroy(struct enic *enic);
/* ---- Rx/Tx burst handlers (installed as ethdev rx/tx callbacks) ---- */
327 uint16_t enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
329 uint16_t enic_noscatter_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
331 uint16_t enic_dummy_recv_pkts(void *rx_queue,
332 struct rte_mbuf **rx_pkts,
334 uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
336 uint16_t enic_simple_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
338 uint16_t enic_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
340 int enic_set_mtu(struct enic *enic, uint16_t new_mtu);
341 int enic_link_update(struct enic *enic);
342 bool enic_use_vector_rx_handler(struct enic *enic);
/* ---- fdir info and filter translation helpers ---- */
343 void enic_fdir_info(struct enic *enic);
344 void enic_fdir_info_get(struct enic *enic, struct rte_eth_fdir_info *stats);
345 void copy_fltr_v1(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
346 struct rte_eth_fdir_masks *masks);
347 void copy_fltr_v2(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
348 struct rte_eth_fdir_masks *masks);
/* rte_flow ops table exported to the generic flow API */
349 extern const struct rte_flow_ops enic_flow_ops;
350 #endif /* _ENIC_H_ */