1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
6 #include <rte_memzone.h>
7 #include <rte_memcpy.h>
8 #include <rte_string_fns.h>
11 #include "vnic_resource.h"
12 #include "vnic_devcmd.h"
14 #include "vnic_stats.h"
/* Devcmd proxying modes (enum values elided in this excerpt). */
17 enum vnic_proxy_type {
/* Conversion factors for the interrupt coalescing timer; used by
 * vnic_dev_intr_coal_timer_usec_to_hw()/_hw_to_usec() below.
 */
29 struct vnic_intr_coal_timer_info {
/* NOTE(review): the fields below appear to be members of struct vnic_dev;
 * the struct header line is not visible in this excerpt — confirm against
 * the full source.
 */
37 struct rte_pci_device *pdev;
/* Per-type resource table filled in by vnic_dev_discover_res(). */
38 struct vnic_res res[RES_TYPE_MAX];
39 enum vnic_dev_intr_mode intr_mode;
/* Mapped devcmd register area and the firmware notify buffer state. */
40 struct vnic_devcmd __iomem *devcmd;
41 struct vnic_devcmd_notify *notify;
42 struct vnic_devcmd_notify notify_copy;
45 dma_addr_t linkstatus_pa;
/* DMA-coherent stats and firmware-info buffers (allocated lazily). */
46 struct vnic_stats *stats;
48 struct vnic_devcmd_fw_info *fw_info;
49 dma_addr_t fw_info_pa;
50 enum vnic_proxy_type proxy;
/* Scratch argument block shared by all devcmd invocations. */
52 u64 args[VNIC_DEVCMD_NARGS];
54 struct vnic_intr_coal_timer_info intr_coal_timer_info;
/* DMA-coherent memory alloc/free callbacks registered by the upper
 * layer via vnic_register_cbacks(); used for rings, stats, notify,
 * fw_info and flow-counter buffers throughout this file.
 */
55 void *(*alloc_consistent)(void *priv, size_t size,
56 dma_addr_t *dma_handle, u8 *name);
57 void (*free_consistent)(void *priv,
58 size_t size, void *vaddr,
59 dma_addr_t dma_handle);
/* Flow-counter DMA buffer and whether counter DMA is currently active
 * (checked at unregister time so DMAs are stopped before freeing).
 */
60 struct vnic_counter_counts *flow_counters;
61 dma_addr_t flow_counters_pa;
62 u8 flow_counters_dma_active;
/* Largest possible BAR0 resource header: header plus one resource
 * entry per type.
 */
65 #define VNIC_MAX_RES_HDR_SIZE \
66 (sizeof(struct vnic_resource_header) + \
67 sizeof(struct vnic_resource) * RES_TYPE_MAX)
/* Per-index resources (WQ/RQ/CQ/INTR_CTRL) are this many bytes apart. */
68 #define VNIC_RES_STRIDE 128
/* Upper bound on flow counters; also sizes the counter DMA buffer. */
70 #define VNIC_MAX_FLOW_COUNTERS 2048
/* Return the opaque private pointer associated with this vnic_dev.
 * (Body elided in this excerpt — presumably returns vdev->priv; confirm.)
 */
72 void *vnic_dev_priv(struct vnic_dev *vdev)
/* Register the DMA-coherent alloc/free callbacks this layer uses for
 * all consistent-memory allocations (rings, stats, notify, fw_info).
 */
77 void vnic_register_cbacks(struct vnic_dev *vdev,
78 void *(*alloc_consistent)(void *priv, size_t size,
79 dma_addr_t *dma_handle, u8 *name),
80 void (*free_consistent)(void *priv,
81 size_t size, void *vaddr,
82 dma_addr_t dma_handle))
84 vdev->alloc_consistent = alloc_consistent;
85 vdev->free_consistent = free_consistent;
/* Walk the resource directory in BAR0 and record each resource's count,
 * mapped address and bus address into vdev->res[]. Accepts either a
 * normal vNIC header (VNIC_RES_MAGIC) or a mgmt vNIC header
 * (MGMTVNIC_MAGIC). Several lines are elided in this excerpt.
 */
88 static int vnic_dev_discover_res(struct vnic_dev *vdev,
89 struct vnic_dev_bar *bar, unsigned int num_bars)
91 struct vnic_resource_header __iomem *rh;
92 struct mgmt_barmap_hdr __iomem *mrh;
93 struct vnic_resource __iomem *r;
/* BAR0 must be large enough to hold the biggest possible header. */
99 if (bar->len < VNIC_MAX_RES_HDR_SIZE) {
100 pr_err("vNIC BAR0 res hdr length error\n");
107 pr_err("vNIC BAR0 res hdr not mem-mapped\n");
111 /* Check for mgmt vnic in addition to normal vnic */
112 if ((ioread32(&rh->magic) != VNIC_RES_MAGIC) ||
113 (ioread32(&rh->version) != VNIC_RES_VERSION)) {
114 if ((ioread32(&mrh->magic) != MGMTVNIC_MAGIC) ||
115 (ioread32(&mrh->version) != MGMTVNIC_VERSION)) {
116 pr_err("vNIC BAR0 res magic/version error " \
117 "exp (%lx/%lx) or (%lx/%lx), curr (%x/%x)\n",
118 VNIC_RES_MAGIC, VNIC_RES_VERSION,
119 MGMTVNIC_MAGIC, MGMTVNIC_VERSION,
120 ioread32(&rh->magic), ioread32(&rh->version));
/* Resource entries start immediately after whichever header matched. */
125 if (ioread32(&mrh->magic) == MGMTVNIC_MAGIC)
126 r = (struct vnic_resource __iomem *)(mrh + 1);
128 r = (struct vnic_resource __iomem *)(rh + 1);
/* Iterate entries until the end-of-list sentinel. */
131 while ((type = ioread8(&r->type)) != RES_TYPE_EOL) {
132 u8 bar_num = ioread8(&r->bar);
133 u32 bar_offset = ioread32(&r->bar_offset);
134 u32 count = ioread32(&r->count);
/* Skip entries referencing BARs we did not map. */
139 if (bar_num >= num_bars)
142 if (!bar[bar_num].len || !bar[bar_num].vaddr)
149 case RES_TYPE_INTR_CTRL:
150 /* each count is stride bytes long */
151 len = count * VNIC_RES_STRIDE;
/* Bounds-check the stride-addressed resources against the BAR size. */
152 if (len + bar_offset > bar[bar_num].len) {
153 pr_err("vNIC BAR0 resource %d " \
154 "out-of-bounds, offset 0x%x + " \
155 "size 0x%x > bar len 0x%lx\n",
156 type, bar_offset, len,
157 bar[bar_num].len);
158 return -EINVAL;
159 }
160 break;
161 /* fallthrough to DEVCMD */
162 case RES_TYPE_INTR_PBA_LEGACY:
163 case RES_TYPE_DEVCMD:
/* Record the discovered resource for vnic_dev_get_res()/count(). */
170 vdev->res[type].count = count;
171 vdev->res[type].vaddr = (char __iomem *)bar[bar_num].vaddr +
173 vdev->res[type].bus_addr = bar[bar_num].bus_addr + bar_offset;
/* Number of instances of the given resource type found by
 * vnic_dev_discover_res().
 */
179 unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev,
180 enum vnic_res_type type)
182 return vdev->res[type].count;
/* Map a (type, index) pair to the resource's mapped address. Per-index
 * resource types (e.g. INTR_CTRL) are VNIC_RES_STRIDE bytes apart;
 * other types return the base address. The early-return path for an
 * unmapped resource is elided in this excerpt.
 */
185 void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
188 if (!vdev->res[type].vaddr)
195 case RES_TYPE_INTR_CTRL:
196 return (char __iomem *)vdev->res[type].vaddr +
197 index * VNIC_RES_STRIDE;
199 return (char __iomem *)vdev->res[type].vaddr;
/* Compute and record the aligned geometry of a descriptor ring.
 * Fills ring->desc_count/desc_size/size/size_unaligned and returns
 * the unaligned allocation size (actual size plus alignment slack).
 */
203 unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
204 unsigned int desc_count, unsigned int desc_size)
206 /* The base address of the desc rings must be 512 byte aligned.
207 * Descriptor count is aligned to groups of 32 descriptors. A
208 * count of 0 means the maximum 4096 descriptors. Descriptor
209 * size is aligned to 16 bytes.
212 unsigned int count_align = 32;
213 unsigned int desc_align = 16;
215 ring->base_align = 512;
220 ring->desc_count = VNIC_ALIGN(desc_count, count_align);
222 ring->desc_size = VNIC_ALIGN(desc_size, desc_align);
224 ring->size = ring->desc_count * ring->desc_size;
/* Over-allocate by base_align so the base can be aligned afterwards. */
225 ring->size_unaligned = ring->size + ring->base_align;
227 return ring->size_unaligned;
/* Zero the ring's descriptor area (aligned region only). */
230 void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring)
232 memset(ring->descs, 0, ring->size);
/* Allocate DMA-coherent memory for a descriptor ring, align its base
 * to ring->base_align, and zero it. Frees the allocation again on the
 * mapping-failure path. Several lines are elided in this excerpt.
 */
235 int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev,
236 struct vnic_dev_ring *ring,
237 unsigned int desc_count, unsigned int desc_size,
238 __attribute__((unused)) unsigned int socket_id,
242 dma_addr_t alloc_pa = 0;
/* Fix the ring geometry first; size_unaligned includes align slack. */
244 vnic_dev_desc_ring_size(ring, desc_count, desc_size);
245 alloc_addr = vdev->alloc_consistent(vdev->priv,
246 ring->size_unaligned,
247 &alloc_pa, (u8 *)z_name);
249 pr_err("Failed to allocate ring (size=%d), aborting\n",
253 ring->descs_unaligned = alloc_addr;
255 pr_err("Failed to map allocated ring (size=%d), aborting\n",
/* Roll back the allocation if the bus address is unusable. */
257 vdev->free_consistent(vdev->priv,
258 ring->size_unaligned,
263 ring->base_addr_unaligned = alloc_pa;
/* Align the DMA base, then offset the CPU pointer by the same amount. */
265 ring->base_addr = VNIC_ALIGN(ring->base_addr_unaligned,
267 ring->descs = (u8 *)ring->descs_unaligned +
268 (ring->base_addr - ring->base_addr_unaligned);
270 vnic_dev_clear_desc_ring(ring);
/* One descriptor is kept unused to distinguish full from empty. */
272 ring->desc_avail = ring->desc_count - 1;
/* Release a ring previously set up by vnic_dev_alloc_desc_ring(). */
277 void vnic_dev_free_desc_ring(__attribute__((unused)) struct vnic_dev *vdev,
278 struct vnic_dev_ring *ring)
281 vdev->free_consistent(vdev->priv,
282 ring->size_unaligned,
283 ring->descs_unaligned,
284 ring->base_addr_unaligned);
/* Issue one devcmd to firmware through the mapped devcmd register area:
 * write args, post the command, then poll the status register until the
 * busy bit clears or the wait budget expires. Returns 0 on success, a
 * negative error from the firmware, or a timeout/device-gone error
 * (exact returns elided in this excerpt).
 */
289 static int _vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
292 struct vnic_devcmd __iomem *devcmd = vdev->devcmd;
298 status = ioread32(&devcmd->status);
299 if (status == 0xFFFFFFFF) {
300 /* PCI-e target device is gone */
/* A previous command still pending means we cannot post a new one. */
303 if (status & STAT_BUSY) {
305 pr_err("Busy devcmd %d\n", _CMD_N(cmd));
/* For write-direction commands, stage the args before posting. */
309 if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) {
310 for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
311 writeq(vdev->args[i], &devcmd->args[i]);
312 wmb(); /* complete all writes initiated till now */
/* Posting the command number triggers firmware execution. */
315 iowrite32(cmd, &devcmd->cmd);
317 if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT))
320 for (delay = 0; delay < wait; delay++) {
324 status = ioread32(&devcmd->status);
325 if (status == 0xFFFFFFFF) {
326 /* PCI-e target device is gone */
330 if (!(status & STAT_BUSY)) {
/* Firmware reports its error code (negated) in args[0]. */
331 if (status & STAT_ERROR) {
332 err = -(int)readq(&devcmd->args[0]);
/* CMD_CAPABILITY probes are expected to fail; don't log those. */
333 if (cmd != CMD_CAPABILITY)
334 pr_err("Devcmd %d failed " \
335 "with error code %d\n",
340 if (_CMD_DIR(cmd) & _CMD_DIR_READ) {
341 rmb();/* finish all reads initiated till now */
342 for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
343 vdev->args[i] = readq(&devcmd->args[i]);
350 pr_err("Timedout devcmd %d\n", _CMD_N(cmd));
/* Issue a devcmd through a proxy devcmd (by index or by BDF): args[0]
 * carries the proxy target, args[1] the proxied command, and the real
 * arguments are shifted up by two slots. On return, args[0] holds the
 * proxy status and args[1..] the proxied command's results.
 */
354 static int vnic_dev_cmd_proxy(struct vnic_dev *vdev,
355 enum vnic_devcmd_cmd proxy_cmd, enum vnic_devcmd_cmd cmd,
356 u64 *args, int nargs, int wait)
362 * Proxy command consumes 2 arguments. One for proxy index,
363 * the other is for command to be proxied
365 if (nargs > VNIC_DEVCMD_NARGS - 2) {
366 pr_err("number of args %d exceeds the maximum\n", nargs);
369 memset(vdev->args, 0, sizeof(vdev->args));
371 vdev->args[0] = vdev->proxy_index;
373 memcpy(&vdev->args[2], args, nargs * sizeof(args[0]));
375 err = _vnic_dev_cmd(vdev, proxy_cmd, wait);
/* args[0] carries the proxy's own status word for the inner command. */
379 status = (u32)vdev->args[0];
380 if (status & STAT_ERROR) {
381 err = (int)vdev->args[1];
/* ERR_ECMDUNKNOWN is expected for CMD_CAPABILITY probes; stay quiet. */
382 if (err != ERR_ECMDUNKNOWN ||
383 cmd != CMD_CAPABILITY)
384 pr_err("Error %d proxy devcmd %d\n", err, _CMD_N(cmd));
/* Results come back shifted by one (slot 0 was the status). */
388 memcpy(args, &vdev->args[1], nargs * sizeof(args[0]));
/* Issue a devcmd directly (no proxying), copying args in and out of
 * the shared vdev->args scratch block.
 */
393 static int vnic_dev_cmd_no_proxy(struct vnic_dev *vdev,
394 enum vnic_devcmd_cmd cmd, u64 *args, int nargs, int wait)
398 if (nargs > VNIC_DEVCMD_NARGS) {
399 pr_err("number of args %d exceeds the maximum\n", nargs);
402 memset(vdev->args, 0, sizeof(vdev->args));
403 memcpy(vdev->args, args, nargs * sizeof(args[0]));
405 err = _vnic_dev_cmd(vdev, cmd, wait);
407 memcpy(args, vdev->args, nargs * sizeof(args[0]));
/* Two-argument devcmd wrapper: routes through the configured proxy
 * mode (by index, by BDF, or none) and returns results in *a0/*a1.
 */
412 int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
413 u64 *a0, u64 *a1, int wait)
420 memset(vdev->args, 0, sizeof(vdev->args));
422 switch (vdev->proxy) {
424 err = vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_INDEX, cmd,
425 args, ARRAY_SIZE(args), wait);
428 err = vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_BDF, cmd,
429 args, ARRAY_SIZE(args), wait);
433 err = vnic_dev_cmd_no_proxy(vdev, cmd, args, 2, wait);
/* N-argument variant of vnic_dev_cmd(); same proxy routing. */
445 int vnic_dev_cmd_args(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
446 u64 *args, int nargs, int wait)
448 switch (vdev->proxy) {
450 return vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_INDEX, cmd,
453 return vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_BDF, cmd,
457 return vnic_dev_cmd_no_proxy(vdev, cmd, args, nargs, wait);
/* Fetch firmware version info via CMD_MCPU_FW_INFO into a lazily
 * allocated DMA buffer (cached in vdev->fw_info, freed at unregister).
 * On success *fw_info points at the cached buffer.
 */
461 int vnic_dev_fw_info(struct vnic_dev *vdev,
462 struct vnic_devcmd_fw_info **fw_info)
470 if (!vdev->fw_info) {
471 snprintf((char *)name, sizeof(name), "vnic_fw_info-%u",
473 vdev->fw_info = vdev->alloc_consistent(vdev->priv,
474 sizeof(struct vnic_devcmd_fw_info),
475 &vdev->fw_info_pa, (u8 *)name);
/* Firmware DMAs the info into the buffer addressed by a0/a1. */
478 a0 = vdev->fw_info_pa;
479 a1 = sizeof(struct vnic_devcmd_fw_info);
480 err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO,
483 *fw_info = vdev->fw_info;
/* Query advanced-filter capability: asks CMD_CAPABILITY about
 * CMD_ADD_ADV_FILTER in V1 capability mode; results come back in args.
 */
487 static int vnic_dev_advanced_filters_cap(struct vnic_dev *vdev, u64 *args,
490 memset(args, 0, nargs * sizeof(*args));
491 args[0] = CMD_ADD_ADV_FILTER;
492 args[1] = FILTER_CAP_MODE_V1_FLAG;
493 return vnic_dev_cmd_args(vdev, CMD_CAPABILITY, args, nargs, 1000);
/* Returns nonzero if the adapter supports DPDK-level advanced filters
 * (a1 >= FILTER_DPDK_1 after a CMD_CAPABILITY probe).
 */
496 int vnic_dev_capable_adv_filters(struct vnic_dev *vdev)
498 u64 a0 = CMD_ADD_ADV_FILTER, a1 = 0;
502 err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait);
505 return (a1 >= (u32)FILTER_DPDK_1);
508 /* Determine the "best" filtering mode the VIC is capable of. Returns one of
509 * 3 values or 0 on error:
510 * FILTER_DPDK_1 - advanced filters available
511 * FILTER_USNIC_IP_FLAG - advanced filters but with the restriction that
512 * the IP layer must be explicitly specified. I.e. cannot have a UDP
513 * filter that matches both IPv4 and IPv6.
514 * FILTER_IPV4_5TUPLE - fallback if neither of the 2 above is available;
515 * all other filter types are not available.
516 * Return true in filter_tags if supported
518 int vnic_dev_capable_filter_mode(struct vnic_dev *vdev, u32 *mode,
/* Probe advanced-filter capability first; fall back to the plain
 * CMD_ADD_FILTER capability query if it is absent or disabled.
 */
525 err = vnic_dev_advanced_filters_cap(vdev, args, 4);
527 /* determine supported filter actions */
528 *filter_actions = FILTER_ACTION_RQ_STEERING_FLAG; /* always available */
529 if (args[2] == FILTER_CAP_MODE_V1)
530 *filter_actions = args[3];
532 if (err || ((args[0] == 1) && (args[1] == 0))) {
533 /* Adv filter Command not supported or adv filters available but
534 * not enabled. Try the normal filter capability command.
536 args[0] = CMD_ADD_FILTER;
538 err = vnic_dev_cmd_args(vdev, CMD_CAPABILITY, args, 2, 1000);
542 goto parse_max_level;
543 } else if (args[2] == FILTER_CAP_MODE_V1) {
544 /* parse filter capability mask in args[1] */
545 if (args[1] & FILTER_DPDK_1_FLAG)
546 *mode = FILTER_DPDK_1;
547 else if (args[1] & FILTER_USNIC_IP_FLAG)
548 *mode = FILTER_USNIC_IP;
549 else if (args[1] & FILTER_IPV4_5TUPLE_FLAG)
550 *mode = FILTER_IPV4_5TUPLE;
/* Legacy path: only a maximum filter level is reported. */
555 if (max_level >= (u32)FILTER_USNIC_IP)
556 *mode = FILTER_USNIC_IP;
558 *mode = FILTER_IPV4_5TUPLE;
/* Query whether NIC_CFG must be checked (cfg_chk) and whether the
 * adapter has the "weak" UDP RSS capability (weak). a1's upper 32 bits
 * carry capability flags for CMD_NIC_CFG.
 */
562 void vnic_dev_capable_udp_rss_weak(struct vnic_dev *vdev, bool *cfg_chk,
565 u64 a0 = CMD_NIC_CFG, a1 = 0;
571 err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait);
572 if (err == 0 && a0 != 0 && a1 != 0) {
574 *weak = !!((a1 >> 32) & CMD_NIC_CFG_CAPF_UDP_WEAK);
/* Generic capability probe: returns whether the firmware supports the
 * given devcmd (return expression elided in this excerpt).
 */
578 int vnic_dev_capable(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd)
580 u64 a0 = (u32)cmd, a1 = 0;
584 err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait);
/* Read a device-spec field of 1, 2 or 4 bytes at the given offset via
 * CMD_DEV_SPEC and store it into *value.
 */
589 int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, size_t size,
599 err = vnic_dev_cmd(vdev, CMD_DEV_SPEC, &a0, &a1, wait);
603 *(u8 *)value = (u8)a0;
606 *(u16 *)value = (u16)a0;
609 *(u32 *)value = (u32)a0;
/* Ask firmware to clear the hardware stats counters. */
622 int vnic_dev_stats_clear(struct vnic_dev *vdev)
627 return vnic_dev_cmd(vdev, CMD_STATS_CLEAR, &a0, &a1, wait);
/* DMA the current hardware stats into vdev->stats and return a pointer
 * to it via *stats. The stats buffer allocation check is elided here.
 */
630 int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats)
638 *stats = vdev->stats;
640 a1 = sizeof(struct vnic_stats);
642 return vnic_dev_cmd(vdev, CMD_STATS_DUMP, &a0, &a1, wait);
646 * Configure counter DMA
/* Ask firmware to periodically DMA the first num_counters flow counters
 * into the buffer at vdev->flow_counters_pa; num_counters == 0 stops
 * the DMAs. The period bounds check rejects out-of-range values.
 */
648 int vnic_dev_counter_dma_cfg(struct vnic_dev *vdev, u32 period,
655 if (num_counters > VNIC_MAX_FLOW_COUNTERS)
657 if (period > 0 && (period < VNIC_COUNTER_DMA_MIN_PERIOD ||
661 args[0] = num_counters;
662 args[1] = vdev->flow_counters_pa;
664 err = vnic_dev_cmd_args(vdev, CMD_COUNTER_DMA_CONFIG, args, 3, wait);
666 /* record if DMAs need to be stopped on close */
668 vdev->flow_counters_dma_active = (num_counters != 0 &&
/* Close the vNIC (firmware side). */
674 int vnic_dev_close(struct vnic_dev *vdev)
679 return vnic_dev_cmd(vdev, CMD_CLOSE, &a0, &a1, wait);
/* Enable the vNIC, preferring the synchronous CMD_ENABLE_WAIT when
 * firmware supports it, else the legacy CMD_ENABLE.
 */
682 int vnic_dev_enable_wait(struct vnic_dev *vdev)
687 if (vnic_dev_capable(vdev, CMD_ENABLE_WAIT))
688 return vnic_dev_cmd(vdev, CMD_ENABLE_WAIT, &a0, &a1, wait);
690 return vnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait);
/* Disable the vNIC. */
693 int vnic_dev_disable(struct vnic_dev *vdev)
698 return vnic_dev_cmd(vdev, CMD_DISABLE, &a0, &a1, wait);
/* Open the vNIC with the given argument flags. */
701 int vnic_dev_open(struct vnic_dev *vdev, int arg)
703 u64 a0 = (u32)arg, a1 = 0;
706 return vnic_dev_cmd(vdev, CMD_OPEN, &a0, &a1, wait);
/* Poll whether a previously issued CMD_OPEN has completed; result is
 * returned through *done (assignment elided in this excerpt).
 */
709 int vnic_dev_open_done(struct vnic_dev *vdev, int *done)
717 err = vnic_dev_cmd(vdev, CMD_OPEN_STATUS, &a0, &a1, wait);
/* Read the factory MAC address from firmware into mac_addr[ETH_ALEN].
 * The address travels packed in the low bytes of a0.
 */
726 int vnic_dev_get_mac_addr(struct vnic_dev *vdev, u8 *mac_addr)
732 for (i = 0; i < ETH_ALEN; i++)
735 err = vnic_dev_cmd(vdev, CMD_GET_MAC_ADDR, &a0, &a1, wait);
739 for (i = 0; i < ETH_ALEN; i++)
740 mac_addr[i] = ((u8 *)&a0)[i];
/* Program the receive packet filter from the given boolean knobs by
 * OR-ing the corresponding CMD_PFILTER_* flags into a0.
 */
745 int vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
746 int broadcast, int promisc, int allmulti)
752 a0 = (directed ? CMD_PFILTER_DIRECTED : 0) |
753 (multicast ? CMD_PFILTER_MULTICAST : 0) |
754 (broadcast ? CMD_PFILTER_BROADCAST : 0) |
755 (promisc ? CMD_PFILTER_PROMISCUOUS : 0) |
756 (allmulti ? CMD_PFILTER_ALL_MULTICAST : 0);
758 err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER, &a0, &a1, wait);
760 pr_err("Can't set packet filter\n");
/* Add a unicast/multicast MAC address to the adapter's filter table.
 * The 6 address bytes are packed into the low bytes of a0.
 */
765 int vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr)
772 for (i = 0; i < ETH_ALEN; i++)
773 ((u8 *)&a0)[i] = addr[i];
775 err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
777 pr_err("Can't add addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n",
778 addr[0], addr[1], addr[2], addr[3], addr[4], addr[5],
/* Remove a MAC address previously added with vnic_dev_add_addr(). */
784 int vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr)
791 for (i = 0; i < ETH_ALEN; i++)
792 ((u8 *)&a0)[i] = addr[i];
794 err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait);
796 pr_err("Can't del addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n",
797 addr[0], addr[1], addr[2], addr[3], addr[4], addr[5],
/* Set the ingress VLAN rewrite mode if firmware supports the command;
 * the no-capability fallback return is elided in this excerpt.
 */
803 int vnic_dev_set_ig_vlan_rewrite_mode(struct vnic_dev *vdev,
804 u8 ig_vlan_rewrite_mode)
806 u64 a0 = ig_vlan_rewrite_mode, a1 = 0;
809 if (vnic_dev_capable(vdev, CMD_IG_VLAN_REWRITE_MODE))
810 return vnic_dev_cmd(vdev, CMD_IG_VLAN_REWRITE_MODE,
/* Mark the device as (not) undergoing reset; gates notify bookkeeping. */
816 void vnic_dev_set_reset_flag(struct vnic_dev *vdev, int state)
818 vdev->in_reset = state;
821 static inline int vnic_dev_in_reset(struct vnic_dev *vdev)
823 return vdev->in_reset;
/* Point firmware at a notify buffer: a0 = buffer PA, a1 = interrupt
 * number (upper 32 bits) plus the buffer size. Records the buffer and
 * the notify size returned by firmware unless a reset is in progress.
 */
826 int vnic_dev_notify_setcmd(struct vnic_dev *vdev,
827 void *notify_addr, dma_addr_t notify_pa, u16 intr)
833 memset(notify_addr, 0, sizeof(struct vnic_devcmd_notify));
834 if (!vnic_dev_in_reset(vdev)) {
835 vdev->notify = notify_addr;
836 vdev->notify_pa = notify_pa;
/* Encode the interrupt index into bits 32..47 of a1. */
840 a1 = ((u64)intr << 32) & 0x0000ffff00000000ULL;
841 a1 += sizeof(struct vnic_devcmd_notify);
843 r = vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
/* Firmware returns the actual notify struct size it will DMA. */
844 if (!vnic_dev_in_reset(vdev))
845 vdev->notify_sz = (r == 0) ? (u32)a1 : 0;
/* Set up the notify mechanism: reuse an existing notify buffer (e.g.
 * across a reset), otherwise allocate a fresh DMA buffer, then register
 * it with firmware via vnic_dev_notify_setcmd().
 */
850 int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
852 void *notify_addr = NULL;
853 dma_addr_t notify_pa = 0;
857 if (vdev->notify || vdev->notify_pa) {
858 return vnic_dev_notify_setcmd(vdev, vdev->notify,
859 vdev->notify_pa, intr);
861 if (!vnic_dev_in_reset(vdev)) {
862 snprintf((char *)name, sizeof(name),
863 "vnic_notify-%u", instance++);
864 notify_addr = vdev->alloc_consistent(vdev->priv,
865 sizeof(struct vnic_devcmd_notify),
866 &notify_pa, (u8 *)name);
871 return vnic_dev_notify_setcmd(vdev, notify_addr, notify_pa, intr);
/* Tell firmware to stop DMA-ing notify updates: PA 0 unsets the buffer
 * and interrupt index -1 unregisters the interrupt.
 */
874 int vnic_dev_notify_unsetcmd(struct vnic_dev *vdev)
880 a0 = 0; /* paddr = 0 to unset notify buffer */
881 a1 = 0x0000ffff00000000ULL; /* intr num = -1 to unreg for intr */
882 a1 += sizeof(struct vnic_devcmd_notify);
884 err = vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
885 if (!vnic_dev_in_reset(vdev)) {
/* Free the notify buffer (unless resetting) and unregister it with
 * firmware.
 */
894 int vnic_dev_notify_unset(struct vnic_dev *vdev)
896 if (vdev->notify && !vnic_dev_in_reset(vdev)) {
897 vdev->free_consistent(vdev->priv,
898 sizeof(struct vnic_devcmd_notify),
903 return vnic_dev_notify_unsetcmd(vdev);
/* Snapshot the firmware-updated notify area into notify_copy, retrying
 * until the embedded checksum (word 0) matches the sum of the remaining
 * words — i.e. until we read a consistent snapshot.
 */
906 static int vnic_dev_notify_ready(struct vnic_dev *vdev)
909 unsigned int nwords = vdev->notify_sz / 4;
913 if (!vdev->notify || !vdev->notify_sz)
918 rte_memcpy(&vdev->notify_copy, vdev->notify, vdev->notify_sz);
919 words = (u32 *)&vdev->notify_copy;
920 for (i = 1; i < nwords; i++)
922 } while (csum != words[0]);
/* Initialize the vNIC: use CMD_INIT when available, otherwise fall back
 * to the legacy CMD_INIT_v1 and emulate the default-MAC side effect by
 * fetching and re-adding the MAC address explicitly.
 */
927 int vnic_dev_init(struct vnic_dev *vdev, int arg)
929 u64 a0 = (u32)arg, a1 = 0;
933 if (vnic_dev_capable(vdev, CMD_INIT))
934 r = vnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait);
936 vnic_dev_cmd(vdev, CMD_INIT_v1, &a0, &a1, wait);
937 if (a0 & CMD_INITF_DEFAULT_MAC) {
938 /* Emulate these for old CMD_INIT_v1 which
939 * didn't pass a0 so no CMD_INITF_*.
941 vnic_dev_cmd(vdev, CMD_GET_MAC_ADDR, &a0, &a1, wait);
942 vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
/* Install the default interrupt-coalescing timer conversion: hardware
 * units of 1.5 usec, expressed as mul/div = 2/3.
 */
948 void vnic_dev_intr_coal_timer_info_default(struct vnic_dev *vdev)
950 /* Default: hardware intr coal timer is in units of 1.5 usecs */
951 vdev->intr_coal_timer_info.mul = 2;
952 vdev->intr_coal_timer_info.div = 3;
953 vdev->intr_coal_timer_info.max_usec =
954 vnic_dev_intr_coal_timer_hw_to_usec(vdev, 0xffff);
/* Link state from the last consistent notify snapshot (0 if the notify
 * mechanism is not ready).
 */
957 int vnic_dev_link_status(struct vnic_dev *vdev)
959 if (!vnic_dev_notify_ready(vdev))
962 return vdev->notify_copy.link_state;
/* Port speed from the last consistent notify snapshot. */
965 u32 vnic_dev_port_speed(struct vnic_dev *vdev)
967 if (!vnic_dev_notify_ready(vdev))
970 return vdev->notify_copy.port_speed;
/* Convert microseconds to hardware coalescing-timer units (mul/div). */
973 u32 vnic_dev_intr_coal_timer_usec_to_hw(struct vnic_dev *vdev, u32 usec)
975 return (usec * vdev->intr_coal_timer_info.mul) /
976 vdev->intr_coal_timer_info.div;
/* Inverse of the above: hardware timer units to microseconds. */
979 u32 vnic_dev_intr_coal_timer_hw_to_usec(struct vnic_dev *vdev, u32 hw_cycles)
981 return (hw_cycles * vdev->intr_coal_timer_info.div) /
982 vdev->intr_coal_timer_info.mul;
/* Maximum coalescing interval, in usec, the hardware can represent. */
985 u32 vnic_dev_get_intr_coal_timer_max(struct vnic_dev *vdev)
987 return vdev->intr_coal_timer_info.max_usec;
/* Allocate the DMA-coherent stats buffer used by vnic_dev_stats_dump().
 * Returns -ENOMEM on allocation failure, 0 on success.
 */
990 int vnic_dev_alloc_stats_mem(struct vnic_dev *vdev)
995 snprintf((char *)name, sizeof(name), "vnic_stats-%u", instance++);
996 vdev->stats = vdev->alloc_consistent(vdev->priv,
997 sizeof(struct vnic_stats),
998 &vdev->stats_pa, (u8 *)name);
999 return vdev->stats == NULL ? -ENOMEM : 0;
1003 * Initialize for up to VNIC_MAX_FLOW_COUNTERS
/* Allocate the DMA buffer firmware will populate with flow counter
 * values when counter DMA is enabled (vnic_dev_counter_dma_cfg()).
 */
1005 int vnic_dev_alloc_counter_mem(struct vnic_dev *vdev)
1007 char name[NAME_MAX];
1008 static u32 instance;
1010 snprintf((char *)name, sizeof(name), "vnic_flow_ctrs-%u", instance++);
1011 vdev->flow_counters = vdev->alloc_consistent(vdev->priv,
1012 sizeof(struct vnic_counter_counts)
1013 * VNIC_MAX_FLOW_COUNTERS,
1014 &vdev->flow_counters_pa,
/* DMA is off until explicitly configured. */
1016 vdev->flow_counters_dma_active = 0;
1017 return vdev->flow_counters == NULL ? -ENOMEM : 0;
/* Tear down a vnic_dev: free the notify, stats, flow-counter and
 * fw_info DMA buffers (stopping counter DMA first if active). The
 * NULL-checks guarding each free are elided in this excerpt.
 */
1020 void vnic_dev_unregister(struct vnic_dev *vdev)
1024 vdev->free_consistent(vdev->priv,
1025 sizeof(struct vnic_devcmd_notify),
1029 vdev->free_consistent(vdev->priv,
1030 sizeof(struct vnic_stats),
1031 vdev->stats, vdev->stats_pa);
1032 if (vdev->flow_counters) {
1033 /* turn off counter DMAs before freeing memory */
1034 if (vdev->flow_counters_dma_active)
1035 vnic_dev_counter_dma_cfg(vdev, 0, 0);
1037 vdev->free_consistent(vdev->priv,
1038 sizeof(struct vnic_counter_counts)
1039 * VNIC_MAX_FLOW_COUNTERS,
1040 vdev->flow_counters, vdev->flow_counters_pa);
1043 vdev->free_consistent(vdev->priv,
1044 sizeof(struct vnic_devcmd_fw_info),
1045 vdev->fw_info, vdev->fw_info_pa);
/* Allocate and initialize a vnic_dev on the PCI device's NUMA node,
 * discover its BAR resources, and locate the devcmd register area.
 * Cleans up via vnic_dev_unregister() on failure.
 */
1050 struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
1051 void *priv, struct rte_pci_device *pdev, struct vnic_dev_bar *bar,
1052 unsigned int num_bars)
1055 char name[NAME_MAX];
1056 snprintf((char *)name, sizeof(name), "%s-vnic",
1058 vdev = (struct vnic_dev *)rte_zmalloc_socket(name,
1059 sizeof(struct vnic_dev),
1060 RTE_CACHE_LINE_SIZE,
1061 pdev->device.numa_node);
1069 if (vnic_dev_discover_res(vdev, bar, num_bars))
1072 vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0);
/* Error path: release everything allocated so far. */
1079 vnic_dev_unregister(vdev);
1084 * vnic_dev_classifier: Add/Delete classifier entries
1085 * @vdev: vdev of the device
1086 * @cmd: CLSF_ADD for Add filter
1087 * CLSF_DEL for Delete filter
1088 * @entry: In case of ADD filter, the caller passes the RQ number in this
1090 * This function stores the filter_id returned by the
1091 * firmware in the same variable before return;
1093 * In case of DEL filter, the caller passes the RQ number. Return
1094 * value is irrelevant.
1095 * @data: filter data
1096 * @action: action data
1098 int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry,
1099 struct filter_v2 *data, struct filter_action_v2 *action_v2)
1105 struct filter_tlv *tlv, *tlv_va;
1107 u32 filter_size, action_size;
1108 static unsigned int unique_id;
1109 char z_name[RTE_MEMZONE_NAMESIZE];
1110 enum vnic_devcmd_cmd dev_cmd;
1112 if (cmd == CLSF_ADD) {
/* Advanced filter types require the advanced-filter devcmd. */
1113 dev_cmd = (data->type >= FILTER_DPDK_1) ?
1114 CMD_ADD_ADV_FILTER : CMD_ADD_FILTER;
1116 filter_size = vnic_filter_size(data);
1117 action_size = vnic_action_size(action_v2);
/* Buffer layout: [filter TLV header][filter][action TLV header][action]. */
1119 tlv_size = filter_size + action_size +
1120 2*sizeof(struct filter_tlv);
1121 snprintf((char *)z_name, sizeof(z_name),
1122 "vnic_clsf_%u", unique_id++);
1123 tlv_va = vdev->alloc_consistent(vdev->priv,
1124 tlv_size, &tlv_pa, (u8 *)z_name);
1130 memset(tlv, 0, tlv_size);
1131 tlv->type = CLSF_TLV_FILTER;
1132 tlv->length = filter_size;
1133 memcpy(&tlv->val, (void *)data, filter_size);
/* Advance past the filter TLV to where the action TLV begins. */
1135 tlv = (struct filter_tlv *)((char *)tlv +
1136 sizeof(struct filter_tlv) +
1139 tlv->type = CLSF_TLV_ACTION;
1140 tlv->length = action_size;
1141 memcpy(&tlv->val, (void *)action_v2, action_size);
1142 ret = vnic_dev_cmd(vdev, dev_cmd, &a0, &a1, wait);
/* The TLV staging buffer is only needed for the duration of the cmd. */
1144 vdev->free_consistent(vdev->priv, tlv_size, tlv_va, tlv_pa);
1145 } else if (cmd == CLSF_DEL) {
1147 ret = vnic_dev_cmd(vdev, CMD_DEL_FILTER, &a0, &a1, wait);
/* Enable/disable an overlay offload (e.g. VXLAN) on the adapter. */
1153 int vnic_dev_overlay_offload_ctrl(struct vnic_dev *vdev, u8 overlay, u8 config)
1159 return vnic_dev_cmd(vdev, CMD_OVERLAY_OFFLOAD_CTRL, &a0, &a1, wait);
/* Configure the UDP destination port the adapter treats as VXLAN. */
1162 int vnic_dev_overlay_offload_cfg(struct vnic_dev *vdev, u8 overlay,
1163 u16 vxlan_udp_port_number)
1165 u64 a1 = vxlan_udp_port_number;
1169 return vnic_dev_cmd(vdev, CMD_OVERLAY_OFFLOAD_CFG, &a0, &a1, wait);
/* Query VXLAN feature support; the full return expression (checking
 * IPv6 + multi-WQ flags in a1) continues on the lines below.
 */
1172 int vnic_dev_capable_vxlan(struct vnic_dev *vdev)
1174 u64 a0 = VIC_FEATURE_VXLAN;
1179 ret = vnic_dev_cmd(vdev, CMD_GET_SUPP_FEATURE_VER, &a0, &a1, wait);
1180 /* 1 if the NIC can do VXLAN for both IPv4 and IPv6 with multiple WQs */
1182 (a1 & (FEATURE_VXLAN_IPV6 | FEATURE_VXLAN_MULTI_WQ)) ==
1183 (FEATURE_VXLAN_IPV6 | FEATURE_VXLAN_MULTI_WQ);
/* Allocate a flow counter from firmware; on success *idx receives the
 * counter index and true is returned (false on devcmd failure).
 */
1186 bool vnic_dev_counter_alloc(struct vnic_dev *vdev, uint32_t *idx)
1192 if (vnic_dev_cmd(vdev, CMD_COUNTER_ALLOC, &a0, &a1, wait))
1194 *idx = (uint32_t)a0;
/* Return a previously allocated flow counter to firmware. */
1198 bool vnic_dev_counter_free(struct vnic_dev *vdev, uint32_t idx)
1204 return vnic_dev_cmd(vdev, CMD_COUNTER_FREE, &a0, &a1,
/* Read (and optionally reset, when reset is true) a flow counter.
 * Non-reset reads pull packets/bytes from the DMA'd counter buffer;
 * the reset path's use of the devcmd results is partly elided here.
 */
1208 bool vnic_dev_counter_query(struct vnic_dev *vdev, uint32_t idx,
1209 bool reset, uint64_t *packets, uint64_t *bytes)
1212 u64 a1 = reset ? 1 : 0;
1216 /* query/reset returns updated counters */
1217 if (vnic_dev_cmd(vdev, CMD_COUNTER_QUERY, &a0, &a1, wait))
1222 /* Get values DMA'd from the adapter */
1223 *packets = vdev->flow_counters[idx].vcc_packets;
1224 *bytes = vdev->flow_counters[idx].vcc_bytes;