/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
 */
#include <rte_memzone.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>

#include "vnic_resource.h"
#include "vnic_devcmd.h"
#include "vnic_stats.h"
#include "vnic_flowman.h"
enum vnic_proxy_type {

struct vnic_intr_coal_timer_info {

	struct rte_pci_device *pdev;
	struct vnic_res res[RES_TYPE_MAX];
	enum vnic_dev_intr_mode intr_mode;
	struct vnic_devcmd __iomem *devcmd;
	struct vnic_devcmd_notify *notify;
	struct vnic_devcmd_notify notify_copy;
	dma_addr_t linkstatus_pa;
	struct vnic_stats *stats;
	struct vnic_devcmd_fw_info *fw_info;
	dma_addr_t fw_info_pa;
	struct fm_info *flowman_info;
	dma_addr_t flowman_info_pa;
	enum vnic_proxy_type proxy;
	u64 args[VNIC_DEVCMD_NARGS];
	struct vnic_intr_coal_timer_info intr_coal_timer_info;
	void *(*alloc_consistent)(void *priv, size_t size,
		dma_addr_t *dma_handle, u8 *name);
	void (*free_consistent)(void *priv,
		size_t size, void *vaddr,
		dma_addr_t dma_handle);

#define VNIC_MAX_RES_HDR_SIZE \
	(sizeof(struct vnic_resource_header) + \
	sizeof(struct vnic_resource) * RES_TYPE_MAX)
#define VNIC_RES_STRIDE 128
void *vnic_dev_priv(struct vnic_dev *vdev)

void vnic_register_cbacks(struct vnic_dev *vdev,
	void *(*alloc_consistent)(void *priv, size_t size,
		dma_addr_t *dma_handle, u8 *name),
	void (*free_consistent)(void *priv,
		size_t size, void *vaddr,
		dma_addr_t dma_handle))
	vdev->alloc_consistent = alloc_consistent;
	vdev->free_consistent = free_consistent;
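
/*
 * Illustrative sketch (not part of the original file): a caller such as the
 * enic PMD is expected to register DMA-coherent alloc/free callbacks right
 * after vnic_dev_register(). The callback names below are hypothetical.
 *
 *	static void *example_alloc(void *priv, size_t size,
 *				   dma_addr_t *dma_handle, u8 *name)
 *	{
 *		// e.g. reserve an rte_memzone and return its virtual address,
 *		// storing the IOVA in *dma_handle
 *	}
 *
 *	static void example_free(void *priv, size_t size, void *vaddr,
 *				 dma_addr_t dma_handle)
 *	{
 *		// release the corresponding memzone
 *	}
 *
 *	vnic_register_cbacks(vdev, example_alloc, example_free);
 */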
static int vnic_dev_discover_res(struct vnic_dev *vdev,
	struct vnic_dev_bar *bar, unsigned int num_bars)
	struct vnic_resource_header __iomem *rh;
	struct mgmt_barmap_hdr __iomem *mrh;
	struct vnic_resource __iomem *r;
	if (bar->len < VNIC_MAX_RES_HDR_SIZE) {
		pr_err("vNIC BAR0 res hdr length error\n");
		pr_err("vNIC BAR0 res hdr not mem-mapped\n");
	/* Check for mgmt vnic in addition to normal vnic */
	if ((ioread32(&rh->magic) != VNIC_RES_MAGIC) ||
	    (ioread32(&rh->version) != VNIC_RES_VERSION)) {
		if ((ioread32(&mrh->magic) != MGMTVNIC_MAGIC) ||
		    (ioread32(&mrh->version) != MGMTVNIC_VERSION)) {
			pr_err("vNIC BAR0 res magic/version error " \
				"exp (%lx/%lx) or (%lx/%lx), curr (%x/%x)\n",
				VNIC_RES_MAGIC, VNIC_RES_VERSION,
				MGMTVNIC_MAGIC, MGMTVNIC_VERSION,
				ioread32(&rh->magic), ioread32(&rh->version));
	if (ioread32(&mrh->magic) == MGMTVNIC_MAGIC)
		r = (struct vnic_resource __iomem *)(mrh + 1);
		r = (struct vnic_resource __iomem *)(rh + 1);
	while ((type = ioread8(&r->type)) != RES_TYPE_EOL) {
		u8 bar_num = ioread8(&r->bar);
		u32 bar_offset = ioread32(&r->bar_offset);
		u32 count = ioread32(&r->count);
		if (bar_num >= num_bars)
		if (!bar[bar_num].len || !bar[bar_num].vaddr)
		case RES_TYPE_INTR_CTRL:
			/* each count is stride bytes long */
			len = count * VNIC_RES_STRIDE;
			if (len + bar_offset > bar[bar_num].len) {
				pr_err("vNIC BAR0 resource %d " \
					"out-of-bounds, offset 0x%x + " \
					"size 0x%x > bar len 0x%lx\n",
		case RES_TYPE_INTR_PBA_LEGACY:
		case RES_TYPE_DEVCMD:
		vdev->res[type].count = count;
		vdev->res[type].vaddr = (char __iomem *)bar[bar_num].vaddr +
		vdev->res[type].bus_addr = bar[bar_num].bus_addr + bar_offset;
unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev,
	enum vnic_res_type type)
	return vdev->res[type].count;

void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
	if (!vdev->res[type].vaddr)
	case RES_TYPE_INTR_CTRL:
		return (char __iomem *)vdev->res[type].vaddr +
			index * VNIC_RES_STRIDE;
		return (char __iomem *)vdev->res[type].vaddr;
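
/*
 * Illustrative sketch (assumption, not from the original source): after
 * resource discovery, per-queue control registers are typically looked up by
 * type and index, e.g.
 *
 *	unsigned int n_wq = vnic_dev_get_res_count(vdev, RES_TYPE_WQ);
 *	void __iomem *wq0_ctrl = vnic_dev_get_res(vdev, RES_TYPE_WQ, 0);
 *
 * For the stride-based types handled above, the index selects a
 * VNIC_RES_STRIDE-sized slot within the mapped BAR region.
 */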
unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
	unsigned int desc_count, unsigned int desc_size)
	/* The base address of the desc rings must be 512 byte aligned.
	 * Descriptor count is aligned to groups of 32 descriptors. A
	 * count of 0 means the maximum 4096 descriptors. Descriptor
	 * size is aligned to 16 bytes.
	 */
	unsigned int count_align = 32;
	unsigned int desc_align = 16;
	ring->base_align = 512;
	ring->desc_count = VNIC_ALIGN(desc_count, count_align);
	ring->desc_size = VNIC_ALIGN(desc_size, desc_align);
	ring->size = ring->desc_count * ring->desc_size;
	ring->size_unaligned = ring->size + ring->base_align;
	return ring->size_unaligned;
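
/*
 * Worked example (illustrative): with desc_count = 100 and desc_size = 10,
 * the count rounds up to 128 (a multiple of 32) and the size rounds up to
 * 16 bytes, so ring->size = 128 * 16 = 2048 and
 * ring->size_unaligned = 2048 + 512 = 2560 bytes.
 */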
void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring)
	memset(ring->descs, 0, ring->size);

int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev,
	struct vnic_dev_ring *ring,
	unsigned int desc_count, unsigned int desc_size,
	__attribute__((unused)) unsigned int socket_id,
	dma_addr_t alloc_pa = 0;
	vnic_dev_desc_ring_size(ring, desc_count, desc_size);
	alloc_addr = vdev->alloc_consistent(vdev->priv,
		ring->size_unaligned,
		&alloc_pa, (u8 *)z_name);
		pr_err("Failed to allocate ring (size=%d), aborting\n",
	ring->descs_unaligned = alloc_addr;
		pr_err("Failed to map allocated ring (size=%d), aborting\n",
		vdev->free_consistent(vdev->priv,
			ring->size_unaligned,
	ring->base_addr_unaligned = alloc_pa;
	ring->base_addr = VNIC_ALIGN(ring->base_addr_unaligned,
	ring->descs = (u8 *)ring->descs_unaligned +
		(ring->base_addr - ring->base_addr_unaligned);
	vnic_dev_clear_desc_ring(ring);
	ring->desc_avail = ring->desc_count - 1;
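
/*
 * Illustrative usage (assumption): queue setup code would typically size and
 * allocate a ring, then release it on teardown. The ring name below is only
 * an example value.
 *
 *	struct vnic_dev_ring ring;
 *
 *	if (vnic_dev_alloc_desc_ring(vdev, &ring, 256, 16, socket_id,
 *				     "example-wq-ring"))
 *		return -ENOMEM;
 *	...
 *	vnic_dev_free_desc_ring(vdev, &ring);
 */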
void vnic_dev_free_desc_ring(__attribute__((unused)) struct vnic_dev *vdev,
	struct vnic_dev_ring *ring)
	vdev->free_consistent(vdev->priv,
		ring->size_unaligned,
		ring->descs_unaligned,
		ring->base_addr_unaligned);
static int _vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
	struct vnic_devcmd __iomem *devcmd = vdev->devcmd;
	status = ioread32(&devcmd->status);
	if (status == 0xFFFFFFFF) {
		/* PCI-e target device is gone */
	if (status & STAT_BUSY) {
		pr_err("Busy devcmd %d\n", _CMD_N(cmd));
	if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) {
		for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
			writeq(vdev->args[i], &devcmd->args[i]);
		wmb(); /* complete all writes initiated till now */
	iowrite32(cmd, &devcmd->cmd);
	if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT))
	for (delay = 0; delay < wait; delay++) {
		status = ioread32(&devcmd->status);
		if (status == 0xFFFFFFFF) {
			/* PCI-e target device is gone */
		if (!(status & STAT_BUSY)) {
			if (status & STAT_ERROR) {
				err = -(int)readq(&devcmd->args[0]);
				if (cmd != CMD_CAPABILITY &&
				    cmd != CMD_OVERLAY_OFFLOAD_CTRL &&
				    cmd != CMD_GET_SUPP_FEATURE_VER)
					pr_err("Devcmd %d failed " \
						"with error code %d\n",
			if (_CMD_DIR(cmd) & _CMD_DIR_READ) {
				rmb(); /* finish all reads initiated till now */
				for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
					vdev->args[i] = readq(&devcmd->args[i]);
	pr_err("Timed out devcmd %d\n", _CMD_N(cmd));
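
/*
 * Summary of the devcmd handshake above (descriptive comment added for
 * clarity): the posted-register protocol is
 *
 *	1. check that STAT_BUSY is clear in devcmd->status;
 *	2. for write-direction commands, write vdev->args[] to devcmd->args[]
 *	   and issue a write barrier;
 *	3. write the command number to devcmd->cmd;
 *	4. unless _CMD_FLAGS_NOWAIT, poll devcmd->status until STAT_BUSY
 *	   clears, then report STAT_ERROR (args[0] holds the error code) or
 *	   read back devcmd->args[] for read-direction commands.
 *
 * A status of 0xFFFFFFFF is treated as a surprise-removed PCIe device.
 */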
static int vnic_dev_cmd_proxy(struct vnic_dev *vdev,
	enum vnic_devcmd_cmd proxy_cmd, enum vnic_devcmd_cmd cmd,
	u64 *args, int nargs, int wait)
	/*
	 * The proxy command consumes two arguments: one for the proxy index,
	 * the other for the command being proxied.
	 */
	if (nargs > VNIC_DEVCMD_NARGS - 2) {
		pr_err("number of args %d exceeds the maximum\n", nargs);
	memset(vdev->args, 0, sizeof(vdev->args));
	vdev->args[0] = vdev->proxy_index;
	memcpy(&vdev->args[2], args, nargs * sizeof(args[0]));
	err = _vnic_dev_cmd(vdev, proxy_cmd, wait);
	status = (u32)vdev->args[0];
	if (status & STAT_ERROR) {
		err = (int)vdev->args[1];
		if (err != ERR_ECMDUNKNOWN ||
		    cmd != CMD_CAPABILITY)
			pr_err("Error %d proxy devcmd %d\n", err, _CMD_N(cmd));
	memcpy(args, &vdev->args[1], nargs * sizeof(args[0]));
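
/*
 * Illustrative layout (assumption based on the code above): when a command is
 * proxied, vdev->args[] is packed as
 *
 *	args[0]   = proxy index (vNIC index or BDF, depending on proxy_cmd)
 *	args[1]   = the devcmd being proxied
 *	args[2..] = that command's own arguments
 *
 * and on return args[0] carries the proxy status while the proxied command's
 * results start at args[1].
 */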
static int vnic_dev_cmd_no_proxy(struct vnic_dev *vdev,
	enum vnic_devcmd_cmd cmd, u64 *args, int nargs, int wait)
	if (nargs > VNIC_DEVCMD_NARGS) {
		pr_err("number of args %d exceeds the maximum\n", nargs);
	memset(vdev->args, 0, sizeof(vdev->args));
	memcpy(vdev->args, args, nargs * sizeof(args[0]));
	err = _vnic_dev_cmd(vdev, cmd, wait);
	memcpy(args, vdev->args, nargs * sizeof(args[0]));

int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
	u64 *a0, u64 *a1, int wait)
	memset(vdev->args, 0, sizeof(vdev->args));
	switch (vdev->proxy) {
		err = vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_INDEX, cmd,
			args, ARRAY_SIZE(args), wait);
		err = vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_BDF, cmd,
			args, ARRAY_SIZE(args), wait);
		err = vnic_dev_cmd_no_proxy(vdev, cmd, args, 2, wait);

int vnic_dev_cmd_args(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
	u64 *args, int nargs, int wait)
	switch (vdev->proxy) {
		return vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_INDEX, cmd,
		return vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_BDF, cmd,
		return vnic_dev_cmd_no_proxy(vdev, cmd, args, nargs, wait);
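
/*
 * Illustrative usage (assumption): most callers in this file follow the same
 * two-argument pattern, e.g. probing whether a command is supported:
 *
 *	u64 a0 = CMD_ENABLE_WAIT, a1 = 0;
 *	int wait = 1000;
 *
 *	if (vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait) == 0)
 *		// command is supported; a0/a1 hold capability details
 *
 * a0 and a1 are both inputs to and outputs of the devcmd.
 */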
int vnic_dev_fw_info(struct vnic_dev *vdev,
	struct vnic_devcmd_fw_info **fw_info)
	if (!vdev->fw_info) {
		snprintf((char *)name, sizeof(name), "vnic_fw_info-%u",
		vdev->fw_info = vdev->alloc_consistent(vdev->priv,
			sizeof(struct vnic_devcmd_fw_info),
			&vdev->fw_info_pa, (u8 *)name);
		a0 = vdev->fw_info_pa;
		a1 = sizeof(struct vnic_devcmd_fw_info);
		err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO,
	*fw_info = vdev->fw_info;

static int vnic_dev_advanced_filters_cap(struct vnic_dev *vdev, u64 *args,
	memset(args, 0, nargs * sizeof(*args));
	args[0] = CMD_ADD_ADV_FILTER;
	args[1] = FILTER_CAP_MODE_V1_FLAG;
	return vnic_dev_cmd_args(vdev, CMD_CAPABILITY, args, nargs, 1000);

int vnic_dev_capable_adv_filters(struct vnic_dev *vdev)
	u64 a0 = CMD_ADD_ADV_FILTER, a1 = 0;
	err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait);
	return (a1 >= (u32)FILTER_DPDK_1);
int vnic_dev_flowman_cmd(struct vnic_dev *vdev, u64 *args, int nargs)
	return vnic_dev_cmd_args(vdev, CMD_FLOW_MANAGER_OP, args, nargs, wait);

static int vnic_dev_flowman_enable(struct vnic_dev *vdev, u32 *mode,
	/* flowman devcmd available? */
	if (!vnic_dev_capable(vdev, CMD_FLOW_MANAGER_OP))
	/* Have the version we are using? */
	args[0] = FM_API_VERSION_QUERY;
	if (vnic_dev_flowman_cmd(vdev, args, 1))
	if ((args[0] & (1ULL << FM_VERSION)) == 0)
	/* Select the version */
	args[0] = FM_API_VERSION_SELECT;
	args[1] = FM_VERSION;
	if (vnic_dev_flowman_cmd(vdev, args, 2))
	/* Can we get fm_info? */
	if (!vdev->flowman_info) {
		snprintf((char *)name, sizeof(name), "vnic_flowman_info-%u",
		vdev->flowman_info = vdev->alloc_consistent(vdev->priv,
			sizeof(struct fm_info),
			&vdev->flowman_info_pa, (u8 *)name);
		if (!vdev->flowman_info)
	args[0] = FM_INFO_QUERY;
	args[1] = vdev->flowman_info_pa;
	args[2] = sizeof(struct fm_info);
	if (vnic_dev_flowman_cmd(vdev, args, 3))
	/* Have required operations? */
	ops = (1ULL << FMOP_END) |
		(1ULL << FMOP_DROP) |
		(1ULL << FMOP_RQ_STEER) |
		(1ULL << FMOP_EXACT_MATCH) |
		(1ULL << FMOP_MARK) |
		(1ULL << FMOP_EG_HAIRPIN) |
		(1ULL << FMOP_ENCAP) |
		(1ULL << FMOP_DECAP_NOSTRIP);
	if ((vdev->flowman_info->fm_op_mask & ops) != ops)
	/* Good to use flowman now */
	*mode = FILTER_FLOWMAN;
	*filter_actions = FILTER_ACTION_RQ_STEERING_FLAG |
		FILTER_ACTION_FILTER_ID_FLAG |
		FILTER_ACTION_COUNTER_FLAG |
		FILTER_ACTION_DROP_FLAG;
/* Determine the "best" filtering mode the VIC is capable of. Returns one of
 * four values, or 0 on error:
 *	FILTER_FLOWMAN - flowman API capable
 *	FILTER_DPDK_1 - advanced filters available
 *	FILTER_USNIC_IP_FLAG - advanced filters, but with the restriction that
 *		the IP layer must be explicitly specified, i.e. cannot have a
 *		UDP filter that matches both IPv4 and IPv6.
 *	FILTER_IPV4_5TUPLE - fallback if neither of the two above is available;
 *		all other filter types are not available.
 * Return true in filter_tags if supported.
 */
int vnic_dev_capable_filter_mode(struct vnic_dev *vdev, u32 *mode,
	/* If flowman is available, use it as it is the most capable API */
	if (vnic_dev_flowman_enable(vdev, mode, filter_actions))
	err = vnic_dev_advanced_filters_cap(vdev, args, 4);
	/* determine supported filter actions */
	*filter_actions = FILTER_ACTION_RQ_STEERING_FLAG; /* always available */
	if (args[2] == FILTER_CAP_MODE_V1)
		*filter_actions = args[3];
	if (err || ((args[0] == 1) && (args[1] == 0))) {
		/* Adv filter command not supported, or adv filters available
		 * but not enabled. Try the normal filter capability command.
		 */
		args[0] = CMD_ADD_FILTER;
		err = vnic_dev_cmd_args(vdev, CMD_CAPABILITY, args, 2, 1000);
		goto parse_max_level;
	} else if (args[2] == FILTER_CAP_MODE_V1) {
		/* parse filter capability mask in args[1] */
		if (args[1] & FILTER_DPDK_1_FLAG)
			*mode = FILTER_DPDK_1;
		else if (args[1] & FILTER_USNIC_IP_FLAG)
			*mode = FILTER_USNIC_IP;
		else if (args[1] & FILTER_IPV4_5TUPLE_FLAG)
			*mode = FILTER_IPV4_5TUPLE;
	if (max_level >= (u32)FILTER_USNIC_IP)
		*mode = FILTER_USNIC_IP;
		*mode = FILTER_IPV4_5TUPLE;
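
/*
 * Illustrative usage (assumption): a probe path might pick the filter mode
 * once and branch on it later, e.g.
 *
 *	u32 mode = 0;
 *	u8 actions = 0;
 *
 *	if (vnic_dev_capable_filter_mode(vdev, &mode, &actions) == 0 &&
 *	    mode == FILTER_FLOWMAN)
 *		// use the flowman API for flow offloads
 */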
void vnic_dev_capable_udp_rss_weak(struct vnic_dev *vdev, bool *cfg_chk,
	u64 a0 = CMD_NIC_CFG, a1 = 0;
	err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait);
	if (err == 0 && a0 != 0 && a1 != 0) {
		*weak = !!((a1 >> 32) & CMD_NIC_CFG_CAPF_UDP_WEAK);

int vnic_dev_capable(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd)
	u64 a0 = (u32)cmd, a1 = 0;
	err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait);

int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, size_t size,
	err = vnic_dev_cmd(vdev, CMD_DEV_SPEC, &a0, &a1, wait);
		*(u8 *)value = (u8)a0;
		*(u16 *)value = (u16)a0;
		*(u32 *)value = (u32)a0;
int vnic_dev_stats_clear(struct vnic_dev *vdev)
	return vnic_dev_cmd(vdev, CMD_STATS_CLEAR, &a0, &a1, wait);

int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats)
	*stats = vdev->stats;
	a1 = sizeof(struct vnic_stats);
	return vnic_dev_cmd(vdev, CMD_STATS_DUMP, &a0, &a1, wait);

int vnic_dev_close(struct vnic_dev *vdev)
	return vnic_dev_cmd(vdev, CMD_CLOSE, &a0, &a1, wait);

int vnic_dev_enable_wait(struct vnic_dev *vdev)
	if (vnic_dev_capable(vdev, CMD_ENABLE_WAIT))
		return vnic_dev_cmd(vdev, CMD_ENABLE_WAIT, &a0, &a1, wait);
	return vnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait);

int vnic_dev_disable(struct vnic_dev *vdev)
	return vnic_dev_cmd(vdev, CMD_DISABLE, &a0, &a1, wait);

int vnic_dev_open(struct vnic_dev *vdev, int arg)
	u64 a0 = (u32)arg, a1 = 0;
	return vnic_dev_cmd(vdev, CMD_OPEN, &a0, &a1, wait);

int vnic_dev_open_done(struct vnic_dev *vdev, int *done)
	err = vnic_dev_cmd(vdev, CMD_OPEN_STATUS, &a0, &a1, wait);

int vnic_dev_get_mac_addr(struct vnic_dev *vdev, u8 *mac_addr)
	for (i = 0; i < ETH_ALEN; i++)
	err = vnic_dev_cmd(vdev, CMD_GET_MAC_ADDR, &a0, &a1, wait);
	for (i = 0; i < ETH_ALEN; i++)
		mac_addr[i] = ((u8 *)&a0)[i];
int vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
	int broadcast, int promisc, int allmulti)
	a0 = (directed ? CMD_PFILTER_DIRECTED : 0) |
		(multicast ? CMD_PFILTER_MULTICAST : 0) |
		(broadcast ? CMD_PFILTER_BROADCAST : 0) |
		(promisc ? CMD_PFILTER_PROMISCUOUS : 0) |
		(allmulti ? CMD_PFILTER_ALL_MULTICAST : 0);
	err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER, &a0, &a1, wait);
		pr_err("Can't set packet filter\n");
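
/*
 * Illustrative usage (assumption): enabling promiscuous reception while still
 * accepting directed, broadcast and multicast frames could look like
 *
 *	vnic_dev_packet_filter(vdev, 1, 1, 1, 1, 1);
 *
 * Each flag simply sets the corresponding CMD_PFILTER_* bit in a0.
 */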
int vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr)
	for (i = 0; i < ETH_ALEN; i++)
		((u8 *)&a0)[i] = addr[i];
	err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
		pr_err("Can't add addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n",
			addr[0], addr[1], addr[2], addr[3], addr[4], addr[5],

int vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr)
	for (i = 0; i < ETH_ALEN; i++)
		((u8 *)&a0)[i] = addr[i];
	err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait);
		pr_err("Can't del addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n",
			addr[0], addr[1], addr[2], addr[3], addr[4], addr[5],

int vnic_dev_set_ig_vlan_rewrite_mode(struct vnic_dev *vdev,
	u8 ig_vlan_rewrite_mode)
	u64 a0 = ig_vlan_rewrite_mode, a1 = 0;
	if (vnic_dev_capable(vdev, CMD_IG_VLAN_REWRITE_MODE))
		return vnic_dev_cmd(vdev, CMD_IG_VLAN_REWRITE_MODE,

void vnic_dev_set_reset_flag(struct vnic_dev *vdev, int state)
	vdev->in_reset = state;

static inline int vnic_dev_in_reset(struct vnic_dev *vdev)
	return vdev->in_reset;

int vnic_dev_notify_setcmd(struct vnic_dev *vdev,
	void *notify_addr, dma_addr_t notify_pa, u16 intr)
	memset(notify_addr, 0, sizeof(struct vnic_devcmd_notify));
	if (!vnic_dev_in_reset(vdev)) {
		vdev->notify = notify_addr;
		vdev->notify_pa = notify_pa;
	a1 = ((u64)intr << 32) & 0x0000ffff00000000ULL;
	a1 += sizeof(struct vnic_devcmd_notify);
	r = vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
	if (!vnic_dev_in_reset(vdev))
		vdev->notify_sz = (r == 0) ? (u32)a1 : 0;
int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
	void *notify_addr = NULL;
	dma_addr_t notify_pa = 0;
	if (vdev->notify || vdev->notify_pa) {
		return vnic_dev_notify_setcmd(vdev, vdev->notify,
			vdev->notify_pa, intr);
	if (!vnic_dev_in_reset(vdev)) {
		snprintf((char *)name, sizeof(name),
			"vnic_notify-%u", instance++);
		notify_addr = vdev->alloc_consistent(vdev->priv,
			sizeof(struct vnic_devcmd_notify),
			&notify_pa, (u8 *)name);
	return vnic_dev_notify_setcmd(vdev, notify_addr, notify_pa, intr);

int vnic_dev_notify_unsetcmd(struct vnic_dev *vdev)
	a0 = 0; /* paddr = 0 to unset notify buffer */
	a1 = 0x0000ffff00000000ULL; /* intr num = -1 to unreg for intr */
	a1 += sizeof(struct vnic_devcmd_notify);
	err = vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
	if (!vnic_dev_in_reset(vdev)) {

int vnic_dev_notify_unset(struct vnic_dev *vdev)
	if (vdev->notify && !vnic_dev_in_reset(vdev)) {
		vdev->free_consistent(vdev->priv,
			sizeof(struct vnic_devcmd_notify),
	return vnic_dev_notify_unsetcmd(vdev);

static int vnic_dev_notify_ready(struct vnic_dev *vdev)
	unsigned int nwords = vdev->notify_sz / 4;
	if (!vdev->notify || !vdev->notify_sz)
		rte_memcpy(&vdev->notify_copy, vdev->notify, vdev->notify_sz);
		words = (u32 *)&vdev->notify_copy;
		for (i = 1; i < nwords; i++)
	} while (csum != words[0]);
int vnic_dev_init(struct vnic_dev *vdev, int arg)
	u64 a0 = (u32)arg, a1 = 0;
	if (vnic_dev_capable(vdev, CMD_INIT))
		r = vnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait);
		vnic_dev_cmd(vdev, CMD_INIT_v1, &a0, &a1, wait);
		if (a0 & CMD_INITF_DEFAULT_MAC) {
			/* Emulate these for old CMD_INIT_v1 which
			 * didn't pass a0 so no CMD_INITF_*.
			 */
			vnic_dev_cmd(vdev, CMD_GET_MAC_ADDR, &a0, &a1, wait);
			vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);

void vnic_dev_intr_coal_timer_info_default(struct vnic_dev *vdev)
	/* Default: hardware intr coal timer is in units of 1.5 usecs */
	vdev->intr_coal_timer_info.mul = 2;
	vdev->intr_coal_timer_info.div = 3;
	vdev->intr_coal_timer_info.max_usec =
		vnic_dev_intr_coal_timer_hw_to_usec(vdev, 0xffff);
int vnic_dev_link_status(struct vnic_dev *vdev)
	if (!vnic_dev_notify_ready(vdev))
	return vdev->notify_copy.link_state;

u32 vnic_dev_port_speed(struct vnic_dev *vdev)
	if (!vnic_dev_notify_ready(vdev))
	return vdev->notify_copy.port_speed;

u32 vnic_dev_intr_coal_timer_usec_to_hw(struct vnic_dev *vdev, u32 usec)
	return (usec * vdev->intr_coal_timer_info.mul) /
		vdev->intr_coal_timer_info.div;

u32 vnic_dev_intr_coal_timer_hw_to_usec(struct vnic_dev *vdev, u32 hw_cycles)
	return (hw_cycles * vdev->intr_coal_timer_info.div) /
		vdev->intr_coal_timer_info.mul;
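
/*
 * Worked example (illustrative): with the default mul = 2, div = 3 (hardware
 * ticks of 1.5 usec), vnic_dev_intr_coal_timer_usec_to_hw(vdev, 15) yields
 * 15 * 2 / 3 = 10 hardware ticks, and the default max_usec computed from
 * 0xffff ticks is 65535 * 3 / 2 = 98302 usec.
 */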
u32 vnic_dev_get_intr_coal_timer_max(struct vnic_dev *vdev)
	return vdev->intr_coal_timer_info.max_usec;

int vnic_dev_alloc_stats_mem(struct vnic_dev *vdev)
	char name[NAME_MAX];
	static u32 instance;
	snprintf((char *)name, sizeof(name), "vnic_stats-%u", instance++);
	vdev->stats = vdev->alloc_consistent(vdev->priv,
		sizeof(struct vnic_stats),
		&vdev->stats_pa, (u8 *)name);
	return vdev->stats == NULL ? -ENOMEM : 0;

void vnic_dev_unregister(struct vnic_dev *vdev)
			vdev->free_consistent(vdev->priv,
				sizeof(struct vnic_devcmd_notify),
			vdev->free_consistent(vdev->priv,
				sizeof(struct vnic_stats),
				vdev->stats, vdev->stats_pa);
		if (vdev->flowman_info)
			vdev->free_consistent(vdev->priv,
				sizeof(struct fm_info),
				vdev->flowman_info, vdev->flowman_info_pa);
			vdev->free_consistent(vdev->priv,
				sizeof(struct vnic_devcmd_fw_info),
				vdev->fw_info, vdev->fw_info_pa);
struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
	void *priv, struct rte_pci_device *pdev, struct vnic_dev_bar *bar,
	unsigned int num_bars)
	char name[NAME_MAX];
	snprintf((char *)name, sizeof(name), "%s-vnic",
	vdev = (struct vnic_dev *)rte_zmalloc_socket(name,
		sizeof(struct vnic_dev),
		RTE_CACHE_LINE_SIZE,
		pdev->device.numa_node);
	if (vnic_dev_discover_res(vdev, bar, num_bars))
	vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0);
	vnic_dev_unregister(vdev);
/*
 * vnic_dev_classifier: Add/Delete classifier entries
 * @vdev: vdev of the device
 * @cmd: CLSF_ADD for Add filter
 *	 CLSF_DEL for Delete filter
 * @entry: In case of ADD filter, the caller passes the RQ number in this
 *	 variable.
 *	 This function stores the filter_id returned by the
 *	 firmware in the same variable before return.
 *
 *	 In case of DEL filter, the caller passes the RQ number. Return
 *	 value is irrelevant.
 * @data: filter data
 * @action: action data
 */
int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry,
	struct filter_v2 *data, struct filter_action_v2 *action_v2)
	struct filter_tlv *tlv, *tlv_va;
	u32 filter_size, action_size;
	static unsigned int unique_id;
	char z_name[RTE_MEMZONE_NAMESIZE];
	enum vnic_devcmd_cmd dev_cmd;
	if (cmd == CLSF_ADD) {
		dev_cmd = (data->type >= FILTER_DPDK_1) ?
			CMD_ADD_ADV_FILTER : CMD_ADD_FILTER;
		filter_size = vnic_filter_size(data);
		action_size = vnic_action_size(action_v2);
		tlv_size = filter_size + action_size +
			2 * sizeof(struct filter_tlv);
		snprintf((char *)z_name, sizeof(z_name),
			"vnic_clsf_%u", unique_id++);
		tlv_va = vdev->alloc_consistent(vdev->priv,
			tlv_size, &tlv_pa, (u8 *)z_name);
		memset(tlv, 0, tlv_size);
		tlv->type = CLSF_TLV_FILTER;
		tlv->length = filter_size;
		memcpy(&tlv->val, (void *)data, filter_size);
		tlv = (struct filter_tlv *)((char *)tlv +
			sizeof(struct filter_tlv) +
		tlv->type = CLSF_TLV_ACTION;
		tlv->length = action_size;
		memcpy(&tlv->val, (void *)action_v2, action_size);
		ret = vnic_dev_cmd(vdev, dev_cmd, &a0, &a1, wait);
		vdev->free_consistent(vdev->priv, tlv_size, tlv_va, tlv_pa);
	} else if (cmd == CLSF_DEL) {
		ret = vnic_dev_cmd(vdev, CMD_DEL_FILTER, &a0, &a1, wait);
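
/*
 * Illustrative usage (assumption): adding a filter steers matching packets to
 * an RQ and returns the firmware filter ID in the same variable, which is
 * later used to delete the entry. rq_index, filter and action below are
 * hypothetical caller-side values.
 *
 *	u16 entry = rq_index;
 *
 *	if (vnic_dev_classifier(vdev, CLSF_ADD, &entry, &filter, &action) == 0)
 *		filter_id = entry;	// save for the matching CLSF_DEL
 *	...
 *	entry = filter_id;
 *	vnic_dev_classifier(vdev, CLSF_DEL, &entry, NULL, NULL);
 */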
int vnic_dev_overlay_offload_ctrl(struct vnic_dev *vdev, u8 overlay, u8 config)
	return vnic_dev_cmd(vdev, CMD_OVERLAY_OFFLOAD_CTRL, &a0, &a1, wait);

int vnic_dev_overlay_offload_cfg(struct vnic_dev *vdev, u8 overlay,
	u16 vxlan_udp_port_number)
	u64 a1 = vxlan_udp_port_number;
	return vnic_dev_cmd(vdev, CMD_OVERLAY_OFFLOAD_CFG, &a0, &a1, wait);

int vnic_dev_capable_vxlan(struct vnic_dev *vdev)
	u64 a0 = VIC_FEATURE_VXLAN;
	ret = vnic_dev_cmd(vdev, CMD_GET_SUPP_FEATURE_VER, &a0, &a1, wait);
	/* 1 if the NIC can do VXLAN for both IPv4 and IPv6 with multiple WQs */
		(a1 & (FEATURE_VXLAN_IPV6 | FEATURE_VXLAN_MULTI_WQ)) ==
		(FEATURE_VXLAN_IPV6 | FEATURE_VXLAN_MULTI_WQ);

int vnic_dev_capable_geneve(struct vnic_dev *vdev)
	u64 a0 = VIC_FEATURE_GENEVE;
	ret = vnic_dev_cmd(vdev, CMD_GET_SUPP_FEATURE_VER, &a0, &a1, wait);
	return ret == 0 && (a1 & FEATURE_GENEVE_OPTIONS);