/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
 */
#include <rte_memzone.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_ether.h>

#include "vnic_dev.h"
#include "vnic_resource.h"
#include "vnic_devcmd.h"
#include "vnic_nic.h"
#include "vnic_stats.h"
#include "vnic_flowman.h"
enum vnic_proxy_type {
	PROXY_NONE,
	PROXY_BY_BDF,
	PROXY_BY_INDEX,
};

struct vnic_res {
	void __iomem *vaddr;
	dma_addr_t bus_addr;
	unsigned int count;
};
struct vnic_intr_coal_timer_info {
	uint32_t mul;
	uint32_t div;
	uint32_t max_usec;
};

struct vnic_dev {
	void *priv;
	struct rte_pci_device *pdev;
	struct vnic_res res[RES_TYPE_MAX];
	enum vnic_dev_intr_mode intr_mode;
	struct vnic_devcmd __iomem *devcmd;
	struct vnic_devcmd_notify *notify;
	struct vnic_devcmd_notify notify_copy;
	dma_addr_t notify_pa;
	uint32_t notify_sz;
	dma_addr_t linkstatus_pa;
	struct vnic_stats *stats;
	dma_addr_t stats_pa;
	struct vnic_devcmd_fw_info *fw_info;
	dma_addr_t fw_info_pa;
	struct fm_info *flowman_info;
	dma_addr_t flowman_info_pa;
	enum vnic_proxy_type proxy;
	uint32_t proxy_index;
	uint64_t args[VNIC_DEVCMD_NARGS];
	int in_reset;
	struct vnic_intr_coal_timer_info intr_coal_timer_info;
	void *(*alloc_consistent)(void *priv, size_t size,
		dma_addr_t *dma_handle, uint8_t *name);
	void (*free_consistent)(void *priv,
		size_t size, void *vaddr,
		dma_addr_t dma_handle);
};
#define VNIC_MAX_RES_HDR_SIZE \
	(sizeof(struct vnic_resource_header) + \
	sizeof(struct vnic_resource) * RES_TYPE_MAX)
#define VNIC_RES_STRIDE	128
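/* Each WQ, RQ, CQ and interrupt-control resource is a 128-byte register
 * block: vnic_dev_discover_res() sizes those resources in units of this
 * stride, and vnic_dev_get_res() indexes into them with it.
 */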
void *vnic_dev_priv(struct vnic_dev *vdev)
{
	return vdev->priv;
}
void vnic_register_cbacks(struct vnic_dev *vdev,
	void *(*alloc_consistent)(void *priv, size_t size,
		dma_addr_t *dma_handle, uint8_t *name),
	void (*free_consistent)(void *priv,
		size_t size, void *vaddr,
		dma_addr_t dma_handle))
{
	vdev->alloc_consistent = alloc_consistent;
	vdev->free_consistent = free_consistent;
}
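/* Minimal usage sketch (helper names illustrative, not part of this file):
 * the caller supplies DMA-coherent memory helpers, e.g. rte_memzone-backed
 * allocators in the PMD proper:
 *
 *	vnic_register_cbacks(vdev, my_alloc_consistent, my_free_consistent);
 *
 * Every allocation this file makes (stats, fw_info, notify buffer, rings)
 * goes through these callbacks.
 */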
static int vnic_dev_discover_res(struct vnic_dev *vdev,
	struct vnic_dev_bar *bar, unsigned int num_bars)
{
	struct vnic_resource_header __iomem *rh;
	struct mgmt_barmap_hdr __iomem *mrh;
	struct vnic_resource __iomem *r;
	uint8_t type;

	if (num_bars == 0)
		return -EINVAL;

	if (bar->len < VNIC_MAX_RES_HDR_SIZE) {
		pr_err("vNIC BAR0 res hdr length error\n");
		return -EINVAL;
	}

	rh = bar->vaddr;
	mrh = bar->vaddr;
	if (!rh) {
		pr_err("vNIC BAR0 res hdr not mem-mapped\n");
		return -EINVAL;
	}

	/* Check for mgmt vnic in addition to normal vnic */
	if ((ioread32(&rh->magic) != VNIC_RES_MAGIC) ||
	    (ioread32(&rh->version) != VNIC_RES_VERSION)) {
		if ((ioread32(&mrh->magic) != MGMTVNIC_MAGIC) ||
		    (ioread32(&mrh->version) != MGMTVNIC_VERSION)) {
			pr_err("vNIC BAR0 res magic/version error " \
				"exp (%lx/%lx) or (%lx/%lx), curr (%x/%x)\n",
				VNIC_RES_MAGIC, VNIC_RES_VERSION,
				MGMTVNIC_MAGIC, MGMTVNIC_VERSION,
				ioread32(&rh->magic), ioread32(&rh->version));
			return -EINVAL;
		}
	}

	if (ioread32(&mrh->magic) == MGMTVNIC_MAGIC)
		r = (struct vnic_resource __iomem *)(mrh + 1);
	else
		r = (struct vnic_resource __iomem *)(rh + 1);

	while ((type = ioread8(&r->type)) != RES_TYPE_EOL) {
		uint8_t bar_num = ioread8(&r->bar);
		uint32_t bar_offset = ioread32(&r->bar_offset);
		uint32_t count = ioread32(&r->count);
		uint32_t len;

		r++;

		if (bar_num >= num_bars)
			continue;

		if (!bar[bar_num].len || !bar[bar_num].vaddr)
			continue;

		switch (type) {
		case RES_TYPE_WQ:
		case RES_TYPE_RQ:
		case RES_TYPE_CQ:
		case RES_TYPE_INTR_CTRL:
			/* each of the 'count' entries is stride bytes long */
			len = count * VNIC_RES_STRIDE;
			if (len + bar_offset > bar[bar_num].len) {
				pr_err("vNIC BAR0 resource %d " \
					"out-of-bounds, offset 0x%x + " \
					"size 0x%x > bar len 0x%lx\n",
					type, bar_offset, len,
					bar[bar_num].len);
				return -EINVAL;
			}
			break;
		case RES_TYPE_INTR_PBA_LEGACY:
		case RES_TYPE_DEVCMD:
			len = count;
			break;
		default:
			continue;
		}

		vdev->res[type].count = count;
		vdev->res[type].vaddr = (char __iomem *)bar[bar_num].vaddr +
			bar_offset;
		vdev->res[type].bus_addr = bar[bar_num].bus_addr + bar_offset;
	}

	return 0;
}
unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev,
	enum vnic_res_type type)
{
	return vdev->res[type].count;
}
void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
	unsigned int index)
{
	if (!vdev->res[type].vaddr)
		return NULL;

	switch (type) {
	case RES_TYPE_WQ:
	case RES_TYPE_RQ:
	case RES_TYPE_CQ:
	case RES_TYPE_INTR_CTRL:
		return (char __iomem *)vdev->res[type].vaddr +
			index * VNIC_RES_STRIDE;
	default:
		return (char __iomem *)vdev->res[type].vaddr;
	}
}
unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
	unsigned int desc_count, unsigned int desc_size)
{
	/* The base address of the desc rings must be 512 byte aligned.
	 * Descriptor count is aligned to groups of 32 descriptors. A
	 * count of 0 means the maximum 4096 descriptors. Descriptor
	 * size is aligned to 16 bytes.
	 */
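	/* Worked example: desc_count = 100 and desc_size = 10 round up to
	 * 128 descriptors of 16 bytes (2048 bytes); size_unaligned adds
	 * 512 bytes of alignment slack, so 2560 bytes are allocated.
	 */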
	unsigned int count_align = 32;
	unsigned int desc_align = 16;

	ring->base_align = 512;

	if (desc_count == 0)
		desc_count = 4096;

	ring->desc_count = VNIC_ALIGN(desc_count, count_align);

	ring->desc_size = VNIC_ALIGN(desc_size, desc_align);

	ring->size = ring->desc_count * ring->desc_size;
	ring->size_unaligned = ring->size + ring->base_align;

	return ring->size_unaligned;
}
void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring)
{
	memset(ring->descs, 0, ring->size);
}
int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev,
	struct vnic_dev_ring *ring,
	unsigned int desc_count, unsigned int desc_size,
	__rte_unused unsigned int socket_id,
	char *z_name)
{
	void *alloc_addr;
	dma_addr_t alloc_pa = 0;

	vnic_dev_desc_ring_size(ring, desc_count, desc_size);
	alloc_addr = vdev->alloc_consistent(vdev->priv,
					    ring->size_unaligned,
					    &alloc_pa, (uint8_t *)z_name);
	if (!alloc_addr) {
		pr_err("Failed to allocate ring (size=%d), aborting\n",
			(int)ring->size);
		return -ENOMEM;
	}
	ring->descs_unaligned = alloc_addr;
	if (!alloc_pa) {
		pr_err("Failed to map allocated ring (size=%d), aborting\n",
			(int)ring->size);
		vdev->free_consistent(vdev->priv,
				      ring->size_unaligned,
				      alloc_addr,
				      alloc_pa);
		return -ENOMEM;
	}
	ring->base_addr_unaligned = alloc_pa;

	ring->base_addr = VNIC_ALIGN(ring->base_addr_unaligned,
		ring->base_align);
	ring->descs = (uint8_t *)ring->descs_unaligned +
		(ring->base_addr - ring->base_addr_unaligned);

	vnic_dev_clear_desc_ring(ring);

	ring->desc_avail = ring->desc_count - 1;

	return 0;
}
void vnic_dev_free_desc_ring(__rte_unused struct vnic_dev *vdev,
	struct vnic_dev_ring *ring)
{
	if (ring->descs) {
		vdev->free_consistent(vdev->priv,
				      ring->size_unaligned,
				      ring->descs_unaligned,
				      ring->base_addr_unaligned);
		ring->descs = NULL;
	}
}
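/* Issue one command through the devcmd register window: write the
 * arguments, post the opcode, then poll the status register until
 * STAT_BUSY clears ('wait' bounds the number of 100 usec polls). An
 * all-ones status means the PCIe device has been surprise-removed.
 */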
static int _vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
	int wait)
{
	struct vnic_devcmd __iomem *devcmd = vdev->devcmd;
	unsigned int i;
	int delay;
	uint32_t status;
	int err;

	status = ioread32(&devcmd->status);
	if (status == 0xFFFFFFFF) {
		/* PCI-e target device is gone */
		return -ENODEV;
	}
	if (status & STAT_BUSY) {
		pr_err("Busy devcmd %d\n", _CMD_N(cmd));
		return -EBUSY;
	}

	if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) {
		for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
			writeq(vdev->args[i], &devcmd->args[i]);
		rte_wmb(); /* complete all writes initiated till now */
	}

	iowrite32(cmd, &devcmd->cmd);

	if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT))
		return 0;

	for (delay = 0; delay < wait; delay++) {

		usleep(100);

		status = ioread32(&devcmd->status);
		if (status == 0xFFFFFFFF) {
			/* PCI-e target device is gone */
			return -ENODEV;
		}

		if (!(status & STAT_BUSY)) {
			if (status & STAT_ERROR) {
				err = -(int)readq(&devcmd->args[0]);
				if (cmd != CMD_CAPABILITY &&
				    cmd != CMD_OVERLAY_OFFLOAD_CTRL &&
				    cmd != CMD_GET_SUPP_FEATURE_VER)
					pr_err("Devcmd %d failed " \
						"with error code %d\n",
						_CMD_N(cmd), err);
				return err;
			}

			if (_CMD_DIR(cmd) & _CMD_DIR_READ) {
				rte_rmb(); /* finish all reads */
				for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
					vdev->args[i] = readq(&devcmd->args[i]);
			}

			return 0;
		}
	}

	pr_err("Timed out devcmd %d\n", _CMD_N(cmd));
	return -ETIMEDOUT;
}
static int vnic_dev_cmd_proxy(struct vnic_dev *vdev,
	enum vnic_devcmd_cmd proxy_cmd, enum vnic_devcmd_cmd cmd,
	uint64_t *args, int nargs, int wait)
{
	uint32_t status;
	int err;

	/*
	 * A proxy command consumes 2 arguments: one for the proxy index,
	 * the other for the command to be proxied.
	 */
	if (nargs > VNIC_DEVCMD_NARGS - 2) {
		pr_err("number of args %d exceeds the maximum\n", nargs);
		return -EINVAL;
	}
	memset(vdev->args, 0, sizeof(vdev->args));

	vdev->args[0] = vdev->proxy_index;
	vdev->args[1] = cmd;
	memcpy(&vdev->args[2], args, nargs * sizeof(args[0]));

	err = _vnic_dev_cmd(vdev, proxy_cmd, wait);
	if (err)
		return err;

	status = (uint32_t)vdev->args[0];
	if (status & STAT_ERROR) {
		err = (int)vdev->args[1];
		if (err != ERR_ECMDUNKNOWN ||
		    cmd != CMD_CAPABILITY)
			pr_err("Error %d proxy devcmd %d\n", err, _CMD_N(cmd));
		return err;
	}

	memcpy(args, &vdev->args[1], nargs * sizeof(args[0]));

	return 0;
}
static int vnic_dev_cmd_no_proxy(struct vnic_dev *vdev,
	enum vnic_devcmd_cmd cmd, uint64_t *args, int nargs, int wait)
{
	int err;

	if (nargs > VNIC_DEVCMD_NARGS) {
		pr_err("number of args %d exceeds the maximum\n", nargs);
		return -EINVAL;
	}
	memset(vdev->args, 0, sizeof(vdev->args));
	memcpy(vdev->args, args, nargs * sizeof(args[0]));

	err = _vnic_dev_cmd(vdev, cmd, wait);

	memcpy(args, vdev->args, nargs * sizeof(args[0]));

	return err;
}
int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
	uint64_t *a0, uint64_t *a1, int wait)
{
	uint64_t args[2];
	int err;

	args[0] = *a0;
	args[1] = *a1;
	memset(vdev->args, 0, sizeof(vdev->args));

	switch (vdev->proxy) {
	case PROXY_BY_INDEX:
		err = vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_INDEX, cmd,
				args, ARRAY_SIZE(args), wait);
		break;
	case PROXY_BY_BDF:
		err = vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_BDF, cmd,
				args, ARRAY_SIZE(args), wait);
		break;
	case PROXY_NONE:
	default:
		err = vnic_dev_cmd_no_proxy(vdev, cmd, args, 2, wait);
		break;
	}

	if (err == 0) {
		*a0 = args[0];
		*a1 = args[1];
	}

	return err;
}
int vnic_dev_cmd_args(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
	uint64_t *args, int nargs, int wait)
{
	switch (vdev->proxy) {
	case PROXY_BY_INDEX:
		return vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_INDEX, cmd,
				args, nargs, wait);
	case PROXY_BY_BDF:
		return vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_BDF, cmd,
				args, nargs, wait);
	case PROXY_NONE:
	default:
		return vnic_dev_cmd_no_proxy(vdev, cmd, args, nargs, wait);
	}
}
int vnic_dev_fw_info(struct vnic_dev *vdev,
	struct vnic_devcmd_fw_info **fw_info)
{
	char name[RTE_MEMZONE_NAMESIZE];
	uint64_t a0, a1 = 0;
	int wait = 1000;
	int err = 0;
	static uint32_t instance;

	if (!vdev->fw_info) {
		snprintf((char *)name, sizeof(name), "vnic_fw_info-%u",
			 instance++);
		vdev->fw_info = vdev->alloc_consistent(vdev->priv,
			sizeof(struct vnic_devcmd_fw_info),
			&vdev->fw_info_pa, (uint8_t *)name);
		if (!vdev->fw_info)
			return -ENOMEM;
		a0 = vdev->fw_info_pa;
		a1 = sizeof(struct vnic_devcmd_fw_info);
		err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO,
				   &a0, &a1, wait);
	}
	*fw_info = vdev->fw_info;
	return err;
}
static int vnic_dev_advanced_filters_cap(struct vnic_dev *vdev, uint64_t *args,
	int nargs)
{
	memset(args, 0, nargs * sizeof(*args));
	args[0] = CMD_ADD_ADV_FILTER;
	args[1] = FILTER_CAP_MODE_V1_FLAG;
	return vnic_dev_cmd_args(vdev, CMD_CAPABILITY, args, nargs, 1000);
}
int vnic_dev_capable_adv_filters(struct vnic_dev *vdev)
{
	uint64_t a0 = CMD_ADD_ADV_FILTER, a1 = 0;
	int wait = 1000;
	int err;

	err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait);
	if (err)
		return 0;
	return (a1 >= (uint32_t)FILTER_DPDK_1);
}
int vnic_dev_flowman_cmd(struct vnic_dev *vdev, uint64_t *args, int nargs)
{
	int wait = 1000;

	return vnic_dev_cmd_args(vdev, CMD_FLOW_MANAGER_OP, args, nargs, wait);
}
static int vnic_dev_flowman_enable(struct vnic_dev *vdev, uint32_t *mode,
	uint8_t *filter_actions)
{
	char name[RTE_MEMZONE_NAMESIZE];
	uint64_t args[3];
	uint64_t ops;
	static uint32_t instance;

	/* flowman devcmd available? */
	if (!vnic_dev_capable(vdev, CMD_FLOW_MANAGER_OP))
		return 0;
	/* Have the version we are using? */
	args[0] = FM_API_VERSION_QUERY;
	if (vnic_dev_flowman_cmd(vdev, args, 1))
		return 0;
	if ((args[0] & (1ULL << FM_VERSION)) == 0)
		return 0;
	/* Select the version */
	args[0] = FM_API_VERSION_SELECT;
	args[1] = FM_VERSION;
	if (vnic_dev_flowman_cmd(vdev, args, 2))
		return 0;
	/* Can we get fm_info? */
	if (!vdev->flowman_info) {
		snprintf((char *)name, sizeof(name), "vnic_fm_info-%u",
			 instance++);
		vdev->flowman_info = vdev->alloc_consistent(vdev->priv,
			sizeof(struct fm_info),
			&vdev->flowman_info_pa, (uint8_t *)name);
		if (!vdev->flowman_info)
			return 0;
	}
	args[0] = FM_INFO_QUERY;
	args[1] = vdev->flowman_info_pa;
	args[2] = sizeof(struct fm_info);
	if (vnic_dev_flowman_cmd(vdev, args, 3))
		return 0;
	/* Have required operations? */
	ops = (1ULL << FMOP_END) |
		(1ULL << FMOP_DROP) |
		(1ULL << FMOP_RQ_STEER) |
		(1ULL << FMOP_EXACT_MATCH) |
		(1ULL << FMOP_MARK) |
		(1ULL << FMOP_TAG) |
		(1ULL << FMOP_EG_HAIRPIN) |
		(1ULL << FMOP_ENCAP) |
		(1ULL << FMOP_DECAP_NOSTRIP);
	if ((vdev->flowman_info->fm_op_mask & ops) != ops)
		return 0;

	/* Good to use flowman now */
	*mode = FILTER_FLOWMAN;
	*filter_actions = FILTER_ACTION_RQ_STEERING_FLAG |
		FILTER_ACTION_FILTER_ID_FLAG |
		FILTER_ACTION_COUNTER_FLAG |
		FILTER_ACTION_DROP_FLAG;
	return 1;
}
/* Determine the "best" filtering mode the VIC is capable of and return it
 * in *mode. One of four modes is chosen:
 *	FILTER_FLOWMAN - flowman API capable
 *	FILTER_DPDK_1 - advanced filters available
 *	FILTER_USNIC_IP - advanced filters, but with the restriction that
 *		the IP layer must be explicitly specified, i.e. cannot have
 *		a UDP filter that matches both IPv4 and IPv6
 *	FILTER_IPV4_5TUPLE - fallback if none of the above is available;
 *		all other filter types are not available
 * The supported filter action flags are returned in *filter_actions.
 */
int vnic_dev_capable_filter_mode(struct vnic_dev *vdev, uint32_t *mode,
	uint8_t *filter_actions)
{
	uint64_t args[4];
	int err;
	uint32_t max_level = 0;

	/* If flowman is available, use it as it is the most capable API */
	if (vnic_dev_flowman_enable(vdev, mode, filter_actions))
		return 0;

	err = vnic_dev_advanced_filters_cap(vdev, args, 4);

	/* determine supported filter actions */
	*filter_actions = FILTER_ACTION_RQ_STEERING_FLAG; /* always available */
	if (args[2] == FILTER_CAP_MODE_V1)
		*filter_actions = args[3];

	if (err || ((args[0] == 1) && (args[1] == 0))) {
		/* Adv filter command not supported, or adv filters available
		 * but not enabled. Try the normal filter capability command.
		 */
		args[0] = CMD_ADD_FILTER;
		args[1] = 0;
		err = vnic_dev_cmd_args(vdev, CMD_CAPABILITY, args, 2, 1000);
		if (err)
			return err;
		max_level = args[1];
		goto parse_max_level;
	} else if (args[2] == FILTER_CAP_MODE_V1) {
		/* parse filter capability mask in args[1] */
		if (args[1] & FILTER_DPDK_1_FLAG)
			*mode = FILTER_DPDK_1;
		else if (args[1] & FILTER_USNIC_IP_FLAG)
			*mode = FILTER_USNIC_IP;
		else if (args[1] & FILTER_IPV4_5TUPLE_FLAG)
			*mode = FILTER_IPV4_5TUPLE;
		return 0;
	}
	max_level = args[1];
parse_max_level:
	if (max_level >= (uint32_t)FILTER_USNIC_IP)
		*mode = FILTER_USNIC_IP;
	else
		*mode = FILTER_IPV4_5TUPLE;
	return 0;
}
void vnic_dev_capable_udp_rss_weak(struct vnic_dev *vdev, bool *cfg_chk,
	bool *weak)
{
	uint64_t a0 = CMD_NIC_CFG, a1 = 0;
	int wait = 1000;
	int err;

	*cfg_chk = false;
	*weak = false;
	err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait);
	if (err == 0 && a0 != 0 && a1 != 0) {
		*cfg_chk = true;
		*weak = !!((a1 >> 32) & CMD_NIC_CFG_CAPF_UDP_WEAK);
	}
}
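/* CMD_CAPABILITY probe convention: a0 carries the devcmd opcode being
 * probed. The probe fails, or comes back with a0 != 0, when the opcode
 * is not supported by the firmware.
 */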
int vnic_dev_capable(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd)
{
	uint64_t a0 = (uint32_t)cmd, a1 = 0;
	int wait = 1000;
	int err;

	err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait);

	return !(err || a0);
}
int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, size_t size,
	void *value)
{
	uint64_t a0, a1;
	int wait = 1000;
	int err;

	a0 = offset;
	a1 = size;

	err = vnic_dev_cmd(vdev, CMD_DEV_SPEC, &a0, &a1, wait);

	switch (size) {
	case 1:
		*(uint8_t *)value = (uint8_t)a0;
		break;
	case 2:
		*(uint16_t *)value = (uint16_t)a0;
		break;
	case 4:
		*(uint32_t *)value = (uint32_t)a0;
		break;
	case 8:
		*(uint64_t *)value = a0;
		break;
	default:
		BUG();
		break;
	}

	return err;
}
int vnic_dev_stats_clear(struct vnic_dev *vdev)
{
	uint64_t a0 = 0, a1 = 0;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_STATS_CLEAR, &a0, &a1, wait);
}
int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats)
{
	uint64_t a0, a1;
	int wait = 1000;

	if (!vdev->stats)
		return -ENOMEM;

	*stats = vdev->stats;
	a0 = vdev->stats_pa;
	a1 = sizeof(struct vnic_stats);

	return vnic_dev_cmd(vdev, CMD_STATS_DUMP, &a0, &a1, wait);
}
int vnic_dev_close(struct vnic_dev *vdev)
{
	uint64_t a0 = 0, a1 = 0;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_CLOSE, &a0, &a1, wait);
}

int vnic_dev_enable_wait(struct vnic_dev *vdev)
{
	uint64_t a0 = 0, a1 = 0;
	int wait = 1000;

	if (vnic_dev_capable(vdev, CMD_ENABLE_WAIT))
		return vnic_dev_cmd(vdev, CMD_ENABLE_WAIT, &a0, &a1, wait);
	else
		return vnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait);
}

int vnic_dev_disable(struct vnic_dev *vdev)
{
	uint64_t a0 = 0, a1 = 0;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_DISABLE, &a0, &a1, wait);
}
int vnic_dev_open(struct vnic_dev *vdev, int arg)
{
	uint64_t a0 = (uint32_t)arg, a1 = 0;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_OPEN, &a0, &a1, wait);
}

int vnic_dev_open_done(struct vnic_dev *vdev, int *done)
{
	uint64_t a0 = 0, a1 = 0;
	int wait = 1000;
	int err;

	*done = 0;

	err = vnic_dev_cmd(vdev, CMD_OPEN_STATUS, &a0, &a1, wait);
	if (err)
		return err;

	*done = (a0 == 0);

	return 0;
}
int vnic_dev_get_mac_addr(struct vnic_dev *vdev, uint8_t *mac_addr)
{
	uint64_t a0 = 0, a1 = 0;
	int wait = 1000;
	int err, i;

	for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
		mac_addr[i] = 0;

	err = vnic_dev_cmd(vdev, CMD_GET_MAC_ADDR, &a0, &a1, wait);
	if (err)
		return err;

	for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
		mac_addr[i] = ((uint8_t *)&a0)[i];

	return 0;
}
int vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
	int broadcast, int promisc, int allmulti)
{
	uint64_t a0, a1 = 0;
	int wait = 1000;
	int err;

	a0 = (directed ? CMD_PFILTER_DIRECTED : 0) |
	     (multicast ? CMD_PFILTER_MULTICAST : 0) |
	     (broadcast ? CMD_PFILTER_BROADCAST : 0) |
	     (promisc ? CMD_PFILTER_PROMISCUOUS : 0) |
	     (allmulti ? CMD_PFILTER_ALL_MULTICAST : 0);

	err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER, &a0, &a1, wait);
	if (err)
		pr_err("Can't set packet filter\n");

	return err;
}
int vnic_dev_add_addr(struct vnic_dev *vdev, uint8_t *addr)
{
	uint64_t a0 = 0, a1 = 0;
	int wait = 1000;
	int err;
	unsigned int i;

	for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
		((uint8_t *)&a0)[i] = addr[i];

	err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
	if (err)
		pr_err("Can't add addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n",
			addr[0], addr[1], addr[2], addr[3], addr[4], addr[5],
			err);

	return err;
}

int vnic_dev_del_addr(struct vnic_dev *vdev, uint8_t *addr)
{
	uint64_t a0 = 0, a1 = 0;
	int wait = 1000;
	int err;
	unsigned int i;

	for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
		((uint8_t *)&a0)[i] = addr[i];

	err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait);
	if (err)
		pr_err("Can't del addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n",
			addr[0], addr[1], addr[2], addr[3], addr[4], addr[5],
			err);

	return err;
}
int vnic_dev_set_ig_vlan_rewrite_mode(struct vnic_dev *vdev,
	uint8_t ig_vlan_rewrite_mode)
{
	uint64_t a0 = ig_vlan_rewrite_mode, a1 = 0;
	int wait = 1000;

	if (vnic_dev_capable(vdev, CMD_IG_VLAN_REWRITE_MODE))
		return vnic_dev_cmd(vdev, CMD_IG_VLAN_REWRITE_MODE,
				&a0, &a1, wait);
	else
		return 0;
}
void vnic_dev_set_reset_flag(struct vnic_dev *vdev, int state)
{
	vdev->in_reset = state;
}

static inline int vnic_dev_in_reset(struct vnic_dev *vdev)
{
	return vdev->in_reset;
}
int vnic_dev_notify_setcmd(struct vnic_dev *vdev,
	void *notify_addr, dma_addr_t notify_pa, uint16_t intr)
{
	uint64_t a0, a1;
	int wait = 1000;
	int r;

	memset(notify_addr, 0, sizeof(struct vnic_devcmd_notify));
	if (!vnic_dev_in_reset(vdev)) {
		vdev->notify = notify_addr;
		vdev->notify_pa = notify_pa;
	}
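	/* CMD_NOTIFY argument packing: a0 is the notify buffer bus address;
	 * a1 carries the interrupt number in bits 47:32 and the buffer size
	 * in the low 32 bits. On success the firmware returns in a1 the
	 * notify size it will write back.
	 */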
	a0 = (uint64_t)notify_pa;
	a1 = ((uint64_t)intr << 32) & 0x0000ffff00000000ULL;
	a1 += sizeof(struct vnic_devcmd_notify);

	r = vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
	if (!vnic_dev_in_reset(vdev))
		vdev->notify_sz = (r == 0) ? (uint32_t)a1 : 0;

	return r;
}
int vnic_dev_notify_set(struct vnic_dev *vdev, uint16_t intr)
{
	void *notify_addr = NULL;
	dma_addr_t notify_pa = 0;
	char name[RTE_MEMZONE_NAMESIZE];
	static uint32_t instance;

	if (vdev->notify || vdev->notify_pa) {
		return vnic_dev_notify_setcmd(vdev, vdev->notify,
					      vdev->notify_pa, intr);
	}
	if (!vnic_dev_in_reset(vdev)) {
		snprintf((char *)name, sizeof(name),
			 "vnic_notify-%u", instance++);
		notify_addr = vdev->alloc_consistent(vdev->priv,
			sizeof(struct vnic_devcmd_notify),
			&notify_pa, (uint8_t *)name);
		if (!notify_addr)
			return -ENOMEM;
	}

	return vnic_dev_notify_setcmd(vdev, notify_addr, notify_pa, intr);
}
int vnic_dev_notify_unsetcmd(struct vnic_dev *vdev)
{
	uint64_t a0, a1;
	int wait = 1000;
	int err;

	a0 = 0; /* paddr = 0 to unset notify buffer */
	a1 = 0x0000ffff00000000ULL; /* intr num = -1 to unreg for intr */
	a1 += sizeof(struct vnic_devcmd_notify);

	err = vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
	if (!vnic_dev_in_reset(vdev)) {
		vdev->notify = NULL;
		vdev->notify_pa = 0;
		vdev->notify_sz = 0;
	}

	return err;
}
int vnic_dev_notify_unset(struct vnic_dev *vdev)
{
	if (vdev->notify && !vnic_dev_in_reset(vdev)) {
		vdev->free_consistent(vdev->priv,
			sizeof(struct vnic_devcmd_notify),
			vdev->notify,
			vdev->notify_pa);
	}

	return vnic_dev_notify_unsetcmd(vdev);
}
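/* The notify buffer is updated asynchronously by the firmware; word 0
 * holds a checksum of the remaining words. Re-read it into notify_copy
 * until the checksum matches, so callers see a consistent snapshot.
 */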
static int vnic_dev_notify_ready(struct vnic_dev *vdev)
{
	uint32_t *words;
	unsigned int nwords = vdev->notify_sz / 4;
	unsigned int i;
	uint32_t csum;

	if (!vdev->notify || !vdev->notify_sz)
		return 0;

	do {
		csum = 0;
		rte_memcpy(&vdev->notify_copy, vdev->notify, vdev->notify_sz);
		words = (uint32_t *)&vdev->notify_copy;
		for (i = 1; i < nwords; i++)
			csum += words[i];
	} while (csum != words[0]);

	return 1;
}
int vnic_dev_init(struct vnic_dev *vdev, int arg)
{
	uint64_t a0 = (uint32_t)arg, a1 = 0;
	int wait = 1000;
	int r = 0;

	if (vnic_dev_capable(vdev, CMD_INIT))
		r = vnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait);
	else {
		vnic_dev_cmd(vdev, CMD_INIT_v1, &a0, &a1, wait);
		if (a0 & CMD_INITF_DEFAULT_MAC) {
			/* Emulate these for old CMD_INIT_v1 which
			 * didn't pass a0 so no CMD_INITF_*.
			 */
			vnic_dev_cmd(vdev, CMD_GET_MAC_ADDR, &a0, &a1, wait);
			vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
		}
	}
	return r;
}
void vnic_dev_intr_coal_timer_info_default(struct vnic_dev *vdev)
{
	/* Default: hardware intr coal timer is in units of 1.5 usecs */
	vdev->intr_coal_timer_info.mul = 2;
	vdev->intr_coal_timer_info.div = 3;
	vdev->intr_coal_timer_info.max_usec =
		vnic_dev_intr_coal_timer_hw_to_usec(vdev, 0xffff);
}
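/* Worked example with the defaults above: usec_to_hw(3) = 3 * 2 / 3 = 2
 * hardware units of 1.5 usec each, and max_usec = hw_to_usec(0xffff) =
 * 65535 * 3 / 2 = 98302 usec.
 */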
int vnic_dev_link_status(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.link_state;
}

uint32_t vnic_dev_port_speed(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.port_speed;
}
uint32_t vnic_dev_intr_coal_timer_usec_to_hw(struct vnic_dev *vdev,
	uint32_t usec)
{
	return (usec * vdev->intr_coal_timer_info.mul) /
		vdev->intr_coal_timer_info.div;
}

uint32_t vnic_dev_intr_coal_timer_hw_to_usec(struct vnic_dev *vdev,
	uint32_t hw_cycles)
{
	return (hw_cycles * vdev->intr_coal_timer_info.div) /
		vdev->intr_coal_timer_info.mul;
}
uint32_t vnic_dev_get_intr_coal_timer_max(struct vnic_dev *vdev)
{
	return vdev->intr_coal_timer_info.max_usec;
}
int vnic_dev_alloc_stats_mem(struct vnic_dev *vdev)
{
	char name[RTE_MEMZONE_NAMESIZE];
	static uint32_t instance;

	snprintf((char *)name, sizeof(name), "vnic_stats-%u", instance++);
	vdev->stats = vdev->alloc_consistent(vdev->priv,
		sizeof(struct vnic_stats),
		&vdev->stats_pa, (uint8_t *)name);
	return vdev->stats == NULL ? -ENOMEM : 0;
}
void vnic_dev_unregister(struct vnic_dev *vdev)
{
	if (vdev) {
		if (vdev->notify)
			vdev->free_consistent(vdev->priv,
				sizeof(struct vnic_devcmd_notify),
				vdev->notify,
				vdev->notify_pa);
		if (vdev->stats)
			vdev->free_consistent(vdev->priv,
				sizeof(struct vnic_stats),
				vdev->stats, vdev->stats_pa);
		if (vdev->flowman_info)
			vdev->free_consistent(vdev->priv,
				sizeof(struct fm_info),
				vdev->flowman_info, vdev->flowman_info_pa);
		if (vdev->fw_info)
			vdev->free_consistent(vdev->priv,
				sizeof(struct vnic_devcmd_fw_info),
				vdev->fw_info, vdev->fw_info_pa);
		rte_free(vdev);
	}
}
struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
	void *priv, struct rte_pci_device *pdev, struct vnic_dev_bar *bar,
	unsigned int num_bars)
{
	if (!vdev) {
		char name[RTE_MEMZONE_NAMESIZE];
		snprintf((char *)name, sizeof(name), "%s-vnic",
			 pdev->device.name);
		vdev = (struct vnic_dev *)rte_zmalloc_socket(name,
					sizeof(struct vnic_dev),
					RTE_CACHE_LINE_SIZE,
					pdev->device.numa_node);
		if (!vdev)
			return NULL;
	}

	vdev->priv = priv;
	vdev->pdev = pdev;

	if (vnic_dev_discover_res(vdev, bar, num_bars))
		goto err_out;

	vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0);
	if (!vdev->devcmd)
		goto err_out;

	return vdev;

err_out:
	vnic_dev_unregister(vdev);
	return NULL;
}
/*
 * vnic_dev_classifier: Add/Delete classifier entries
 * @vdev: vdev of the device
 * @cmd: CLSF_ADD for Add filter
 *	 CLSF_DEL for Delete filter
 * @entry: In case of ADD filter, the caller passes the RQ number in this
 *	   variable. This function stores the filter_id returned by the
 *	   firmware in the same variable before returning.
 *
 *	   In case of DEL filter, the caller passes the filter_id. The
 *	   return value is irrelevant.
 * @data: filter data
 * @action_v2: action data
 */
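/* The filter and the action are marshalled into a single DMA buffer as two
 * back-to-back TLVs, [filter_tlv | filter_v2][filter_tlv | action_v2], whose
 * bus address and total length are passed to the firmware in a0/a1.
 */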
int vnic_dev_classifier(struct vnic_dev *vdev, uint8_t cmd, uint16_t *entry,
	struct filter_v2 *data, struct filter_action_v2 *action_v2)
{
	uint64_t a0 = 0, a1 = 0;
	int wait = 1000;
	dma_addr_t tlv_pa;
	int ret = -EINVAL;
	struct filter_tlv *tlv, *tlv_va;
	uint64_t tlv_size;
	uint32_t filter_size, action_size;
	static unsigned int unique_id;
	char z_name[RTE_MEMZONE_NAMESIZE];
	enum vnic_devcmd_cmd dev_cmd;

	if (cmd == CLSF_ADD) {
		dev_cmd = (data->type >= FILTER_DPDK_1) ?
			  CMD_ADD_ADV_FILTER : CMD_ADD_FILTER;

		filter_size = vnic_filter_size(data);
		action_size = vnic_action_size(action_v2);

		tlv_size = filter_size + action_size +
			2 * sizeof(struct filter_tlv);
		snprintf((char *)z_name, sizeof(z_name),
			 "vnic_clsf_%u", unique_id++);
		tlv_va = vdev->alloc_consistent(vdev->priv,
			tlv_size, &tlv_pa, (uint8_t *)z_name);
		if (!tlv_va)
			return -ENOMEM;
		tlv = tlv_va;
		a0 = tlv_pa;
		a1 = tlv_size;
		memset(tlv, 0, tlv_size);
		tlv->type = CLSF_TLV_FILTER;
		tlv->length = filter_size;
		memcpy(&tlv->val, (void *)data, filter_size);

		tlv = (struct filter_tlv *)((char *)tlv +
			sizeof(struct filter_tlv) +
			filter_size);

		tlv->type = CLSF_TLV_ACTION;
		tlv->length = action_size;
		memcpy(&tlv->val, (void *)action_v2, action_size);
		ret = vnic_dev_cmd(vdev, dev_cmd, &a0, &a1, wait);
		*entry = (uint16_t)a0;
		vdev->free_consistent(vdev->priv, tlv_size, tlv_va, tlv_pa);
	} else if (cmd == CLSF_DEL) {
		a0 = *entry;
		ret = vnic_dev_cmd(vdev, CMD_DEL_FILTER, &a0, &a1, wait);
	}

	return ret;
}
int vnic_dev_overlay_offload_ctrl(struct vnic_dev *vdev, uint8_t overlay,
	uint8_t config)
{
	uint64_t a0 = overlay;
	uint64_t a1 = config;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_OVERLAY_OFFLOAD_CTRL, &a0, &a1, wait);
}

int vnic_dev_overlay_offload_cfg(struct vnic_dev *vdev, uint8_t overlay,
	uint16_t vxlan_udp_port_number)
{
	uint64_t a1 = vxlan_udp_port_number;
	uint64_t a0 = overlay;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_OVERLAY_OFFLOAD_CFG, &a0, &a1, wait);
}
int vnic_dev_capable_vxlan(struct vnic_dev *vdev)
{
	uint64_t a0 = VIC_FEATURE_VXLAN;
	uint64_t a1 = 0;
	int wait = 1000;
	int ret;

	ret = vnic_dev_cmd(vdev, CMD_GET_SUPP_FEATURE_VER, &a0, &a1, wait);
	/* 1 if the NIC can do VXLAN for both IPv4 and IPv6 with multiple WQs */
	return ret == 0 &&
		(a1 & (FEATURE_VXLAN_IPV6 | FEATURE_VXLAN_MULTI_WQ)) ==
		(FEATURE_VXLAN_IPV6 | FEATURE_VXLAN_MULTI_WQ);
}

int vnic_dev_capable_geneve(struct vnic_dev *vdev)
{
	uint64_t a0 = VIC_FEATURE_GENEVE;
	uint64_t a1 = 0;
	int wait = 1000;
	int ret;

	ret = vnic_dev_cmd(vdev, CMD_GET_SUPP_FEATURE_VER, &a0, &a1, wait);
	return ret == 0 && (a1 & FEATURE_GENEVE_OPTIONS);
}