common/cnxk: enable backpressure on CPT with inline inbound
[dpdk.git] / drivers / net / af_xdp / compat.h
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2020 Intel Corporation.
3  */
4
5 #include <bpf/bpf.h>
6 #include <bpf/xsk.h>
7 #include <linux/version.h>
8 #include <poll.h>
9
10 #if KERNEL_VERSION(5, 10, 0) <= LINUX_VERSION_CODE && \
11         defined(RTE_LIBRTE_AF_XDP_PMD_SHARED_UMEM)
12 #define ETH_AF_XDP_SHARED_UMEM 1
13 #endif
14
15 #ifdef ETH_AF_XDP_SHARED_UMEM
16 static __rte_always_inline int
17 create_shared_socket(struct xsk_socket **xsk_ptr,
18                           const char *ifname,
19                           __u32 queue_id, struct xsk_umem *umem,
20                           struct xsk_ring_cons *rx,
21                           struct xsk_ring_prod *tx,
22                           struct xsk_ring_prod *fill,
23                           struct xsk_ring_cons *comp,
24                           const struct xsk_socket_config *config)
25 {
26         return xsk_socket__create_shared(xsk_ptr, ifname, queue_id, umem, rx,
27                                                 tx, fill, comp, config);
28 }
29 #else
30 static __rte_always_inline int
31 create_shared_socket(struct xsk_socket **xsk_ptr __rte_unused,
32                           const char *ifname __rte_unused,
33                           __u32 queue_id __rte_unused,
34                           struct xsk_umem *umem __rte_unused,
35                           struct xsk_ring_cons *rx __rte_unused,
36                           struct xsk_ring_prod *tx __rte_unused,
37                           struct xsk_ring_prod *fill __rte_unused,
38                           struct xsk_ring_cons *comp __rte_unused,
39                           const struct xsk_socket_config *config __rte_unused)
40 {
41         return -1;
42 }
43 #endif
44
45 #ifdef XDP_USE_NEED_WAKEUP
/* need_wakeup is available: only kick the kernel on TX when the driver
 * has explicitly requested a wakeup, saving syscalls in the fast path.
 */
static int
tx_syscall_needed(struct xsk_ring_prod *prod)
{
	return xsk_ring_prod__needs_wakeup(prod);
}
51 #else
52 static int
53 tx_syscall_needed(struct xsk_ring_prod *q __rte_unused)
54 {
55         return 1;
56 }
57 #endif
58
59 #ifdef RTE_LIBRTE_AF_XDP_PMD_BPF_LINK
/* Scan every BPF link in the kernel looking for an XDP link attached to
 * interface @ifindex.
 *
 * On a match, the open link fd is stored in *link_fd (ownership passes
 * to the caller, who must close() it) and 0 is returned. If no matching
 * link exists, 0 is returned and *link_fd is left untouched. A non-zero
 * return indicates a lookup failure.
 */
static int link_lookup(int ifindex, int *link_fd)
{
	struct bpf_link_info link_info;
	__u32 link_len;
	__u32 id = 0;	/* iteration cursor for bpf_link_get_next_id() */
	int err;
	int fd;

	while (true) {
		err = bpf_link_get_next_id(id, &id);
		if (err) {
			/* ENOENT here means we walked past the last
			 * link: end of iteration, not a failure.
			 */
			if (errno == ENOENT) {
				err = 0;
				break;
			}
			break;
		}

		fd = bpf_link_get_fd_by_id(id);
		if (fd < 0) {
			/* Link vanished between get_next_id() and here
			 * (race with another process): skip it.
			 */
			if (errno == ENOENT)
				continue;
			err = -errno;
			break;
		}

		link_len = sizeof(struct bpf_link_info);
		memset(&link_info, 0, link_len);
		err = bpf_obj_get_info_by_fd(fd, &link_info, &link_len);
		if (err) {
			close(fd);
			break;
		}
		if (link_info.type == BPF_LINK_TYPE_XDP) {
			if ((int)link_info.xdp.ifindex == ifindex) {
				/* Match: fd ownership moves to caller. */
				*link_fd = fd;
				break;
			}
		}
		/* Not the link we want: release it and keep scanning. */
		close(fd);
	}

	return err;
}
104
105 static bool probe_bpf_link(void)
106 {
107         DECLARE_LIBBPF_OPTS(bpf_link_create_opts, opts,
108                             .flags = XDP_FLAGS_SKB_MODE);
109         struct bpf_load_program_attr prog_attr;
110         struct bpf_insn insns[2];
111         int prog_fd, link_fd = -1;
112         int ifindex_lo = 1;
113         bool ret = false;
114         int err;
115
116         err = link_lookup(ifindex_lo, &link_fd);
117         if (err)
118                 return ret;
119
120         if (link_fd >= 0)
121                 return true;
122
123         /* BPF_MOV64_IMM(BPF_REG_0, XDP_PASS), */
124         insns[0].code = BPF_ALU64 | BPF_MOV | BPF_K;
125         insns[0].dst_reg = BPF_REG_0;
126         insns[0].imm = XDP_PASS;
127
128         /* BPF_EXIT_INSN() */
129         insns[1].code = BPF_JMP | BPF_EXIT;
130
131         memset(&prog_attr, 0, sizeof(prog_attr));
132         prog_attr.prog_type = BPF_PROG_TYPE_XDP;
133         prog_attr.insns = insns;
134         prog_attr.insns_cnt = RTE_DIM(insns);
135         prog_attr.license = "GPL";
136
137         prog_fd = bpf_load_program_xattr(&prog_attr, NULL, 0);
138         if (prog_fd < 0)
139                 return ret;
140
141         link_fd = bpf_link_create(prog_fd, ifindex_lo, BPF_XDP, &opts);
142         close(prog_fd);
143
144         if (link_fd >= 0) {
145                 ret = true;
146                 close(link_fd);
147         }
148
149         return ret;
150 }
151
152 static int link_xdp_program(int if_index, int prog_fd, bool use_bpf_link)
153 {
154         DECLARE_LIBBPF_OPTS(bpf_link_create_opts, opts);
155         int link_fd, ret = 0;
156
157         if (!use_bpf_link)
158                 return bpf_set_link_xdp_fd(if_index, prog_fd,
159                                            XDP_FLAGS_UPDATE_IF_NOEXIST);
160
161         opts.flags = 0;
162         link_fd = bpf_link_create(prog_fd, if_index, BPF_XDP, &opts);
163         if (link_fd < 0)
164                 ret = -1;
165
166         return ret;
167 }
168 #else
/* Built without bpf_link support: always report it unavailable. */
static bool probe_bpf_link(void)
{
	return false;
}
173
174 static int link_xdp_program(int if_index, int prog_fd,
175                             bool use_bpf_link __rte_unused)
176 {
177         return bpf_set_link_xdp_fd(if_index, prog_fd,
178                                    XDP_FLAGS_UPDATE_IF_NOEXIST);
179 }
180 #endif