/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Chelsio Communications.
 * All rights reserved.
 */

#include "base/common.h"
#include "l2t.h"
10 * cxgbe_l2t_release - Release associated L2T entry
11 * @e: L2T entry to release
13 * Releases ref count and frees up an L2T entry from L2T table
15 void cxgbe_l2t_release(struct l2t_entry *e)
17 if (rte_atomic32_read(&e->refcnt) != 0)
18 rte_atomic32_dec(&e->refcnt);
22 * Process a CPL_L2T_WRITE_RPL. Note that the TID in the reply is really
23 * the L2T index it refers to.
25 void cxgbe_do_l2t_write_rpl(struct adapter *adap,
26 const struct cpl_l2t_write_rpl *rpl)
28 struct l2t_data *d = adap->l2t;
29 unsigned int tid = GET_TID(rpl);
30 unsigned int l2t_idx = tid % L2T_SIZE;
32 if (unlikely(rpl->status != CPL_ERR_NONE)) {
34 "Unexpected L2T_WRITE_RPL status %u for entry %u\n",
35 rpl->status, l2t_idx);
39 if (tid & F_SYNC_WR) {
40 struct l2t_entry *e = &d->l2tab[l2t_idx - d->l2t_start];
43 if (e->state != L2T_STATE_SWITCHING)
44 e->state = L2T_STATE_VALID;
45 t4_os_unlock(&e->lock);
50 * Write an L2T entry. Must be called with the entry locked.
51 * The write may be synchronous or asynchronous.
53 static int write_l2e(struct rte_eth_dev *dev, struct l2t_entry *e, int sync,
54 bool loopback, bool arpmiss)
56 struct adapter *adap = ethdev2adap(dev);
57 struct l2t_data *d = adap->l2t;
58 struct rte_mbuf *mbuf;
59 struct cpl_l2t_write_req *req;
60 struct sge_ctrl_txq *ctrlq;
61 unsigned int l2t_idx = e->idx + d->l2t_start;
62 unsigned int port_id = ethdev2pinfo(dev)->port_id;
64 ctrlq = &adap->sge.ctrlq[port_id];
65 mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
69 mbuf->data_len = sizeof(*req);
70 mbuf->pkt_len = mbuf->data_len;
72 req = rte_pktmbuf_mtod(mbuf, struct cpl_l2t_write_req *);
76 cpu_to_be32(MK_OPCODE_TID(CPL_L2T_WRITE_REQ,
77 l2t_idx | V_SYNC_WR(sync) |
78 V_TID_QID(adap->sge.fw_evtq.abs_id)));
79 req->params = cpu_to_be16(V_L2T_W_PORT(e->lport) |
80 V_L2T_W_LPBK(loopback) |
81 V_L2T_W_ARPMISS(arpmiss) |
82 V_L2T_W_NOREPLY(!sync));
83 req->l2t_idx = cpu_to_be16(l2t_idx);
84 req->vlan = cpu_to_be16(e->vlan);
85 rte_memcpy(req->dst_mac, e->dmac, RTE_ETHER_ADDR_LEN);
88 memset(req->dst_mac, 0, RTE_ETHER_ADDR_LEN);
90 t4_mgmt_tx(ctrlq, mbuf);
92 if (sync && e->state != L2T_STATE_SWITCHING)
93 e->state = L2T_STATE_SYNC_WRITE;
99 * find_or_alloc_l2e - Find/Allocate a free L2T entry
101 * @vlan: VLAN id to compare/add
102 * @port: port id to compare/add
103 * @dmac: Destination MAC address to compare/add
104 * Returns pointer to the L2T entry found/created
106 * Finds/Allocates an L2T entry to be used by switching rule of a filter.
108 static struct l2t_entry *find_or_alloc_l2e(struct l2t_data *d, u16 vlan,
111 struct l2t_entry *end, *e;
112 struct l2t_entry *first_free = NULL;
114 for (e = &d->l2tab[0], end = &d->l2tab[d->l2t_size]; e != end; ++e) {
115 if (rte_atomic32_read(&e->refcnt) == 0) {
119 if (e->state == L2T_STATE_SWITCHING) {
120 if ((!memcmp(e->dmac, dmac, RTE_ETHER_ADDR_LEN)) &&
121 e->vlan == vlan && e->lport == port)
135 e->state = L2T_STATE_UNUSED;
141 static struct l2t_entry *t4_l2t_alloc_switching(struct rte_eth_dev *dev,
145 struct adapter *adap = ethdev2adap(dev);
146 struct l2t_data *d = adap->l2t;
150 t4_os_write_lock(&d->lock);
151 e = find_or_alloc_l2e(d, vlan, port, eth_addr);
153 t4_os_lock(&e->lock);
154 if (!rte_atomic32_read(&e->refcnt)) {
155 e->state = L2T_STATE_SWITCHING;
158 rte_memcpy(e->dmac, eth_addr, RTE_ETHER_ADDR_LEN);
159 rte_atomic32_set(&e->refcnt, 1);
160 ret = write_l2e(dev, e, 0, !L2T_LPBK, !L2T_ARPMISS);
162 dev_debug(adap, "Failed to write L2T entry: %d",
165 rte_atomic32_inc(&e->refcnt);
167 t4_os_unlock(&e->lock);
169 t4_os_write_unlock(&d->lock);
171 return ret ? NULL : e;
175 * cxgbe_l2t_alloc_switching - Allocate a L2T entry for switching rule
176 * @dev: rte_eth_dev pointer
178 * @port: Associated port
179 * @dmac: Destination MAC address to add to L2T
180 * Returns pointer to the allocated l2t entry
182 * Allocates a L2T entry for use by switching rule of a filter
184 struct l2t_entry *cxgbe_l2t_alloc_switching(struct rte_eth_dev *dev, u16 vlan,
187 return t4_l2t_alloc_switching(dev, vlan, port, dmac);
191 * Initialize L2 Table
193 struct l2t_data *t4_init_l2t(unsigned int l2t_start, unsigned int l2t_end)
195 unsigned int l2t_size;
199 if (l2t_start >= l2t_end || l2t_end >= L2T_SIZE)
201 l2t_size = l2t_end - l2t_start + 1;
203 d = t4_os_alloc(sizeof(*d) + l2t_size * sizeof(struct l2t_entry));
207 d->l2t_start = l2t_start;
208 d->l2t_size = l2t_size;
210 t4_os_rwlock_init(&d->lock);
212 for (i = 0; i < d->l2t_size; ++i) {
214 d->l2tab[i].state = L2T_STATE_UNUSED;
215 t4_os_lock_init(&d->l2tab[i].lock);
216 rte_atomic32_set(&d->l2tab[i].refcnt, 0);
225 void t4_cleanup_l2t(struct adapter *adap)
228 t4_os_free(adap->l2t);