1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Chelsio Communications.
9 #include "cxgbe_filter.h"
13 * Initialize Hash Filters
15 int init_hash_filter(struct adapter *adap)
17 unsigned int n_user_filters;
18 unsigned int user_filter_perc;
20 u32 params[7], val[7];
22 #define FW_PARAM_DEV(param) \
23 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
24 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
26 #define FW_PARAM_PFVF(param) \
27 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
28 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param) | \
29 V_FW_PARAMS_PARAM_Y(0) | \
30 V_FW_PARAMS_PARAM_Z(0))
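/* FW_PARAM_DEV() and FW_PARAM_PFVF() pack the mnemonic and parameter-index
 * fields of a firmware parameter query. The NTID device parameter queried
 * below reports how many hardware TIDs the chip supports, which in turn
 * sizes the TID/ATID tables used by hash filters.
 */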
32 params[0] = FW_PARAM_DEV(NTID);
33 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
37 adap->tids.ntids = val[0];
38 adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
40 user_filter_perc = 100;
41 n_user_filters = mult_frac(adap->tids.nftids,
45 adap->tids.nftids = n_user_filters;
46 adap->params.hash_filter = 1;
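/* user_filter_perc (fixed at 100 here) sets what share of nftids is kept
 * for user filters via mult_frac(); at 100 percent, nftids is effectively
 * unchanged. Setting params.hash_filter marks the adapter as hash-filter
 * capable for the rest of the driver.
 */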
51 * Validate whether the requested filter specification can be set by
52 * checking that the requested features have been enabled.
54 int validate_filter(struct adapter *adapter, struct ch_filter_specification *fs)
59 * Check for unconfigured fields being used.
61 fconf = adapter->params.tp.vlan_pri_map;
64 (fs->val._field || fs->mask._field)
65 #define U(_mask, _field) \
66 (!(fconf & (_mask)) && S(_field))
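/* S(_field) is true when the filter specification sets a value or mask for
 * _field; U(_mask, _field) then flags any field that is used but whose
 * match bit is not enabled in the TP VLAN priority map (fconf), i.e. a
 * match criterion the hardware was not configured to extract.
 */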
68 if (U(F_PORT, iport) || U(F_ETHERTYPE, ethtype) || U(F_PROTOCOL, proto))
75 * If the user is requesting that the filter action loop
76 * matching packets back out one of our ports, make sure that
77 * the egress port is in range.
79 if (fs->action == FILTER_SWITCH &&
80 fs->eport >= adapter->params.nports)
84 * Don't allow various trivially obvious bogus out-of-range values.
87 if (fs->val.iport >= adapter->params.nports)
94 * Get the queue to which the traffic must be steered.
96 static unsigned int get_filter_steerq(struct rte_eth_dev *dev,
97 struct ch_filter_specification *fs)
99 struct port_info *pi = ethdev2pinfo(dev);
100 struct adapter *adapter = pi->adapter;
104 * If the user has requested steering matching Ingress Packets
105 * to a specific Queue Set, we need to make sure it's in range
106 * for the port and map that into the Absolute Queue ID of the
107 * Queue Set's Response Queue.
113 * If the iq id is greater than the number of qsets,
114 * then assume it is an absolute qid.
116 if (fs->iq < pi->n_rx_qsets)
117 iq = adapter->sge.ethrxq[pi->first_qset +
126 /* Return an error number if the indicated filter isn't writable ... */
127 int writable_filter(struct filter_entry *f)
138 * Send a CPL_SET_TCB_FIELD message.
140 static void set_tcb_field(struct adapter *adapter, unsigned int ftid,
141 u16 word, u64 mask, u64 val, int no_reply)
143 struct rte_mbuf *mbuf;
144 struct cpl_set_tcb_field *req;
145 struct sge_ctrl_txq *ctrlq;
147 ctrlq = &adapter->sge.ctrlq[0];
148 mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
151 mbuf->data_len = sizeof(*req);
152 mbuf->pkt_len = mbuf->data_len;
154 req = rte_pktmbuf_mtod(mbuf, struct cpl_set_tcb_field *);
155 memset(req, 0, sizeof(*req));
156 INIT_TP_WR_MIT_CPL(req, CPL_SET_TCB_FIELD, ftid);
157 req->reply_ctrl = cpu_to_be16(V_REPLY_CHAN(0) |
158 V_QUEUENO(adapter->sge.fw_evtq.abs_id) |
159 V_NO_REPLY(no_reply));
160 req->word_cookie = cpu_to_be16(V_WORD(word) | V_COOKIE(ftid));
161 req->mask = cpu_to_be64(mask);
162 req->val = cpu_to_be64(val);
164 t4_mgmt_tx(ctrlq, mbuf);
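/* The CPL_SET_TCB_FIELD above patches (mask, val) into the given TCB word
 * of filter/connection 'ftid'. Replies, when requested (no_reply == 0),
 * are steered to the firmware event queue via V_QUEUENO().
 */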
168 * Build a CPL_SET_TCB_FIELD message as payload of a ULP_TX_PKT command.
170 static inline void mk_set_tcb_field_ulp(struct filter_entry *f,
171 struct cpl_set_tcb_field *req,
173 u64 mask, u64 val, u8 cookie,
176 struct ulp_txpkt *txpkt = (struct ulp_txpkt *)req;
177 struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);
179 txpkt->cmd_dest = cpu_to_be32(V_ULPTX_CMD(ULP_TX_PKT) |
180 V_ULP_TXPKT_DEST(0));
181 txpkt->len = cpu_to_be32(DIV_ROUND_UP(sizeof(*req), 16));
182 sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM));
183 sc->len = cpu_to_be32(sizeof(*req) - sizeof(struct work_request_hdr));
184 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_SET_TCB_FIELD, f->tid));
185 req->reply_ctrl = cpu_to_be16(V_NO_REPLY(no_reply) | V_REPLY_CHAN(0) |
187 req->word_cookie = cpu_to_be16(V_WORD(word) | V_COOKIE(cookie));
188 req->mask = cpu_to_be64(mask);
189 req->val = cpu_to_be64(val);
190 sc = (struct ulptx_idata *)(req + 1);
191 sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
192 sc->len = cpu_to_be32(0);
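/* Here the CPL_SET_TCB_FIELD is not sent on its own but embedded as
 * ULP_TX_SC_IMM immediate data inside a ULP_TX_PKT sub-command, so it can
 * be chained with other CPLs in a single ULPTX work request; the trailing
 * ULP_TX_SC_NOOP keeps the sub-command list 16-byte aligned.
 */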
196 * Check if the entry is already filled.
198 bool is_filter_set(struct tid_info *t, int fidx, int family)
203 /* An IPv6 filter requires four slots and an IPv4 filter requires only one slot.
204 * Ensure there are enough slots available.
206 max = family == FILTER_TYPE_IPV6 ? fidx + 3 : fidx;
208 t4_os_lock(&t->ftid_lock);
209 for (i = fidx; i <= max; i++) {
210 if (rte_bitmap_get(t->ftid_bmap, i)) {
215 t4_os_unlock(&t->ftid_lock);
220 * Allocate an available free entry.
222 int cxgbe_alloc_ftid(struct adapter *adap, unsigned int family)
224 struct tid_info *t = &adap->tids;
226 int size = t->nftids;
228 t4_os_lock(&t->ftid_lock);
229 if (family == FILTER_TYPE_IPV6)
230 pos = cxgbe_bitmap_find_free_region(t->ftid_bmap, size, 4);
232 pos = cxgbe_find_first_zero_bit(t->ftid_bmap, size);
233 t4_os_unlock(&t->ftid_lock);
235 return pos < size ? pos : -1;
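/* IPv6 needs a free region of 4 consecutive slots in the ftid bitmap
 * (cxgbe_bitmap_find_free_region(..., 4)), IPv4 only a single free bit; a
 * return of -1 means the LE-TCAM filter region is exhausted. The chosen
 * slots are actually reserved later by cxgbe_set_ftid() in
 * cxgbe_set_filter().
 */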
239 * Construct hash filter ntuple.
241 static u64 hash_filter_ntuple(const struct filter_entry *f)
243 struct adapter *adap = ethdev2adap(f->dev);
244 struct tp_params *tp = &adap->params.tp;
246 u16 tcp_proto = IPPROTO_TCP; /* TCP Protocol Number */
248 if (tp->port_shift >= 0)
249 ntuple |= (u64)f->fs.mask.iport << tp->port_shift;
251 if (tp->protocol_shift >= 0) {
252 if (!f->fs.val.proto)
253 ntuple |= (u64)tcp_proto << tp->protocol_shift;
255 ntuple |= (u64)f->fs.val.proto << tp->protocol_shift;
258 if (tp->ethertype_shift >= 0 && f->fs.mask.ethtype)
259 ntuple |= (u64)(f->fs.val.ethtype) << tp->ethertype_shift;
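/* Each enabled match field is packed at the shift the TP engine was
 * configured with (a missing protocol defaults to TCP); the comparison
 * below flags requests whose tuple does not line up with the configured
 * hash-filter mask.
 */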
261 if (ntuple != tp->hash_filter_mask)
268 * Build a CPL_ABORT_REQ message as payload of a ULP_TX_PKT command.
270 static void mk_abort_req_ulp(struct cpl_abort_req *abort_req,
273 struct ulp_txpkt *txpkt = (struct ulp_txpkt *)abort_req;
274 struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);
276 txpkt->cmd_dest = cpu_to_be32(V_ULPTX_CMD(ULP_TX_PKT) |
277 V_ULP_TXPKT_DEST(0));
278 txpkt->len = cpu_to_be32(DIV_ROUND_UP(sizeof(*abort_req), 16));
279 sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM));
280 sc->len = cpu_to_be32(sizeof(*abort_req) -
281 sizeof(struct work_request_hdr));
282 OPCODE_TID(abort_req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, tid));
283 abort_req->rsvd0 = cpu_to_be32(0);
284 abort_req->rsvd1 = 0;
285 abort_req->cmd = CPL_ABORT_NO_RST;
286 sc = (struct ulptx_idata *)(abort_req + 1);
287 sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
288 sc->len = cpu_to_be32(0);
292 * Build a CPL_ABORT_RPL message as payload of a ULP_TX_PKT command.
294 static void mk_abort_rpl_ulp(struct cpl_abort_rpl *abort_rpl,
297 struct ulp_txpkt *txpkt = (struct ulp_txpkt *)abort_rpl;
298 struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);
300 txpkt->cmd_dest = cpu_to_be32(V_ULPTX_CMD(ULP_TX_PKT) |
301 V_ULP_TXPKT_DEST(0));
302 txpkt->len = cpu_to_be32(DIV_ROUND_UP(sizeof(*abort_rpl), 16));
303 sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM));
304 sc->len = cpu_to_be32(sizeof(*abort_rpl) -
305 sizeof(struct work_request_hdr));
306 OPCODE_TID(abort_rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
307 abort_rpl->rsvd0 = cpu_to_be32(0);
308 abort_rpl->rsvd1 = 0;
309 abort_rpl->cmd = CPL_ABORT_NO_RST;
310 sc = (struct ulptx_idata *)(abort_rpl + 1);
311 sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
312 sc->len = cpu_to_be32(0);
316 * Delete the specified hash filter.
318 static int cxgbe_del_hash_filter(struct rte_eth_dev *dev,
319 unsigned int filter_id,
320 struct filter_ctx *ctx)
322 struct adapter *adapter = ethdev2adap(dev);
323 struct tid_info *t = &adapter->tids;
324 struct filter_entry *f;
325 struct sge_ctrl_txq *ctrlq;
326 unsigned int port_id = ethdev2pinfo(dev)->port_id;
329 if (filter_id > adapter->tids.ntids)
332 f = lookup_tid(t, filter_id);
334 dev_err(adapter, "%s: no filter entry for filter_id = %d\n",
335 __func__, filter_id);
339 ret = writable_filter(f);
345 struct rte_mbuf *mbuf;
346 struct work_request_hdr *wr;
347 struct ulptx_idata *aligner;
348 struct cpl_set_tcb_field *req;
349 struct cpl_abort_req *abort_req;
350 struct cpl_abort_rpl *abort_rpl;
355 wrlen = cxgbe_roundup(sizeof(*wr) +
356 (sizeof(*req) + sizeof(*aligner)) +
357 sizeof(*abort_req) + sizeof(*abort_rpl),
360 ctrlq = &adapter->sge.ctrlq[port_id];
361 mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
363 dev_err(adapter, "%s: could not allocate mbuf ..\n",
368 mbuf->data_len = wrlen;
369 mbuf->pkt_len = mbuf->data_len;
371 req = rte_pktmbuf_mtod(mbuf, struct cpl_set_tcb_field *);
372 INIT_ULPTX_WR(req, wrlen, 0, 0);
373 wr = (struct work_request_hdr *)req;
375 req = (struct cpl_set_tcb_field *)wr;
376 mk_set_tcb_field_ulp(f, req, W_TCB_RSS_INFO,
377 V_TCB_RSS_INFO(M_TCB_RSS_INFO),
378 V_TCB_RSS_INFO(adapter->sge.fw_evtq.abs_id),
380 aligner = (struct ulptx_idata *)(req + 1);
381 abort_req = (struct cpl_abort_req *)(aligner + 1);
382 mk_abort_req_ulp(abort_req, f->tid);
383 abort_rpl = (struct cpl_abort_rpl *)(abort_req + 1);
384 mk_abort_rpl_ulp(abort_rpl, f->tid);
385 t4_mgmt_tx(ctrlq, mbuf);
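/* Deleting a hash filter is a three-CPL composite ULPTX work request: a
 * SET_TCB_FIELD that points the filter's RSS/reply queue at the firmware
 * event queue, followed by an ABORT_REQ/ABORT_RPL pair that tears down the
 * TID. The completion eventually arrives as CPL_ABORT_RPL_RSS and is
 * handled in hash_del_filter_rpl() below.
 */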
394 * Build an ACT_OPEN_REQ6 message for setting an IPv6 hash filter.
396 static void mk_act_open_req6(struct filter_entry *f, struct rte_mbuf *mbuf,
397 unsigned int qid_filterid, struct adapter *adap)
399 struct cpl_t6_act_open_req6 *req = NULL;
400 u64 local_lo, local_hi, peer_lo, peer_hi;
401 u32 *lip = (u32 *)f->fs.val.lip;
402 u32 *fip = (u32 *)f->fs.val.fip;
404 switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
406 req = rte_pktmbuf_mtod(mbuf, struct cpl_t6_act_open_req6 *);
411 dev_err(adap, "%s: unsupported chip type!\n", __func__);
415 local_hi = ((u64)lip[1]) << 32 | lip[0];
416 local_lo = ((u64)lip[3]) << 32 | lip[2];
417 peer_hi = ((u64)fip[1]) << 32 | fip[0];
418 peer_lo = ((u64)fip[3]) << 32 | fip[2];
420 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
422 req->local_port = cpu_to_be16(f->fs.val.lport);
423 req->peer_port = cpu_to_be16(f->fs.val.fport);
424 req->local_ip_hi = local_hi;
425 req->local_ip_lo = local_lo;
426 req->peer_ip_hi = peer_hi;
427 req->peer_ip_lo = peer_lo;
428 req->opt0 = cpu_to_be64(V_DELACK(f->fs.hitcnts) |
429 V_SMAC_SEL((cxgbe_port_viid(f->dev) & 0x7F)
431 V_TX_CHAN(f->fs.eport) |
432 V_ULP_MODE(ULP_MODE_NONE) |
433 F_TCAM_BYPASS | F_NON_OFFLOAD);
434 req->params = cpu_to_be64(V_FILTER_TUPLE(hash_filter_ntuple(f)));
435 req->opt2 = cpu_to_be32(F_RSS_QUEUE_VALID |
436 V_RSS_QUEUE(f->fs.iq) |
439 V_CONG_CNTRL((f->fs.action == FILTER_DROP) |
440 (f->fs.dirsteer << 1)) |
441 V_CCTRL_ECN(f->fs.action == FILTER_SWITCH));
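/* Hash filters reuse the active-open (connection setup) path: the
 * ACT_OPEN_REQ6 carries the full 4-tuple, F_TCAM_BYPASS and F_NON_OFFLOAD
 * effectively mark it as a filter rather than a real offloaded connection,
 * and the filter action is folded into opt2 (CONG_CNTRL encodes
 * drop/steer, CCTRL_ECN encodes switch/loopback).
 */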
445 * Build an ACT_OPEN_REQ message for setting an IPv4 hash filter.
447 static void mk_act_open_req(struct filter_entry *f, struct rte_mbuf *mbuf,
448 unsigned int qid_filterid, struct adapter *adap)
450 struct cpl_t6_act_open_req *req = NULL;
452 switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
454 req = rte_pktmbuf_mtod(mbuf, struct cpl_t6_act_open_req *);
459 dev_err(adap, "%s: unsupported chip type!\n", __func__);
463 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
465 req->local_port = cpu_to_be16(f->fs.val.lport);
466 req->peer_port = cpu_to_be16(f->fs.val.fport);
467 req->local_ip = f->fs.val.lip[0] | f->fs.val.lip[1] << 8 |
468 f->fs.val.lip[2] << 16 | f->fs.val.lip[3] << 24;
469 req->peer_ip = f->fs.val.fip[0] | f->fs.val.fip[1] << 8 |
470 f->fs.val.fip[2] << 16 | f->fs.val.fip[3] << 24;
471 req->opt0 = cpu_to_be64(V_DELACK(f->fs.hitcnts) |
472 V_SMAC_SEL((cxgbe_port_viid(f->dev) & 0x7F)
474 V_TX_CHAN(f->fs.eport) |
475 V_ULP_MODE(ULP_MODE_NONE) |
476 F_TCAM_BYPASS | F_NON_OFFLOAD);
477 req->params = cpu_to_be64(V_FILTER_TUPLE(hash_filter_ntuple(f)));
478 req->opt2 = cpu_to_be32(F_RSS_QUEUE_VALID |
479 V_RSS_QUEUE(f->fs.iq) |
482 V_CONG_CNTRL((f->fs.action == FILTER_DROP) |
483 (f->fs.dirsteer << 1)) |
484 V_CCTRL_ECN(f->fs.action == FILTER_SWITCH));
488 * Set the specified hash filter.
490 static int cxgbe_set_hash_filter(struct rte_eth_dev *dev,
491 struct ch_filter_specification *fs,
492 struct filter_ctx *ctx)
494 struct port_info *pi = ethdev2pinfo(dev);
495 struct adapter *adapter = pi->adapter;
496 struct tid_info *t = &adapter->tids;
497 struct filter_entry *f;
498 struct rte_mbuf *mbuf;
499 struct sge_ctrl_txq *ctrlq;
504 ret = validate_filter(adapter, fs);
508 iq = get_filter_steerq(dev, fs);
510 ctrlq = &adapter->sge.ctrlq[pi->port_id];
512 f = t4_os_alloc(sizeof(*f));
521 atid = cxgbe_alloc_atid(t, f);
526 /* IPv6 hash filter */
527 f->clipt = cxgbe_clip_alloc(f->dev, (u32 *)&f->fs.val.lip);
531 size = sizeof(struct cpl_t6_act_open_req6);
532 mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
538 mbuf->data_len = size;
539 mbuf->pkt_len = mbuf->data_len;
541 mk_act_open_req6(f, mbuf,
542 ((adapter->sge.fw_evtq.abs_id << 14) | atid),
545 /* IPv4 hash filter */
546 size = sizeof(struct cpl_t6_act_open_req);
547 mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
553 mbuf->data_len = size;
554 mbuf->pkt_len = mbuf->data_len;
556 mk_act_open_req(f, mbuf,
557 ((adapter->sge.fw_evtq.abs_id << 14) | atid),
562 t4_mgmt_tx(ctrlq, mbuf);
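/* qid_filterid packs the firmware event queue's absolute ID in the upper
 * bits and the ATID in the lower 14, so the CPL_ACT_OPEN_RPL comes back on
 * fw_evtq tagged with this ATID; hash_filter_rpl() then swaps the ATID for
 * the real TID on success.
 */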
566 cxgbe_clip_release(f->dev, f->clipt);
568 cxgbe_free_atid(t, atid);
576 * Clear a filter and release any of its resources that we own. This also
577 * clears the filter's "pending" status.
579 void clear_filter(struct filter_entry *f)
582 cxgbe_clip_release(f->dev, f->clipt);
585 * The zeroing of the filter rule below clears the filter valid,
586 * pending, locked flags, etc. so it's all we need for this operation.
589 memset(f, 0, sizeof(*f));
593 * t4_mk_filtdelwr - create a delete filter WR
594 * @ftid: the filter ID
595 * @wr: the filter work request to populate
596 * @qid: ingress queue to receive the delete notification
598 * Creates a filter work request to delete the supplied filter. If @qid is
599 * negative the delete notification is suppressed.
601 static void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
603 memset(wr, 0, sizeof(*wr));
604 wr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
605 wr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*wr) / 16));
606 wr->tid_to_iq = cpu_to_be32(V_FW_FILTER_WR_TID(ftid) |
607 V_FW_FILTER_WR_NOREPLY(qid < 0));
608 wr->del_filter_to_l2tix = cpu_to_be32(F_FW_FILTER_WR_DEL_FILTER);
610 wr->rx_chan_rx_rpl_iq =
611 cpu_to_be16(V_FW_FILTER_WR_RX_RPL_IQ(qid));
615 * Create a FW work request to delete the filter at the specified index.
617 static int del_filter_wr(struct rte_eth_dev *dev, unsigned int fidx)
619 struct adapter *adapter = ethdev2adap(dev);
620 struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
621 struct rte_mbuf *mbuf;
622 struct fw_filter_wr *fwr;
623 struct sge_ctrl_txq *ctrlq;
624 unsigned int port_id = ethdev2pinfo(dev)->port_id;
626 ctrlq = &adapter->sge.ctrlq[port_id];
627 mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
631 mbuf->data_len = sizeof(*fwr);
632 mbuf->pkt_len = mbuf->data_len;
634 fwr = rte_pktmbuf_mtod(mbuf, struct fw_filter_wr *);
635 t4_mk_filtdelwr(f->tid, fwr, adapter->sge.fw_evtq.abs_id);
638 * Mark the filter as "pending" and ship off the Filter Work Request.
639 * When we get the Work Request Reply we'll clear the pending status.
642 t4_mgmt_tx(ctrlq, mbuf);
646 int set_filter_wr(struct rte_eth_dev *dev, unsigned int fidx)
648 struct adapter *adapter = ethdev2adap(dev);
649 struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
650 struct rte_mbuf *mbuf;
651 struct fw_filter_wr *fwr;
652 struct sge_ctrl_txq *ctrlq;
653 unsigned int port_id = ethdev2pinfo(dev)->port_id;
656 ctrlq = &adapter->sge.ctrlq[port_id];
657 mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
663 mbuf->data_len = sizeof(*fwr);
664 mbuf->pkt_len = mbuf->data_len;
666 fwr = rte_pktmbuf_mtod(mbuf, struct fw_filter_wr *);
667 memset(fwr, 0, sizeof(*fwr));
670 * Construct the work request to set the filter.
672 fwr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
673 fwr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*fwr) / 16));
675 cpu_to_be32(V_FW_FILTER_WR_TID(f->tid) |
676 V_FW_FILTER_WR_RQTYPE(f->fs.type) |
677 V_FW_FILTER_WR_NOREPLY(0) |
678 V_FW_FILTER_WR_IQ(f->fs.iq));
679 fwr->del_filter_to_l2tix =
680 cpu_to_be32(V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
681 V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
682 V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
683 V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
684 V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
685 V_FW_FILTER_WR_PRIO(f->fs.prio));
686 fwr->ethtype = cpu_to_be16(f->fs.val.ethtype);
687 fwr->ethtypem = cpu_to_be16(f->fs.mask.ethtype);
689 fwr->rx_chan_rx_rpl_iq =
690 cpu_to_be16(V_FW_FILTER_WR_RX_CHAN(0) |
691 V_FW_FILTER_WR_RX_RPL_IQ(adapter->sge.fw_evtq.abs_id
693 fwr->maci_to_matchtypem =
694 cpu_to_be32(V_FW_FILTER_WR_PORT(f->fs.val.iport) |
695 V_FW_FILTER_WR_PORTM(f->fs.mask.iport));
696 fwr->ptcl = f->fs.val.proto;
697 fwr->ptclm = f->fs.mask.proto;
698 rte_memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
699 rte_memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
700 rte_memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
701 rte_memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
702 fwr->lp = cpu_to_be16(f->fs.val.lport);
703 fwr->lpm = cpu_to_be16(f->fs.mask.lport);
704 fwr->fp = cpu_to_be16(f->fs.val.fport);
705 fwr->fpm = cpu_to_be16(f->fs.mask.fport);
708 * Mark the filter as "pending" and ship off the Filter Work Request.
709 * When we get the Work Request Reply we'll clear the pending status.
712 t4_mgmt_tx(ctrlq, mbuf);
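/* LE-TCAM filters, unlike hash filters, are written with a single
 * FW_FILTER_WR that carries both the match tuple (values and masks) and
 * the action bits; the firmware acks it with a CPL_SET_TCB_RPL whose
 * cookie is decoded in filter_rpl() below.
 */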
720 * Set the corresponding entry in the bitmap. 4 slots are
721 * marked for IPv6, whereas only 1 slot is marked for IPv4.
723 static int cxgbe_set_ftid(struct tid_info *t, int fidx, int family)
725 t4_os_lock(&t->ftid_lock);
726 if (rte_bitmap_get(t->ftid_bmap, fidx)) {
727 t4_os_unlock(&t->ftid_lock);
731 if (family == FILTER_TYPE_IPV4) {
732 rte_bitmap_set(t->ftid_bmap, fidx);
734 rte_bitmap_set(t->ftid_bmap, fidx);
735 rte_bitmap_set(t->ftid_bmap, fidx + 1);
736 rte_bitmap_set(t->ftid_bmap, fidx + 2);
737 rte_bitmap_set(t->ftid_bmap, fidx + 3);
739 t4_os_unlock(&t->ftid_lock);
744 * Clear the corresponding entry in the bitmap. 4 slots are
745 * cleared for IPv6, whereas only 1 slot is cleared for IPv4.
747 static void cxgbe_clear_ftid(struct tid_info *t, int fidx, int family)
749 t4_os_lock(&t->ftid_lock);
750 if (family == FILTER_TYPE_IPV4) {
751 rte_bitmap_clear(t->ftid_bmap, fidx);
753 rte_bitmap_clear(t->ftid_bmap, fidx);
754 rte_bitmap_clear(t->ftid_bmap, fidx + 1);
755 rte_bitmap_clear(t->ftid_bmap, fidx + 2);
756 rte_bitmap_clear(t->ftid_bmap, fidx + 3);
758 t4_os_unlock(&t->ftid_lock);
762 * Check a delete filter request for validity and send it to the hardware.
763 * Return 0 on success, an error number otherwise. We attach any provided
764 * filter operation context to the internal filter specification in order to
765 * facilitate signaling completion of the operation.
767 int cxgbe_del_filter(struct rte_eth_dev *dev, unsigned int filter_id,
768 struct ch_filter_specification *fs,
769 struct filter_ctx *ctx)
771 struct port_info *pi = (struct port_info *)(dev->data->dev_private);
772 struct adapter *adapter = pi->adapter;
773 struct filter_entry *f;
774 unsigned int chip_ver;
777 if (is_hashfilter(adapter) && fs->cap)
778 return cxgbe_del_hash_filter(dev, filter_id, ctx);
780 if (filter_id >= adapter->tids.nftids)
783 chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
785 ret = is_filter_set(&adapter->tids, filter_id, fs->type);
787 dev_warn(adapter, "%s: could not find filter entry: %u\n",
788 __func__, filter_id);
793 * Ensure the filter ID is aligned on a 2-slot boundary for T6,
794 * and a 4-slot boundary for cards below T6.
797 if (chip_ver < CHELSIO_T6)
803 f = &adapter->tids.ftid_tab[filter_id];
804 ret = writable_filter(f);
810 cxgbe_clear_ftid(&adapter->tids,
811 f->tid - adapter->tids.ftid_base,
812 f->fs.type ? FILTER_TYPE_IPV6 :
814 return del_filter_wr(dev, filter_id);
818 * If the caller has passed in a Completion Context then we need to
819 * mark it as a successful completion so they don't stall waiting
824 t4_complete(&ctx->completion);
831 * Check a Chelsio Filter Request for validity, convert it into our internal
832 * format and send it to the hardware. Return 0 on success, an error number
833 * otherwise. We attach any provided filter operation context to the internal
834 * filter specification in order to facilitate signaling completion of the operation.
837 int cxgbe_set_filter(struct rte_eth_dev *dev, unsigned int filter_id,
838 struct ch_filter_specification *fs,
839 struct filter_ctx *ctx)
841 struct port_info *pi = ethdev2pinfo(dev);
842 struct adapter *adapter = pi->adapter;
843 unsigned int fidx, iq, fid_bit = 0;
844 struct filter_entry *f;
845 unsigned int chip_ver;
846 uint8_t bitoff[16] = {0};
849 if (is_hashfilter(adapter) && fs->cap)
850 return cxgbe_set_hash_filter(dev, fs, ctx);
852 if (filter_id >= adapter->tids.nftids)
855 chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
857 ret = validate_filter(adapter, fs);
862 * Ensure filter id is aligned on the 4 slot boundary for IPv6
868 ret = is_filter_set(&adapter->tids, filter_id, fs->type);
872 iq = get_filter_steerq(dev, fs);
875 * IPv6 filters occupy four slots and must be aligned on four-slot
876 * boundaries for T5. On T6, IPv6 filters occupy two slots and
877 * must be aligned on two-slot boundaries.
879 * IPv4 filters only occupy a single slot and have no alignment
880 * requirements but writing a new IPv4 filter into the middle
881 * of an existing IPv6 filter requires clearing the old IPv6
884 if (fs->type == FILTER_TYPE_IPV4) { /* IPv4 */
886 * For T6, if our IPv4 filter isn't being written to a
887 * multiple of two filter index and there's an IPv6
888 * filter at the multiple of 2 base slot, then we need
889 * to delete that IPv6 filter ...
890 * For adapters below T6, an IPv6 filter occupies 4 entries.
892 if (chip_ver < CHELSIO_T6)
893 fidx = filter_id & ~0x3;
895 fidx = filter_id & ~0x1;
897 if (fidx != filter_id && adapter->tids.ftid_tab[fidx].fs.type) {
898 f = &adapter->tids.ftid_tab[fidx];
903 unsigned int max_filter_id;
905 if (chip_ver < CHELSIO_T6) {
907 * Ensure that the IPv6 filter is aligned on a
908 * multiple of 4 boundary.
913 max_filter_id = filter_id + 4;
916 * For T6, with CLIP enabled, an IPv6 filter would occupy
922 max_filter_id = filter_id + 2;
926 * Check all except the base overlapping IPv4 filter
929 for (fidx = filter_id + 1; fidx < max_filter_id; fidx++) {
930 f = &adapter->tids.ftid_tab[fidx];
937 * Check to make sure that the provided filter index is not
938 * already in use by someone else.
940 f = &adapter->tids.ftid_tab[filter_id];
944 fidx = adapter->tids.ftid_base + filter_id;
946 ret = cxgbe_set_ftid(&adapter->tids, fid_bit,
947 fs->type ? FILTER_TYPE_IPV6 : FILTER_TYPE_IPV4);
952 * Check to make sure the filter requested is writable ...
954 ret = writable_filter(f);
956 /* Clear the bits we have set above */
957 cxgbe_clear_ftid(&adapter->tids, fid_bit,
958 fs->type ? FILTER_TYPE_IPV6 :
964 * Allocate a CLIP table entry only if we have a non-zero IPv6 address
966 if (chip_ver > CHELSIO_T5 && fs->type &&
967 memcmp(fs->val.lip, bitoff, sizeof(bitoff))) {
968 f->clipt = cxgbe_clip_alloc(f->dev, (u32 *)&f->fs.val.lip);
974 * Convert the filter specification into our internal format.
975 * We copy the PF/VF specification into the Outer VLAN field
976 * here so the rest of the code -- including the interface to
977 * the firmware -- doesn't have to constantly do these checks.
984 * Attempt to set the filter. If we don't succeed, we clear
985 * it and return the failure.
988 f->tid = fidx; /* Save the actual tid */
989 ret = set_filter_wr(dev, filter_id);
991 fid_bit = f->tid - adapter->tids.ftid_base;
998 cxgbe_clear_ftid(&adapter->tids, fid_bit,
999 fs->type ? FILTER_TYPE_IPV6 :
1006 * Handle a Hash filter write reply.
1008 void hash_filter_rpl(struct adapter *adap, const struct cpl_act_open_rpl *rpl)
1010 struct tid_info *t = &adap->tids;
1011 struct filter_entry *f;
1012 struct filter_ctx *ctx = NULL;
1013 unsigned int tid = GET_TID(rpl);
1014 unsigned int ftid = G_TID_TID(G_AOPEN_ATID
1015 (be32_to_cpu(rpl->atid_status)));
1016 unsigned int status = G_AOPEN_STATUS(be32_to_cpu(rpl->atid_status));
1018 f = lookup_atid(t, ftid);
1020 dev_warn(adap, "%s: could not find filter entry: %d\n",
1029 case CPL_ERR_NONE: {
1031 f->pending = 0; /* asynchronous setup completed */
1034 cxgbe_insert_tid(t, f, f->tid, 0);
1035 cxgbe_free_atid(t, ftid);
1041 set_tcb_field(adap, tid,
1043 V_TCB_TIMESTAMP(M_TCB_TIMESTAMP) |
1044 V_TCB_T_RTT_TS_RECENT_AGE
1045 (M_TCB_T_RTT_TS_RECENT_AGE),
1046 V_TCB_TIMESTAMP(0ULL) |
1047 V_TCB_T_RTT_TS_RECENT_AGE(0ULL),
1052 dev_warn(adap, "%s: filter creation failed with status = %u\n",
1056 if (status == CPL_ERR_TCAM_FULL)
1057 ctx->result = -EAGAIN;
1059 ctx->result = -EINVAL;
1062 cxgbe_free_atid(t, ftid);
1067 t4_complete(&ctx->completion);
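/* On CPL_ERR_NONE the ATID is exchanged for the real TID via
 * cxgbe_insert_tid(), and the TCB timestamp / RTT-age words are zeroed
 * with set_tcb_field() since those words double as the hash filter's hit
 * counters (see cxgbe_get_filter_count() below). CPL_ERR_TCAM_FULL is
 * reported as -EAGAIN so the caller may retry.
 */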
1071 * Handle a LE-TCAM filter write/deletion reply.
1073 void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
1075 struct filter_entry *f = NULL;
1076 unsigned int tid = GET_TID(rpl);
1077 int idx, max_fidx = adap->tids.nftids;
1079 /* Get the corresponding filter entry for this tid */
1080 if (adap->tids.ftid_tab) {
1081 /* Check this in normal filter region */
1082 idx = tid - adap->tids.ftid_base;
1083 if (idx >= max_fidx)
1086 f = &adap->tids.ftid_tab[idx];
1091 /* We found the filter entry for this tid */
1093 unsigned int ret = G_COOKIE(rpl->cookie);
1094 struct filter_ctx *ctx;
1097 * Pull off any filter operation context attached to the
1103 if (ret == FW_FILTER_WR_FLT_ADDED) {
1104 f->pending = 0; /* asynchronous setup completed */
1110 } else if (ret == FW_FILTER_WR_FLT_DELETED) {
1112 * Clear the filter when we get confirmation from the
1113 * hardware that the filter has been deleted.
1120 * Something went wrong. Issue a warning about the
1121 * problem and clear everything out.
1123 dev_warn(adap, "filter %u setup failed with error %u\n",
1127 ctx->result = -EINVAL;
1131 t4_complete(&ctx->completion);
1136 * Retrieve the packet or byte count for the specified filter.
1138 int cxgbe_get_filter_count(struct adapter *adapter, unsigned int fidx,
1139 u64 *c, int hash, bool get_byte)
1141 struct filter_entry *f;
1142 unsigned int tcb_base, tcbaddr;
1145 tcb_base = t4_read_reg(adapter, A_TP_CMM_TCB_BASE);
1146 if (is_hashfilter(adapter) && hash) {
1147 if (fidx < adapter->tids.ntids) {
1148 f = adapter->tids.tid_tab[fidx];
1152 if (is_t5(adapter->params.chip)) {
1156 tcbaddr = tcb_base + (fidx * TCB_SIZE);
1162 if (fidx >= adapter->tids.nftids)
1165 f = &adapter->tids.ftid_tab[fidx];
1169 tcbaddr = tcb_base + f->tid * TCB_SIZE;
1172 f = &adapter->tids.ftid_tab[fidx];
1177 if (is_t5(adapter->params.chip) || is_t6(adapter->params.chip)) {
1179 * For T5, the Filter Packet Hit Count is maintained as a
1180 * 32-bit Big Endian value in the TCB field {timestamp}.
1181 * Similar to the craziness above, instead of the filter hit
1182 * count showing up at offset 20 ((W_TCB_TIMESTAMP == 5) *
1183 * sizeof(u32)), it actually shows up at offset 24. Whacky.
1186 unsigned int word_offset = 4;
1187 __be64 be64_byte_count;
1189 t4_os_lock(&adapter->win0_lock);
1190 ret = t4_memory_rw(adapter, MEMWIN_NIC, MEM_EDC0,
1192 (word_offset * sizeof(__be32)),
1193 sizeof(be64_byte_count),
1196 t4_os_unlock(&adapter->win0_lock);
1199 *c = be64_to_cpu(be64_byte_count);
1201 unsigned int word_offset = 6;
1204 t4_os_lock(&adapter->win0_lock);
1205 ret = t4_memory_rw(adapter, MEMWIN_NIC, MEM_EDC0,
1207 (word_offset * sizeof(__be32)),
1208 sizeof(be32_count), &be32_count,
1210 t4_os_unlock(&adapter->win0_lock);
1213 *c = (u64)be32_to_cpu(be32_count);
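/* Hit counts are not kept in host memory: they live in the filter's TCB in
 * adapter memory, so they are read back through a PCIe memory window
 * (t4_memory_rw() under win0_lock) at tcb_base + tid * TCB_SIZE. The byte
 * count is a 64-bit big-endian value at 32-bit word offset 4 and the
 * packet count a 32-bit big-endian value at word offset 6 of the TCB.
 */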
1220 * Handle a Hash filter delete reply.
1222 void hash_del_filter_rpl(struct adapter *adap,
1223 const struct cpl_abort_rpl_rss *rpl)
1225 struct tid_info *t = &adap->tids;
1226 struct filter_entry *f;
1227 struct filter_ctx *ctx = NULL;
1228 unsigned int tid = GET_TID(rpl);
1230 f = lookup_tid(t, tid);
1232 dev_warn(adap, "%s: could not find filter entry: %u\n",
1243 cxgbe_clip_release(f->dev, f->clipt);
1245 cxgbe_remove_tid(t, 0, tid, 0);
1250 t4_complete(&ctx->completion);