/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Chelsio Communications.
 * All rights reserved.
 */
#include <rte_net.h>

#include "base/common.h"
#include "base/t4_tcb.h"
#include "base/t4_regs.h"
#include "cxgbe_filter.h"
#include "clip_tbl.h"
#include "l2t.h"

/**
 * Initialize Hash Filters
 */
int cxgbe_init_hash_filter(struct adapter *adap)
{
	unsigned int n_user_filters;
	unsigned int user_filter_perc;
	int ret;
	u32 params[7], val[7];

#define FW_PARAM_DEV(param) \
	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
	 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))

#define FW_PARAM_PFVF(param) \
	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
	 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param) | \
	 V_FW_PARAMS_PARAM_Y(0) | \
	 V_FW_PARAMS_PARAM_Z(0))

	params[0] = FW_PARAM_DEV(NTID);
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
			      params, val);
	if (ret < 0)
		return ret;
	adap->tids.ntids = val[0];
	adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);

	user_filter_perc = 100;
	n_user_filters = mult_frac(adap->tids.nftids,
				   user_filter_perc,
				   100);

	adap->tids.nftids = n_user_filters;
	adap->params.hash_filter = 1;
	return 0;
}

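/*
 * Illustrative note (not from the original source): mult_frac(x, n, d)
 * evaluates to (x * n) / d, so with user_filter_perc fixed at 100 the
 * computation above reduces to
 *
 *	n_user_filters = mult_frac(nftids, 100, 100);	(== nftids)
 *
 * i.e. all filter TIDs are currently handed to user filters. Lowering
 * user_filter_perc would reserve the remaining percentage for other uses.
 */
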
/**
 * Validate if the requested filter specification can be set by checking
 * if the requested features have been enabled.
 */
int cxgbe_validate_filter(struct adapter *adapter,
			  struct ch_filter_specification *fs)
{
	u32 fconf;

	/*
	 * Check for unconfigured fields being used.
	 */
	fconf = adapter->params.tp.vlan_pri_map;

#define S(_field) \
	(fs->val._field || fs->mask._field)
#define U(_mask, _field) \
	(!(fconf & (_mask)) && S(_field))

	if (U(F_PORT, iport) || U(F_ETHERTYPE, ethtype) ||
	    U(F_PROTOCOL, proto) || U(F_MACMATCH, macidx) ||
	    U(F_VLAN, ivlan_vld))
		return -EOPNOTSUPP;

#undef S
#undef U

	/*
	 * If the user is requesting that the filter action loop
	 * matching packets back out one of our ports, make sure that
	 * the egress port is in range.
	 */
	if (fs->action == FILTER_SWITCH &&
	    fs->eport >= adapter->params.nports)
		return -ERANGE;

	/*
	 * Don't allow various trivially obvious bogus out-of-range
	 * values ...
	 */
	if (fs->val.iport >= adapter->params.nports)
		return -ERANGE;

	if (!fs->cap && fs->nat_mode && !adapter->params.filter2_wr_support)
		return -EOPNOTSUPP;

	if (!fs->cap && fs->swapmac && !adapter->params.filter2_wr_support)
		return -EOPNOTSUPP;

	return 0;
}

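/*
 * Example (illustrative only): a specification matching on the inner VLAN
 * is rejected with -EOPNOTSUPP unless the VLAN field is compressed into
 * the TP filter tuple, i.e. unless F_VLAN is set in tp.vlan_pri_map:
 *
 *	struct ch_filter_specification fs;
 *
 *	memset(&fs, 0, sizeof(fs));
 *	fs.val.ivlan_vld = 1;
 *	fs.mask.ivlan_vld = 1;
 *	ret = cxgbe_validate_filter(adapter, &fs);
 */
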
/**
 * Get the queue to which the traffic must be steered.
 */
static unsigned int get_filter_steerq(struct rte_eth_dev *dev,
				      struct ch_filter_specification *fs)
{
	struct port_info *pi = ethdev2pinfo(dev);
	struct adapter *adapter = pi->adapter;
	unsigned int iq;

	/*
	 * If the user has requested steering matching Ingress Packets
	 * to a specific Queue Set, we need to make sure it's in range
	 * for the port and map that into the Absolute Queue ID of the
	 * Queue Set's Response Queue.
	 */
	if (!fs->dirsteer) {
		iq = 0;
	} else {
		/*
		 * If the iq id is greater than the number of qsets,
		 * then assume it is an absolute qid.
		 */
		if (fs->iq < pi->n_rx_qsets)
			iq = adapter->sge.ethrxq[pi->first_qset +
						 fs->iq].rspq.abs_id;
		else
			iq = fs->iq;
	}

	return iq;
}

/* Return an error number if the indicated filter isn't writable ... */
static int writable_filter(struct filter_entry *f)
{
	return f->locked ? -EPERM :
	       f->pending ? -EBUSY : 0;
}

/**
 * Send a CPL_SET_TCB_FIELD message.
 */
static void set_tcb_field(struct adapter *adapter, unsigned int ftid,
			  u16 word, u64 mask, u64 val, int no_reply)
{
	struct rte_mbuf *mbuf;
	struct cpl_set_tcb_field *req;
	struct sge_ctrl_txq *ctrlq;

	ctrlq = &adapter->sge.ctrlq[0];
	mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
	WARN_ON(!mbuf);

	mbuf->data_len = sizeof(*req);
	mbuf->pkt_len = mbuf->data_len;

	req = rte_pktmbuf_mtod(mbuf, struct cpl_set_tcb_field *);
	memset(req, 0, sizeof(*req));
	INIT_TP_WR_MIT_CPL(req, CPL_SET_TCB_FIELD, ftid);
	req->reply_ctrl = cpu_to_be16(V_REPLY_CHAN(0) |
				      V_QUEUENO(adapter->sge.fw_evtq.abs_id) |
				      V_NO_REPLY(no_reply));
	req->word_cookie = cpu_to_be16(V_WORD(word) | V_COOKIE(ftid));
	req->mask = cpu_to_be64(mask);
	req->val = cpu_to_be64(val);

	t4_mgmt_tx(ctrlq, mbuf);
}

/**
 * Set one of the t_flags bits in the TCB.
 */
static void set_tcb_tflag(struct adapter *adap, unsigned int ftid,
			  unsigned int bit_pos, unsigned int val, int no_reply)
{
	set_tcb_field(adap, ftid, W_TCB_T_FLAGS, 1ULL << bit_pos,
		      (unsigned long long)val << bit_pos, no_reply);
}

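/*
 * Illustrative example: for bit_pos = S_TF_CCTRL_RFR and val = 1 the call
 * above becomes a CPL_SET_TCB_FIELD on word W_TCB_T_FLAGS with
 *
 *	mask = 1ULL << S_TF_CCTRL_RFR;
 *	val  = 1ULL << S_TF_CCTRL_RFR;
 *
 * so only that one flag bit is modified and all other t_flags bits are
 * left untouched by the hardware read-modify-write.
 */
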
/**
 * Build a CPL_SET_TCB_FIELD message as payload of a ULP_TX_PKT command.
 */
static inline void mk_set_tcb_field_ulp(struct filter_entry *f,
					struct cpl_set_tcb_field *req,
					unsigned int word,
					u64 mask, u64 val, u8 cookie,
					int no_reply)
{
	struct ulp_txpkt *txpkt = (struct ulp_txpkt *)req;
	struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);

	txpkt->cmd_dest = cpu_to_be32(V_ULPTX_CMD(ULP_TX_PKT) |
				      V_ULP_TXPKT_DEST(0));
	txpkt->len = cpu_to_be32(DIV_ROUND_UP(sizeof(*req), 16));
	sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM));
	sc->len = cpu_to_be32(sizeof(*req) - sizeof(struct work_request_hdr));
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_SET_TCB_FIELD, f->tid));
	req->reply_ctrl = cpu_to_be16(V_NO_REPLY(no_reply) | V_REPLY_CHAN(0) |
				      V_QUEUENO(0));
	req->word_cookie = cpu_to_be16(V_WORD(word) | V_COOKIE(cookie));
	req->mask = cpu_to_be64(mask);
	req->val = cpu_to_be64(val);
	sc = (struct ulptx_idata *)(req + 1);
	sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
	sc->len = cpu_to_be32(0);
}

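/*
 * Resulting layout of the immediate payload built above (sketch, for
 * orientation only):
 *
 *	struct ulp_txpkt          ULP_TX_PKT header
 *	struct ulptx_idata        ULP_TX_SC_IMM, length of the CPL
 *	struct cpl_set_tcb_field  the actual TCB update
 *	struct ulptx_idata        ULP_TX_SC_NOOP, pads to a 16-byte multiple
 *
 * The CPL is built in place over the caller-supplied buffer, so the caller
 * must reserve room for all four pieces.
 */
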
/**
 * IPv6 requires 2 slots on T6 and 4 slots for cards below T6.
 * IPv4 requires only 1 slot on all cards.
 */
u8 cxgbe_filter_slots(struct adapter *adap, u8 family)
{
	if (family == FILTER_TYPE_IPV6) {
		if (CHELSIO_CHIP_VERSION(adap->params.chip) < CHELSIO_T6)
			return 4;

		return 2;
	}

	return 1;
}

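/*
 * Usage sketch (illustrative): callers size and align their filter ID
 * allocations with the returned slot count, e.g.
 *
 *	u8 nentries = cxgbe_filter_slots(adap, fs->type);
 *
 * which yields 1 for IPv4 on all chips, 2 for IPv6 on T6, and 4 for IPv6
 * on cards below T6.
 */
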
/**
 * Check if entries are already filled.
 */
bool cxgbe_is_filter_set(struct tid_info *t, u32 fidx, u8 nentries)
{
	bool result = false;
	u32 i;

	/* Ensure there are enough slots available. */
	t4_os_lock(&t->ftid_lock);
	for (i = fidx; i < fidx + nentries; i++) {
		if (rte_bitmap_get(t->ftid_bmap, i)) {
			result = true;
			break;
		}
	}
	t4_os_unlock(&t->ftid_lock);
	return result;
}

/**
 * Allocate available free entries.
 */
int cxgbe_alloc_ftid(struct adapter *adap, u8 nentries)
{
	struct tid_info *t = &adap->tids;
	int pos;
	int size = t->nftids;

	t4_os_lock(&t->ftid_lock);
	if (nentries > 1)
		pos = cxgbe_bitmap_find_free_region(t->ftid_bmap, size,
						    nentries);
	else
		pos = cxgbe_find_first_zero_bit(t->ftid_bmap, size);
	t4_os_unlock(&t->ftid_lock);

	return pos < size ? pos : -1;
}

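/*
 * Note (editorial): the index returned above is only a candidate; the
 * bitmap bits are not claimed here. Judging from cxgbe_set_filter() below,
 * a caller is expected to follow up with cxgbe_set_ftid() to actually
 * reserve the slots:
 *
 *	pos = cxgbe_alloc_ftid(adap, nentries);
 *	if (pos >= 0)
 *		ret = cxgbe_set_ftid(&adap->tids, pos, nentries);
 */
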
/**
 * Construct hash filter ntuple.
 */
static u64 hash_filter_ntuple(const struct filter_entry *f)
{
	struct adapter *adap = ethdev2adap(f->dev);
	struct tp_params *tp = &adap->params.tp;
	u64 ntuple = 0;
	u16 tcp_proto = IPPROTO_TCP; /* TCP Protocol Number */

	if (tp->port_shift >= 0 && f->fs.mask.iport)
		ntuple |= (u64)f->fs.val.iport << tp->port_shift;

	if (tp->protocol_shift >= 0) {
		if (!f->fs.val.proto)
			ntuple |= (u64)tcp_proto << tp->protocol_shift;
		else
			ntuple |= (u64)f->fs.val.proto << tp->protocol_shift;
	}

	if (tp->ethertype_shift >= 0 && f->fs.mask.ethtype)
		ntuple |= (u64)(f->fs.val.ethtype) << tp->ethertype_shift;
	if (tp->macmatch_shift >= 0 && f->fs.mask.macidx)
		ntuple |= (u64)(f->fs.val.macidx) << tp->macmatch_shift;
	if (tp->vlan_shift >= 0 && f->fs.mask.ivlan)
		ntuple |= (u64)(F_FT_VLAN_VLD | f->fs.val.ivlan) <<
			  tp->vlan_shift;

	return ntuple;
}

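/*
 * Illustrative example: with a TP configuration where port_shift == 0 and
 * protocol_shift == 9 (the real shifts are decoded from vlan_pri_map into
 * tp_params at init time), a TCP filter matching ingress port 1 packs as
 *
 *	ntuple = (1ULL << 0) | ((u64)IPPROTO_TCP << 9);
 *
 * Fields whose shift is negative are not compressed into the hardware
 * tuple and are skipped above.
 */
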
/**
 * Build a CPL_ABORT_REQ message as payload of a ULP_TX_PKT command.
 */
static void mk_abort_req_ulp(struct cpl_abort_req *abort_req,
			     unsigned int tid)
{
	struct ulp_txpkt *txpkt = (struct ulp_txpkt *)abort_req;
	struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);

	txpkt->cmd_dest = cpu_to_be32(V_ULPTX_CMD(ULP_TX_PKT) |
				      V_ULP_TXPKT_DEST(0));
	txpkt->len = cpu_to_be32(DIV_ROUND_UP(sizeof(*abort_req), 16));
	sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM));
	sc->len = cpu_to_be32(sizeof(*abort_req) -
			      sizeof(struct work_request_hdr));
	OPCODE_TID(abort_req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, tid));
	abort_req->rsvd0 = cpu_to_be32(0);
	abort_req->rsvd1 = 0;
	abort_req->cmd = CPL_ABORT_NO_RST;
	sc = (struct ulptx_idata *)(abort_req + 1);
	sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
	sc->len = cpu_to_be32(0);
}

/**
 * Build a CPL_ABORT_RPL message as payload of a ULP_TX_PKT command.
 */
static void mk_abort_rpl_ulp(struct cpl_abort_rpl *abort_rpl,
			     unsigned int tid)
{
	struct ulp_txpkt *txpkt = (struct ulp_txpkt *)abort_rpl;
	struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);

	txpkt->cmd_dest = cpu_to_be32(V_ULPTX_CMD(ULP_TX_PKT) |
				      V_ULP_TXPKT_DEST(0));
	txpkt->len = cpu_to_be32(DIV_ROUND_UP(sizeof(*abort_rpl), 16));
	sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM));
	sc->len = cpu_to_be32(sizeof(*abort_rpl) -
			      sizeof(struct work_request_hdr));
	OPCODE_TID(abort_rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
	abort_rpl->rsvd0 = cpu_to_be32(0);
	abort_rpl->rsvd1 = 0;
	abort_rpl->cmd = CPL_ABORT_NO_RST;
	sc = (struct ulptx_idata *)(abort_rpl + 1);
	sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
	sc->len = cpu_to_be32(0);
}

/**
 * Delete the specified hash filter.
 */
static int cxgbe_del_hash_filter(struct rte_eth_dev *dev,
				 unsigned int filter_id,
				 struct filter_ctx *ctx)
{
	struct adapter *adapter = ethdev2adap(dev);
	struct tid_info *t = &adapter->tids;
	struct filter_entry *f;
	struct sge_ctrl_txq *ctrlq;
	unsigned int port_id = ethdev2pinfo(dev)->port_id;
	int ret;

	if (filter_id > adapter->tids.ntids)
		return -E2BIG;

	f = lookup_tid(t, filter_id);
	if (!f) {
		dev_err(adapter, "%s: no filter entry for filter_id = %d\n",
			__func__, filter_id);
		return -EINVAL;
	}

	ret = writable_filter(f);
	if (ret)
		return ret;

	if (f->valid) {
		unsigned int wrlen;
		struct rte_mbuf *mbuf;
		struct work_request_hdr *wr;
		struct ulptx_idata *aligner;
		struct cpl_set_tcb_field *req;
		struct cpl_abort_req *abort_req;
		struct cpl_abort_rpl *abort_rpl;

		f->ctx = ctx;
		f->pending = 1;

		wrlen = cxgbe_roundup(sizeof(*wr) +
				      (sizeof(*req) + sizeof(*aligner)) +
				      sizeof(*abort_req) + sizeof(*abort_rpl),
				      16);

		ctrlq = &adapter->sge.ctrlq[port_id];
		mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
		if (!mbuf) {
			dev_err(adapter, "%s: could not allocate mbuf ..\n",
				__func__);
			goto out_err;
		}

		mbuf->data_len = wrlen;
		mbuf->pkt_len = mbuf->data_len;

		req = rte_pktmbuf_mtod(mbuf, struct cpl_set_tcb_field *);
		INIT_ULPTX_WR(req, wrlen, 0, 0);
		wr = (struct work_request_hdr *)req;
		wr++;
		req = (struct cpl_set_tcb_field *)wr;
		mk_set_tcb_field_ulp(f, req, W_TCB_RSS_INFO,
				     V_TCB_RSS_INFO(M_TCB_RSS_INFO),
				     V_TCB_RSS_INFO(adapter->sge.fw_evtq.abs_id),
				     0, 1);
		aligner = (struct ulptx_idata *)(req + 1);
		abort_req = (struct cpl_abort_req *)(aligner + 1);
		mk_abort_req_ulp(abort_req, f->tid);
		abort_rpl = (struct cpl_abort_rpl *)(abort_req + 1);
		mk_abort_rpl_ulp(abort_rpl, f->tid);
		t4_mgmt_tx(ctrlq, mbuf);
	}

	return 0;

out_err:
	return -ENOMEM;
}

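/*
 * Sketch of the composite work request built above (for orientation):
 *
 *	ULPTX work request header
 *	CPL_SET_TCB_FIELD - repoint the TCB RSS_INFO field at fw_evtq so
 *	                    the abort completion is delivered there
 *	ulptx_idata       - aligner
 *	CPL_ABORT_REQ     - tear down the filter's TCB state
 *	CPL_ABORT_RPL     - pre-built reply consumed by the hardware
 *
 * The resulting CPL_ABORT_RPL_RSS completion is handled in
 * cxgbe_hash_del_filter_rpl() below.
 */
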
/**
 * Build an ACT_OPEN_REQ6 message for setting an IPv6 hash filter.
 */
static void mk_act_open_req6(struct filter_entry *f, struct rte_mbuf *mbuf,
			     unsigned int qid_filterid, struct adapter *adap)
{
	struct cpl_t6_act_open_req6 *req = NULL;
	u64 local_lo, local_hi, peer_lo, peer_hi;
	u32 *lip = (u32 *)f->fs.val.lip;
	u32 *fip = (u32 *)f->fs.val.fip;

	switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
	case CHELSIO_T6:
		req = rte_pktmbuf_mtod(mbuf, struct cpl_t6_act_open_req6 *);

		INIT_TP_WR(req, 0);
		break;
	default:
		dev_err(adap, "%s: unsupported chip type!\n", __func__);
		return;
	}

	local_hi = ((u64)lip[1]) << 32 | lip[0];
	local_lo = ((u64)lip[3]) << 32 | lip[2];
	peer_hi = ((u64)fip[1]) << 32 | fip[0];
	peer_lo = ((u64)fip[3]) << 32 | fip[2];

	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
						    qid_filterid));
	req->local_port = cpu_to_be16(f->fs.val.lport);
	req->peer_port = cpu_to_be16(f->fs.val.fport);
	req->local_ip_hi = local_hi;
	req->local_ip_lo = local_lo;
	req->peer_ip_hi = peer_hi;
	req->peer_ip_lo = peer_lo;
	req->opt0 = cpu_to_be64(V_NAGLE(f->fs.newvlan == VLAN_REMOVE ||
					f->fs.newvlan == VLAN_REWRITE) |
				V_DELACK(f->fs.hitcnts) |
				V_L2T_IDX(f->l2t ? f->l2t->idx : 0) |
				V_SMAC_SEL((cxgbe_port_viid(f->dev) & 0x7F)
					   << 1) |
				V_TX_CHAN(f->fs.eport) |
				V_ULP_MODE(ULP_MODE_NONE) |
				F_TCAM_BYPASS | F_NON_OFFLOAD);
	req->params = cpu_to_be64(V_FILTER_TUPLE(hash_filter_ntuple(f)));
	req->opt2 = cpu_to_be32(F_RSS_QUEUE_VALID |
				V_RSS_QUEUE(f->fs.iq) |
				F_T5_OPT_2_VALID |
				F_RX_CHANNEL |
				V_SACK_EN(f->fs.swapmac) |
				V_CONG_CNTRL((f->fs.action == FILTER_DROP) |
					     (f->fs.dirsteer << 1)) |
				V_CCTRL_ECN(f->fs.action == FILTER_SWITCH));
}

/**
 * Build an ACT_OPEN_REQ message for setting an IPv4 hash filter.
 */
static void mk_act_open_req(struct filter_entry *f, struct rte_mbuf *mbuf,
			    unsigned int qid_filterid, struct adapter *adap)
{
	struct cpl_t6_act_open_req *req = NULL;

	switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
	case CHELSIO_T6:
		req = rte_pktmbuf_mtod(mbuf, struct cpl_t6_act_open_req *);

		INIT_TP_WR(req, 0);
		break;
	default:
		dev_err(adap, "%s: unsupported chip type!\n", __func__);
		return;
	}

	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
						    qid_filterid));
	req->local_port = cpu_to_be16(f->fs.val.lport);
	req->peer_port = cpu_to_be16(f->fs.val.fport);
	req->local_ip = f->fs.val.lip[0] | f->fs.val.lip[1] << 8 |
			f->fs.val.lip[2] << 16 | f->fs.val.lip[3] << 24;
	req->peer_ip = f->fs.val.fip[0] | f->fs.val.fip[1] << 8 |
		       f->fs.val.fip[2] << 16 | f->fs.val.fip[3] << 24;
	req->opt0 = cpu_to_be64(V_NAGLE(f->fs.newvlan == VLAN_REMOVE ||
					f->fs.newvlan == VLAN_REWRITE) |
				V_DELACK(f->fs.hitcnts) |
				V_L2T_IDX(f->l2t ? f->l2t->idx : 0) |
				V_SMAC_SEL((cxgbe_port_viid(f->dev) & 0x7F)
					   << 1) |
				V_TX_CHAN(f->fs.eport) |
				V_ULP_MODE(ULP_MODE_NONE) |
				F_TCAM_BYPASS | F_NON_OFFLOAD);
	req->params = cpu_to_be64(V_FILTER_TUPLE(hash_filter_ntuple(f)));
	req->opt2 = cpu_to_be32(F_RSS_QUEUE_VALID |
				V_RSS_QUEUE(f->fs.iq) |
				F_T5_OPT_2_VALID |
				F_RX_CHANNEL |
				V_SACK_EN(f->fs.swapmac) |
				V_CONG_CNTRL((f->fs.action == FILTER_DROP) |
					     (f->fs.dirsteer << 1)) |
				V_CCTRL_ECN(f->fs.action == FILTER_SWITCH));
}

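/*
 * Illustrative note: both ACT_OPEN builders above receive qid_filterid
 * pre-encoded by cxgbe_set_hash_filter() as
 *
 *	(adapter->sge.fw_evtq.abs_id << 14) | atid
 *
 * i.e. the reply queue in the upper bits and the allocated ATID in the
 * lower 14 bits; MK_OPCODE_TID() then folds this into the CPL header.
 */
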
/**
 * Set the specified hash filter.
 */
static int cxgbe_set_hash_filter(struct rte_eth_dev *dev,
				 struct ch_filter_specification *fs,
				 struct filter_ctx *ctx)
{
	struct port_info *pi = ethdev2pinfo(dev);
	struct adapter *adapter = pi->adapter;
	struct tid_info *t = &adapter->tids;
	struct filter_entry *f;
	struct rte_mbuf *mbuf;
	struct sge_ctrl_txq *ctrlq;
	unsigned int iq;
	int atid, size;
	int ret = 0;

	ret = cxgbe_validate_filter(adapter, fs);
	if (ret)
		return ret;

	iq = get_filter_steerq(dev, fs);

	ctrlq = &adapter->sge.ctrlq[pi->port_id];

	f = t4_os_alloc(sizeof(*f));
	if (!f)
		return -ENOMEM;

	f->fs = *fs;
	f->ctx = ctx;
	f->dev = dev;
	f->fs.iq = iq;

	/*
	 * If the new filter requires loopback Destination MAC and/or VLAN
	 * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
	 * the filter.
	 */
	if (f->fs.newvlan == VLAN_INSERT ||
	    f->fs.newvlan == VLAN_REWRITE) {
		/* allocate L2T entry for new filter */
		f->l2t = cxgbe_l2t_alloc_switching(dev, f->fs.vlan,
						   f->fs.eport, f->fs.dmac);
		if (!f->l2t) {
			ret = -ENOMEM;
			goto out_err;
		}
	}

	atid = cxgbe_alloc_atid(t, f);
	if (atid < 0) {
		ret = -ENOMEM;
		goto out_err;
	}

	if (f->fs.type == FILTER_TYPE_IPV6) {
		/* IPv6 hash filter */
		f->clipt = cxgbe_clip_alloc(f->dev, (u32 *)&f->fs.val.lip);
		if (!f->clipt) {
			ret = -ENOMEM;
			goto free_atid;
		}

		size = sizeof(struct cpl_t6_act_open_req6);
		mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
		if (!mbuf) {
			ret = -ENOMEM;
			goto free_clip;
		}

		mbuf->data_len = size;
		mbuf->pkt_len = mbuf->data_len;

		mk_act_open_req6(f, mbuf,
				 ((adapter->sge.fw_evtq.abs_id << 14) | atid),
				 adapter);
	} else {
		/* IPv4 hash filter */
		size = sizeof(struct cpl_t6_act_open_req);
		mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
		if (!mbuf) {
			ret = -ENOMEM;
			goto free_atid;
		}

		mbuf->data_len = size;
		mbuf->pkt_len = mbuf->data_len;

		mk_act_open_req(f, mbuf,
				((adapter->sge.fw_evtq.abs_id << 14) | atid),
				adapter);
	}

	f->pending = 1;
	t4_mgmt_tx(ctrlq, mbuf);
	return 0;

free_clip:
	cxgbe_clip_release(f->dev, f->clipt);
free_atid:
	cxgbe_free_atid(t, atid);

out_err:
	t4_os_free(f);
	return ret;
}

/**
 * Clear a filter and release any of its resources that we own. This also
 * clears the filter's "pending" status.
 */
static void clear_filter(struct filter_entry *f)
{
	if (f->clipt)
		cxgbe_clip_release(f->dev, f->clipt);

	/*
	 * The zeroing of the filter rule below clears the filter valid,
	 * pending, locked flags etc. so it's all we need for
	 * this operation.
	 */
	memset(f, 0, sizeof(*f));
}

/**
 * t4_mk_filtdelwr - create a delete filter WR
 * @adap: adapter context
 * @ftid: the filter ID
 * @wr: the filter work request to populate
 * @qid: ingress queue to receive the delete notification
 *
 * Creates a filter work request to delete the supplied filter. If @qid is
 * negative the delete notification is suppressed.
 */
static void t4_mk_filtdelwr(struct adapter *adap, unsigned int ftid,
			    struct fw_filter2_wr *wr, int qid)
{
	memset(wr, 0, sizeof(*wr));
	if (adap->params.filter2_wr_support)
		wr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER2_WR));
	else
		wr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
	wr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*wr) / 16));
	wr->tid_to_iq = cpu_to_be32(V_FW_FILTER_WR_TID(ftid) |
				    V_FW_FILTER_WR_NOREPLY(qid < 0));
	wr->del_filter_to_l2tix = cpu_to_be32(F_FW_FILTER_WR_DEL_FILTER);
	if (qid >= 0)
		wr->rx_chan_rx_rpl_iq =
				cpu_to_be16(V_FW_FILTER_WR_RX_RPL_IQ(qid));
}

/**
 * Create FW work request to delete the filter at a specified index.
 */
static int del_filter_wr(struct rte_eth_dev *dev, unsigned int fidx)
{
	struct adapter *adapter = ethdev2adap(dev);
	struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
	struct rte_mbuf *mbuf;
	struct fw_filter2_wr *fwr;
	struct sge_ctrl_txq *ctrlq;
	unsigned int port_id = ethdev2pinfo(dev)->port_id;

	ctrlq = &adapter->sge.ctrlq[port_id];
	mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
	if (!mbuf)
		return -ENOMEM;

	mbuf->data_len = sizeof(*fwr);
	mbuf->pkt_len = mbuf->data_len;

	fwr = rte_pktmbuf_mtod(mbuf, struct fw_filter2_wr *);
	t4_mk_filtdelwr(adapter, f->tid, fwr, adapter->sge.fw_evtq.abs_id);

	/*
	 * Mark the filter as "pending" and ship off the Filter Work Request.
	 * When we get the Work Request Reply we'll clear the pending status.
	 */
	f->pending = 1;
	t4_mgmt_tx(ctrlq, mbuf);
	return 0;
}

static int set_filter_wr(struct rte_eth_dev *dev, unsigned int fidx)
{
	struct adapter *adapter = ethdev2adap(dev);
	struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
	struct rte_mbuf *mbuf;
	struct fw_filter2_wr *fwr;
	struct sge_ctrl_txq *ctrlq;
	unsigned int port_id = ethdev2pinfo(dev)->port_id;
	int ret;

	/*
	 * If the new filter requires loopback Destination MAC and/or VLAN
	 * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
	 * the filter.
	 */
	if (f->fs.newvlan) {
		/* allocate L2T entry for new filter */
		f->l2t = cxgbe_l2t_alloc_switching(f->dev, f->fs.vlan,
						   f->fs.eport, f->fs.dmac);
		if (!f->l2t)
			return -ENOMEM;
	}

	ctrlq = &adapter->sge.ctrlq[port_id];
	mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
	if (!mbuf) {
		ret = -ENOMEM;
		goto out;
	}

	mbuf->data_len = sizeof(*fwr);
	mbuf->pkt_len = mbuf->data_len;

	fwr = rte_pktmbuf_mtod(mbuf, struct fw_filter2_wr *);
	memset(fwr, 0, sizeof(*fwr));

	/*
	 * Construct the work request to set the filter.
	 */
	if (adapter->params.filter2_wr_support)
		fwr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER2_WR));
	else
		fwr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
	fwr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*fwr) / 16));
	fwr->tid_to_iq =
		cpu_to_be32(V_FW_FILTER_WR_TID(f->tid) |
			    V_FW_FILTER_WR_RQTYPE(f->fs.type) |
			    V_FW_FILTER_WR_NOREPLY(0) |
			    V_FW_FILTER_WR_IQ(f->fs.iq));
	fwr->del_filter_to_l2tix =
		cpu_to_be32(V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
			    V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
			    V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
			    V_FW_FILTER_WR_INSVLAN
				(f->fs.newvlan == VLAN_INSERT ||
				 f->fs.newvlan == VLAN_REWRITE) |
			    V_FW_FILTER_WR_RMVLAN
				(f->fs.newvlan == VLAN_REMOVE ||
				 f->fs.newvlan == VLAN_REWRITE) |
			    V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
			    V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
			    V_FW_FILTER_WR_PRIO(f->fs.prio) |
			    V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
	fwr->ethtype = cpu_to_be16(f->fs.val.ethtype);
	fwr->ethtypem = cpu_to_be16(f->fs.mask.ethtype);
	fwr->frag_to_ovlan_vldm =
		(V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.ivlan_vld) |
		 V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.ivlan_vld));
	fwr->smac_sel = 0;
	fwr->rx_chan_rx_rpl_iq =
		cpu_to_be16(V_FW_FILTER_WR_RX_CHAN(0) |
			    V_FW_FILTER_WR_RX_RPL_IQ(adapter->sge.fw_evtq.abs_id
						     ));
	fwr->maci_to_matchtypem =
		cpu_to_be32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
			    V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
			    V_FW_FILTER_WR_PORT(f->fs.val.iport) |
			    V_FW_FILTER_WR_PORTM(f->fs.mask.iport));
	fwr->ptcl = f->fs.val.proto;
	fwr->ptclm = f->fs.mask.proto;
	fwr->ivlan = cpu_to_be16(f->fs.val.ivlan);
	fwr->ivlanm = cpu_to_be16(f->fs.mask.ivlan);
	rte_memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
	rte_memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
	rte_memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
	rte_memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
	fwr->lp = cpu_to_be16(f->fs.val.lport);
	fwr->lpm = cpu_to_be16(f->fs.mask.lport);
	fwr->fp = cpu_to_be16(f->fs.val.fport);
	fwr->fpm = cpu_to_be16(f->fs.mask.fport);

	if (adapter->params.filter2_wr_support) {
		fwr->filter_type_swapmac =
			V_FW_FILTER2_WR_SWAPMAC(f->fs.swapmac);
		fwr->natmode_to_ulp_type =
			V_FW_FILTER2_WR_ULP_TYPE(f->fs.nat_mode ?
						 ULP_MODE_TCPDDP :
						 ULP_MODE_NONE) |
			V_FW_FILTER2_WR_NATMODE(f->fs.nat_mode);
		memcpy(fwr->newlip, f->fs.nat_lip, sizeof(fwr->newlip));
		memcpy(fwr->newfip, f->fs.nat_fip, sizeof(fwr->newfip));
		fwr->newlport = cpu_to_be16(f->fs.nat_lport);
		fwr->newfport = cpu_to_be16(f->fs.nat_fport);
	}

	/*
	 * Mark the filter as "pending" and ship off the Filter Work Request.
	 * When we get the Work Request Reply we'll clear the pending status.
	 */
	f->pending = 1;
	t4_mgmt_tx(ctrlq, mbuf);
	return 0;

out:
	return ret;
}

/**
 * Set the corresponding entries in the bitmap.
 */
static int cxgbe_set_ftid(struct tid_info *t, u32 fidx, u8 nentries)
{
	u32 i;

	t4_os_lock(&t->ftid_lock);
	if (rte_bitmap_get(t->ftid_bmap, fidx)) {
		t4_os_unlock(&t->ftid_lock);
		return -EBUSY;
	}

	for (i = fidx; i < fidx + nentries; i++)
		rte_bitmap_set(t->ftid_bmap, i);
	t4_os_unlock(&t->ftid_lock);
	return 0;
}

/**
 * Clear the corresponding entries in the bitmap.
 */
static void cxgbe_clear_ftid(struct tid_info *t, u32 fidx, u8 nentries)
{
	u32 i;

	t4_os_lock(&t->ftid_lock);
	for (i = fidx; i < fidx + nentries; i++)
		rte_bitmap_clear(t->ftid_bmap, i);
	t4_os_unlock(&t->ftid_lock);
}

/**
 * Check a delete filter request for validity and send it to the hardware.
 * Return 0 on success, an error number otherwise. We attach any provided
 * filter operation context to the internal filter specification in order to
 * facilitate signaling completion of the operation.
 */
int cxgbe_del_filter(struct rte_eth_dev *dev, unsigned int filter_id,
		     struct ch_filter_specification *fs,
		     struct filter_ctx *ctx)
{
	struct port_info *pi = dev->data->dev_private;
	struct adapter *adapter = pi->adapter;
	struct filter_entry *f;
	unsigned int chip_ver;
	u8 nentries;
	int ret;

	if (is_hashfilter(adapter) && fs->cap)
		return cxgbe_del_hash_filter(dev, filter_id, ctx);

	if (filter_id >= adapter->tids.nftids)
		return -ERANGE;

	chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);

	/*
	 * Ensure the IPv6 filter id is aligned on the 2 slot boundary for T6,
	 * and on the 4 slot boundary for cards below T6.
	 */
	if (fs->type == FILTER_TYPE_IPV6) {
		if (chip_ver < CHELSIO_T6)
			filter_id &= ~0x3;
		else
			filter_id &= ~0x1;
	}

	nentries = cxgbe_filter_slots(adapter, fs->type);
	ret = cxgbe_is_filter_set(&adapter->tids, filter_id, nentries);
	if (!ret) {
		dev_warn(adapter, "%s: could not find filter entry: %u\n",
			 __func__, filter_id);
		return -EINVAL;
	}

	f = &adapter->tids.ftid_tab[filter_id];
	ret = writable_filter(f);
	if (ret)
		return ret;

	if (f->valid) {
		f->ctx = ctx;
		cxgbe_clear_ftid(&adapter->tids,
				 f->tid - adapter->tids.ftid_base,
				 nentries);
		return del_filter_wr(dev, filter_id);
	}

	/*
	 * If the caller has passed in a Completion Context then we need to
	 * mark it as a successful completion so they don't stall waiting
	 * for it.
	 */
	if (ctx) {
		ctx->result = 0;
		t4_complete(&ctx->completion);
	}

	return 0;
}

/**
 * Check a Chelsio Filter Request for validity, convert it into our internal
 * format and send it to the hardware. Return 0 on success, an error number
 * otherwise. We attach any provided filter operation context to the internal
 * filter specification in order to facilitate signaling completion of the
 * operation.
 */
int cxgbe_set_filter(struct rte_eth_dev *dev, unsigned int filter_id,
		     struct ch_filter_specification *fs,
		     struct filter_ctx *ctx)
{
	struct port_info *pi = ethdev2pinfo(dev);
	struct adapter *adapter = pi->adapter;
	unsigned int fidx, iq;
	struct filter_entry *f;
	unsigned int chip_ver;
	u8 nentries, bitoff[16] = {0};
	int ret;

	if (is_hashfilter(adapter) && fs->cap)
		return cxgbe_set_hash_filter(dev, fs, ctx);

	if (filter_id >= adapter->tids.nftids)
		return -ERANGE;

	chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);

	ret = cxgbe_validate_filter(adapter, fs);
	if (ret)
		return ret;

	/*
	 * IPv6 filters occupy four slots and must be aligned on four-slot
	 * boundaries for T5. On T6, IPv6 filters occupy two slots and
	 * must be aligned on two-slot boundaries.
	 *
	 * IPv4 filters only occupy a single slot and have no alignment
	 * requirements.
	 */
	fidx = filter_id;
	if (fs->type == FILTER_TYPE_IPV6) {
		if (chip_ver < CHELSIO_T6)
			fidx &= ~0x3;
		else
			fidx &= ~0x1;
	}

	if (fidx != filter_id)
		return -EINVAL;

	nentries = cxgbe_filter_slots(adapter, fs->type);
	ret = cxgbe_is_filter_set(&adapter->tids, filter_id, nentries);
	if (ret)
		return -EBUSY;

	iq = get_filter_steerq(dev, fs);

	/*
	 * Check to make sure that the provided filter index is not
	 * already in use by someone else.
	 */
	f = &adapter->tids.ftid_tab[filter_id];
	if (f->valid)
		return -EBUSY;

	fidx = adapter->tids.ftid_base + filter_id;
	ret = cxgbe_set_ftid(&adapter->tids, filter_id, nentries);
	if (ret)
		return ret;

	/*
	 * Check to make sure the filter requested is writable ...
	 */
	ret = writable_filter(f);
	if (ret) {
		/* Clear the bits we have set above */
		cxgbe_clear_ftid(&adapter->tids, filter_id, nentries);
		return ret;
	}

	/*
	 * Allocate a clip table entry only if we have a non-zero IPv6 address.
	 */
	if (chip_ver > CHELSIO_T5 && fs->type &&
	    memcmp(fs->val.lip, bitoff, sizeof(bitoff))) {
		f->clipt = cxgbe_clip_alloc(dev, (u32 *)&fs->val.lip);
		if (!f->clipt) {
			ret = -ENOMEM;
			goto free_tid;
		}
	}

	/*
	 * Convert the filter specification into our internal format.
	 * We copy the PF/VF specification into the Outer VLAN field
	 * here so the rest of the code -- including the interface to
	 * the firmware -- doesn't have to constantly do these checks.
	 */
	f->fs = *fs;
	f->fs.iq = iq;
	f->dev = dev;

	/*
	 * Attempt to set the filter. If we don't succeed, we clear
	 * it and return the failure.
	 */
	f->ctx = ctx;
	f->tid = fidx; /* Save the actual tid */
	ret = set_filter_wr(dev, filter_id);
	if (ret)
		goto free_tid;

	return ret;

free_tid:
	cxgbe_clear_ftid(&adapter->tids, filter_id, nentries);
	clear_filter(f);
	f->ctx = NULL;

	return ret;
}

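/*
 * Usage sketch (illustrative, with hypothetical values): steer IPv4 TCP
 * traffic with local port 80 to Rx queue set 0 via an LE-TCAM filter:
 *
 *	struct ch_filter_specification fs;
 *	struct filter_ctx ctx;
 *
 *	memset(&fs, 0, sizeof(fs));
 *	fs.type = FILTER_TYPE_IPV4;
 *	fs.val.proto = IPPROTO_TCP;
 *	fs.mask.proto = 0xff;
 *	fs.val.lport = 80;
 *	fs.mask.lport = 0xffff;
 *	fs.dirsteer = 1;
 *	fs.iq = 0;
 *	t4_init_completion(&ctx.completion);
 *	ret = cxgbe_set_filter(dev, filter_id, &fs, &ctx);
 *
 * On success the caller then waits on ctx.completion for the firmware
 * reply (handled by cxgbe_filter_rpl() below).
 */
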
/**
 * Handle a Hash filter write reply.
 */
void cxgbe_hash_filter_rpl(struct adapter *adap,
			   const struct cpl_act_open_rpl *rpl)
{
	struct tid_info *t = &adap->tids;
	struct filter_entry *f;
	struct filter_ctx *ctx = NULL;
	unsigned int tid = GET_TID(rpl);
	unsigned int ftid = G_TID_TID(G_AOPEN_ATID
				      (be32_to_cpu(rpl->atid_status)));
	unsigned int status = G_AOPEN_STATUS(be32_to_cpu(rpl->atid_status));

	f = lookup_atid(t, ftid);
	if (!f) {
		dev_warn(adap, "%s: could not find filter entry: %d\n",
			 __func__, ftid);
		return;
	}

	ctx = f->ctx;
	f->ctx = NULL;

	switch (status) {
	case CPL_ERR_NONE: {
		f->tid = tid;
		f->pending = 0; /* asynchronous setup completed */
		f->valid = 1;

		cxgbe_insert_tid(t, f, f->tid, 0);
		cxgbe_free_atid(t, ftid);
		if (ctx) {
			ctx->tid = f->tid;
			ctx->result = 0;
		}
		if (f->fs.hitcnts)
			set_tcb_field(adap, tid,
				      W_TCB_TIMESTAMP,
				      V_TCB_TIMESTAMP(M_TCB_TIMESTAMP) |
				      V_TCB_T_RTT_TS_RECENT_AGE
					      (M_TCB_T_RTT_TS_RECENT_AGE),
				      V_TCB_TIMESTAMP(0ULL) |
				      V_TCB_T_RTT_TS_RECENT_AGE(0ULL),
				      1);
		if (f->fs.newvlan == VLAN_INSERT ||
		    f->fs.newvlan == VLAN_REWRITE)
			set_tcb_tflag(adap, tid, S_TF_CCTRL_RFR, 1, 1);
		break;
	}
	default:
		dev_warn(adap, "%s: filter creation failed with status = %u\n",
			 __func__, status);

		if (ctx) {
			if (status == CPL_ERR_TCAM_FULL)
				ctx->result = -EAGAIN;
			else
				ctx->result = -EINVAL;
		}

		cxgbe_free_atid(t, ftid);
		t4_os_free(f);
	}

	if (ctx)
		t4_complete(&ctx->completion);
}

/**
 * Handle a LE-TCAM filter write/deletion reply.
 */
void cxgbe_filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
{
	struct filter_entry *f = NULL;
	unsigned int tid = GET_TID(rpl);
	int idx, max_fidx = adap->tids.nftids;

	/* Get the corresponding filter entry for this tid */
	if (adap->tids.ftid_tab) {
		/* Check this in normal filter region */
		idx = tid - adap->tids.ftid_base;
		if (idx >= max_fidx)
			return;

		f = &adap->tids.ftid_tab[idx];
		if (f->tid != tid)
			return;
	}

	/* We found the filter entry for this tid */
	if (f) {
		unsigned int ret = G_COOKIE(rpl->cookie);
		struct filter_ctx *ctx;

		/*
		 * Pull off any filter operation context attached to the
		 * filter.
		 */
		ctx = f->ctx;
		f->ctx = NULL;

		if (ret == FW_FILTER_WR_FLT_ADDED) {
			f->pending = 0; /* asynchronous setup completed */
			f->valid = 1;
			if (ctx) {
				ctx->tid = f->tid;
				ctx->result = 0;
			}
		} else if (ret == FW_FILTER_WR_FLT_DELETED) {
			/*
			 * Clear the filter when we get confirmation from the
			 * hardware that the filter has been deleted.
			 */
			clear_filter(f);
			if (ctx)
				ctx->result = 0;
		} else {
			/*
			 * Something went wrong. Issue a warning about the
			 * problem and clear everything out.
			 */
			dev_warn(adap, "filter %u setup failed with error %u\n",
				 idx, ret);
			clear_filter(f);
			if (ctx)
				ctx->result = -EINVAL;
		}

		if (ctx)
			t4_complete(&ctx->completion);
	}
}

/**
 * Retrieve the packet count for the specified filter.
 */
int cxgbe_get_filter_count(struct adapter *adapter, unsigned int fidx,
			   u64 *c, int hash, bool get_byte)
{
	struct filter_entry *f;
	unsigned int tcb_base, tcbaddr;
	int ret;

	tcb_base = t4_read_reg(adapter, A_TP_CMM_TCB_BASE);
	if (is_hashfilter(adapter) && hash) {
		if (fidx < adapter->tids.ntids) {
			f = adapter->tids.tid_tab[fidx];
			if (!f)
				return -EINVAL;

			if (is_t5(adapter->params.chip)) {
				*c = 0;
				return 0;
			}
			tcbaddr = tcb_base + (fidx * TCB_SIZE);
			goto get_count;
		} else {
			return -ERANGE;
		}
	} else {
		if (fidx >= adapter->tids.nftids)
			return -ERANGE;

		f = &adapter->tids.ftid_tab[fidx];
		if (!f->valid)
			return -EINVAL;

		tcbaddr = tcb_base + f->tid * TCB_SIZE;
	}

	f = &adapter->tids.ftid_tab[fidx];
	if (!f->valid)
		return -EINVAL;

get_count:
	if (is_t5(adapter->params.chip) || is_t6(adapter->params.chip)) {
		/*
		 * For T5, the Filter Packet Hit Count is maintained as a
		 * 32-bit Big Endian value in the TCB field {timestamp}.
		 * Similar to the craziness above, instead of the filter hit
		 * count showing up at offset 20 ((W_TCB_TIMESTAMP == 5) *
		 * sizeof(u32)), it actually shows up at offset 24. Whacky.
		 */
		if (get_byte) {
			unsigned int word_offset = 4;
			__be64 be64_byte_count;

			t4_os_lock(&adapter->win0_lock);
			ret = t4_memory_rw(adapter, MEMWIN_NIC, MEM_EDC0,
					   tcbaddr +
					   (word_offset * sizeof(__be32)),
					   sizeof(be64_byte_count),
					   &be64_byte_count,
					   T4_MEMORY_READ);
			t4_os_unlock(&adapter->win0_lock);
			if (ret < 0)
				return ret;
			*c = be64_to_cpu(be64_byte_count);
		} else {
			unsigned int word_offset = 6;
			__be32 be32_count;

			t4_os_lock(&adapter->win0_lock);
			ret = t4_memory_rw(adapter, MEMWIN_NIC, MEM_EDC0,
					   tcbaddr +
					   (word_offset * sizeof(__be32)),
					   sizeof(be32_count), &be32_count,
					   T4_MEMORY_READ);
			t4_os_unlock(&adapter->win0_lock);
			if (ret < 0)
				return ret;
			*c = (u64)be32_to_cpu(be32_count);
		}
	}
	return 0;
}

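/*
 * Illustrative usage: read the packet counter and then the byte counter
 * for LE-TCAM filter fidx (hash == 0):
 *
 *	u64 pkts = 0, bytes = 0;
 *
 *	ret = cxgbe_get_filter_count(adapter, fidx, &pkts, 0, false);
 *	if (!ret)
 *		ret = cxgbe_get_filter_count(adapter, fidx, &bytes, 0, true);
 */
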
/**
 * Clear the packet count for the specified filter.
 */
int cxgbe_clear_filter_count(struct adapter *adapter, unsigned int fidx,
			     int hash, bool clear_byte)
{
	u64 tcb_mask = 0, tcb_val = 0;
	struct filter_entry *f = NULL;
	u16 tcb_word = 0;

	if (is_hashfilter(adapter) && hash) {
		if (fidx >= adapter->tids.ntids)
			return -ERANGE;

		/* No hitcounts supported for T5 hashfilters */
		if (is_t5(adapter->params.chip))
			return 0;

		f = adapter->tids.tid_tab[fidx];
	} else {
		if (fidx >= adapter->tids.nftids)
			return -ERANGE;

		f = &adapter->tids.ftid_tab[fidx];
	}

	if (!f || !f->valid)
		return -EINVAL;

	tcb_word = W_TCB_TIMESTAMP;
	tcb_mask = V_TCB_TIMESTAMP(M_TCB_TIMESTAMP);
	tcb_val = V_TCB_TIMESTAMP(0ULL);

	set_tcb_field(adapter, f->tid, tcb_word, tcb_mask, tcb_val, 1);

	if (clear_byte) {
		tcb_word = W_TCB_T_RTT_TS_RECENT_AGE;
		tcb_mask =
			V_TCB_T_RTT_TS_RECENT_AGE(M_TCB_T_RTT_TS_RECENT_AGE) |
			V_TCB_T_RTSEQ_RECENT(M_TCB_T_RTSEQ_RECENT);
		tcb_val = V_TCB_T_RTT_TS_RECENT_AGE(0ULL) |
			  V_TCB_T_RTSEQ_RECENT(0ULL);

		set_tcb_field(adapter, f->tid, tcb_word, tcb_mask, tcb_val, 1);
	}

	return 0;
}

/**
 * Handle a Hash filter delete reply.
 */
void cxgbe_hash_del_filter_rpl(struct adapter *adap,
			       const struct cpl_abort_rpl_rss *rpl)
{
	struct tid_info *t = &adap->tids;
	struct filter_entry *f;
	struct filter_ctx *ctx = NULL;
	unsigned int tid = GET_TID(rpl);

	f = lookup_tid(t, tid);
	if (!f) {
		dev_warn(adap, "%s: could not find filter entry: %u\n",
			 __func__, tid);
		return;
	}

	ctx = f->ctx;
	f->ctx = NULL;

	f->valid = 0;

	if (f->clipt)
		cxgbe_clip_release(f->dev, f->clipt);

	cxgbe_remove_tid(t, 0, tid, 0);
	t4_os_free(f);

	if (ctx) {
		ctx->result = 0;
		t4_complete(&ctx->completion);
	}
}