1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Chelsio Communications.
7 #include "base/common.h"
8 #include "base/t4_tcb.h"
9 #include "base/t4_regs.h"
10 #include "cxgbe_filter.h"
15 #include "cxgbe_pfvf.h"
/* NOTE(review): excerpted listing — braces, local declarations (param/val/ret)
 * and some statements between the shown lines are not visible here.
 */
18 * Initialize Hash Filters
20 int cxgbe_init_hash_filter(struct adapter *adap)
22 unsigned int user_filter_perc, n_user_filters;
/* Hash filters are only probed on chips newer than T5; the LE DB response
 * codes must match the expected value (4) or hash-filter mode is disabled.
 */
26 if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
27 val = t4_read_reg(adap, A_LE_DB_RSP_CODE_0)
28 if (G_TCAM_ACTV_HIT(val) != 4) {
29 adap->params.hash_filter = 0;
33 val = t4_read_reg(adap, A_LE_DB_RSP_CODE_1);
34 if (G_HASH_ACTV_HIT(val) != 4) {
35 adap->params.hash_filter = 0;
/* Query firmware for the number of TIDs available for hash filters. */
40 param = CXGBE_FW_PARAM_DEV(NTID);
41 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
45 adap->tids.ntids = val;
46 adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
/* All filter TIDs are handed to the user (100%). */
48 user_filter_perc = 100;
49 n_user_filters = mult_frac(adap->tids.nftids,
53 adap->tids.nftids = n_user_filters;
54 adap->params.hash_filter = 1;
/* NOTE(review): excerpted listing — the S() macro definition line and the
 * return statements between the shown lines are not visible here.
 */
59 * Validate if the requested filter specification can be set by checking
60 * if the requested features have been enabled
62 int cxgbe_validate_filter(struct adapter *adapter,
63 struct ch_filter_specification *fs)
68 * Check for unconfigured fields being used.
/* Hash (cap) filters validate against filter_mask; LE-TCAM filters
 * validate against vlan_pri_map.
 */
70 fconf = fs->cap ? adapter->params.tp.filter_mask :
71 adapter->params.tp.vlan_pri_map;
73 iconf = adapter->params.tp.ingress_config;
/* S(field): field is used in value or mask; U(mask, field): field is
 * used but the corresponding match capability is not configured in fconf.
 */
76 (fs->val._field || fs->mask._field)
77 #define U(_mask, _field) \
78 (!(fconf & (_mask)) && S(_field))
80 if (U(F_PORT, iport) || U(F_ETHERTYPE, ethtype) ||
81 U(F_PROTOCOL, proto) || U(F_MACMATCH, macidx) ||
82 U(F_VLAN, ivlan_vld) || U(F_VNIC_ID, ovlan_vld) ||
83 U(F_TOS, tos) || U(F_VNIC_ID, pfvf_vld))
86 /* Either OVLAN or PFVF match is enabled in hardware, but not both */
87 if ((S(pfvf_vld) && !(iconf & F_VNIC)) ||
88 (S(ovlan_vld) && (iconf & F_VNIC)))
91 /* To use OVLAN or PFVF, L4 encapsulation match must not be enabled */
92 if ((S(ovlan_vld) && (iconf & F_USE_ENC_IDX)) ||
93 (S(pfvf_vld) && (iconf & F_USE_ENC_IDX)))
100 * If the user is requesting that the filter action loop
101 * matching packets back out one of our ports, make sure that
102 * the egress port is in range.
104 if (fs->action == FILTER_SWITCH &&
105 fs->eport >= adapter->params.nports)
109 * Don't allow various trivially obvious bogus out-of-range
112 if (fs->val.iport >= adapter->params.nports)
/* NAT and swapmac on LE-TCAM filters need FW_FILTER2_WR support. */
115 if (!fs->cap && fs->nat_mode && !adapter->params.filter2_wr_support)
118 if (!fs->cap && fs->swapmac && !adapter->params.filter2_wr_support)
125 * Get the queue to which the traffic must be steered to.
/* Returns the absolute ingress queue id for fs->iq; excerpted listing —
 * the default/else paths are not visible here.
 */
127 static unsigned int get_filter_steerq(struct rte_eth_dev *dev,
128 struct ch_filter_specification *fs)
130 struct port_info *pi = ethdev2pinfo(dev);
131 struct adapter *adapter = pi->adapter;
135 * If the user has requested steering matching Ingress Packets
136 * to a specific Queue Set, we need to make sure it's in range
137 * for the port and map that into the Absolute Queue ID of the
138 * Queue Set's Response Queue.
144 * If the iq id is greater than the number of qsets,
145 * then assume it is an absolute qid.
/* In-range iq is relative to the port's first qset; map it to abs id. */
147 if (fs->iq < pi->n_rx_qsets)
148 iq = adapter->sge.ethrxq[pi->first_qset +
157 /* Return an error number if the indicated filter isn't writable ... */
/* NOTE(review): body not visible in this excerpt — presumably rejects
 * locked or pending filter entries; confirm against full source.
 */
158 static int writable_filter(struct filter_entry *f)
169 * Send CPL_SET_TCB_FIELD message
/* Builds a CPL_SET_TCB_FIELD work request in an mbuf from control queue 0
 * and ships it via the management TX path. Excerpted listing — the mbuf
 * allocation-failure handling between the shown lines is not visible.
 */
171 static void set_tcb_field(struct adapter *adapter, unsigned int ftid,
172 u16 word, u64 mask, u64 val, int no_reply)
174 struct rte_mbuf *mbuf;
175 struct cpl_set_tcb_field *req;
176 struct sge_ctrl_txq *ctrlq;
178 ctrlq = &adapter->sge.ctrlq[0];
179 mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
182 mbuf->data_len = sizeof(*req);
183 mbuf->pkt_len = mbuf->data_len;
185 req = rte_pktmbuf_mtod(mbuf, struct cpl_set_tcb_field *);
186 memset(req, 0, sizeof(*req));
187 INIT_TP_WR_MIT_CPL(req, CPL_SET_TCB_FIELD, ftid);
/* Replies (if requested) are steered to the firmware event queue. */
188 req->reply_ctrl = cpu_to_be16(V_REPLY_CHAN(0) |
189 V_QUEUENO(adapter->sge.fw_evtq.abs_id) |
190 V_NO_REPLY(no_reply));
/* The filter tid doubles as the reply cookie. */
191 req->word_cookie = cpu_to_be16(V_WORD(word) | V_COOKIE(ftid));
192 req->mask = cpu_to_be64(mask);
193 req->val = cpu_to_be64(val);
195 t4_mgmt_tx(ctrlq, mbuf);
199 * Set one of the t_flags bits in the TCB.
/* Thin wrapper over set_tcb_field() targeting the TCB t_flags word. */
201 static void set_tcb_tflag(struct adapter *adap, unsigned int ftid,
202 unsigned int bit_pos, unsigned int val, int no_reply)
204 set_tcb_field(adap, ftid, W_TCB_T_FLAGS, 1ULL << bit_pos,
205 (unsigned long long)val << bit_pos, no_reply);
209 * Build a CPL_SET_TCB_FIELD message as payload of a ULP_TX_PKT command.
/* The caller provides a pre-sized buffer; layout produced here is:
 * ulp_txpkt | ulptx_idata(IMM) | cpl_set_tcb_field | ulptx_idata(NOOP).
 * Excerpted listing — the 'word' parameter line is not visible here.
 */
211 static inline void mk_set_tcb_field_ulp(struct filter_entry *f,
212 struct cpl_set_tcb_field *req,
214 u64 mask, u64 val, u8 cookie,
217 struct ulp_txpkt *txpkt = (struct ulp_txpkt *)req;
218 struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);
220 txpkt->cmd_dest = cpu_to_be32(V_ULPTX_CMD(ULP_TX_PKT) |
221 V_ULP_TXPKT_DEST(0));
/* ULP TX lengths are expressed in 16-byte units. */
222 txpkt->len = cpu_to_be32(DIV_ROUND_UP(sizeof(*req), 16));
223 sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM));
224 sc->len = cpu_to_be32(sizeof(*req) - sizeof(struct work_request_hdr));
225 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_SET_TCB_FIELD, f->tid));
226 req->reply_ctrl = cpu_to_be16(V_NO_REPLY(no_reply) | V_REPLY_CHAN(0) |
228 req->word_cookie = cpu_to_be16(V_WORD(word) | V_COOKIE(cookie));
229 req->mask = cpu_to_be64(mask);
230 req->val = cpu_to_be64(val);
/* Trailing NOOP pads the ULP_TX_PKT to its declared length. */
231 sc = (struct ulptx_idata *)(req + 1);
232 sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
233 sc->len = cpu_to_be32(0);
237 * IPv6 requires 2 slots on T6 and 4 slots for cards below T6.
238 * IPv4 requires only 1 slot on all cards.
/* Returns the number of LE-TCAM slots a filter of the given address
 * family consumes. Excerpted listing — the return statements themselves
 * are not visible here.
 */
240 u8 cxgbe_filter_slots(struct adapter *adap, u8 family)
242 if (family == FILTER_TYPE_IPV6) {
243 if (CHELSIO_CHIP_VERSION(adap->params.chip) < CHELSIO_T6)
253 * Check if entries are already filled.
/* Scans nentries consecutive bits starting at fidx in the ftid bitmap,
 * under ftid_lock. Excerpted listing — the result variable and loop exit
 * are not visible here.
 */
255 bool cxgbe_is_filter_set(struct tid_info *t, u32 fidx, u8 nentries)
260 /* Ensure there's enough slots available. */
261 t4_os_lock(&t->ftid_lock);
262 for (i = fidx; i < fidx + nentries; i++) {
263 if (rte_bitmap_get(t->ftid_bmap, i)) {
268 t4_os_unlock(&t->ftid_lock);
273 * Allocate available free entries.
/* Finds a free region (or single bit) in the ftid bitmap under ftid_lock.
 * Returns the starting index, or -1 if no slot fits.
 */
275 int cxgbe_alloc_ftid(struct adapter *adap, u8 nentries)
277 struct tid_info *t = &adap->tids;
279 int size = t->nftids;
281 t4_os_lock(&t->ftid_lock);
/* Multi-slot request uses region search; single slot takes first zero. */
283 pos = cxgbe_bitmap_find_free_region(t->ftid_bmap, size,
286 pos = cxgbe_find_first_zero_bit(t->ftid_bmap, size);
287 t4_os_unlock(&t->ftid_lock);
289 return pos < size ? pos : -1;
293 * Clear a filter and release any of its resources that we own. This also
294 * clears the filter's "pending" status.
/* Excerpted listing — the NULL checks guarding each release call are not
 * visible here; presumably each release is conditional on the handle.
 */
296 static void clear_filter(struct filter_entry *f)
298 struct port_info *pi = ethdev2pinfo(f->dev);
/* Release CLIP (IPv6), L2T (switching), MPS TCAM and SMT resources. */
301 cxgbe_clip_release(f->dev, f->clipt);
304 cxgbe_l2t_release(f->l2t);
306 if (f->fs.mask.macidx)
307 cxgbe_mpstcam_remove(pi, f->fs.val.macidx);
310 cxgbe_smt_release(f->smt);
312 /* The zeroing of the filter rule below clears the filter valid,
313 * pending, locked flags etc. so it's all we need for
316 memset(f, 0, sizeof(*f));
320 * Construct hash filter ntuple.
/* Packs the compressed-filter tuple fields at their TP-configured shifts
 * into a single u64. A shift < 0 means that field is not present in the
 * hardware tuple layout. Excerpted listing — the ntuple declaration and
 * the final return are not visible here.
 */
322 static u64 hash_filter_ntuple(const struct filter_entry *f)
324 struct adapter *adap = ethdev2adap(f->dev);
325 struct tp_params *tp = &adap->params.tp;
327 u16 tcp_proto = IPPROTO_TCP; /* TCP Protocol Number */
329 if (tp->port_shift >= 0 && f->fs.mask.iport)
330 ntuple |= (u64)f->fs.val.iport << tp->port_shift;
/* Hash filters always need a protocol; default to TCP when unset. */
332 if (tp->protocol_shift >= 0) {
333 if (!f->fs.val.proto)
334 ntuple |= (u64)tcp_proto << tp->protocol_shift;
336 ntuple |= (u64)f->fs.val.proto << tp->protocol_shift;
339 if (tp->ethertype_shift >= 0 && f->fs.mask.ethtype)
340 ntuple |= (u64)(f->fs.val.ethtype) << tp->ethertype_shift;
341 if (tp->macmatch_shift >= 0 && f->fs.mask.macidx)
342 ntuple |= (u64)(f->fs.val.macidx) << tp->macmatch_shift;
343 if (tp->vlan_shift >= 0 && f->fs.mask.ivlan)
344 ntuple |= (u64)(F_FT_VLAN_VLD | f->fs.val.ivlan) <<
/* VNIC field carries either PF/VF or outer-VLAN data depending on
 * whether F_VNIC is set in the ingress configuration — never both.
 */
346 if (tp->vnic_shift >= 0) {
347 if ((adap->params.tp.ingress_config & F_VNIC) &&
349 ntuple |= (u64)(f->fs.val.pfvf_vld << 16 |
350 f->fs.val.pf << 13 | f->fs.val.vf) <<
352 else if (!(adap->params.tp.ingress_config & F_VNIC) &&
353 f->fs.mask.ovlan_vld)
354 ntuple |= (u64)(f->fs.val.ovlan_vld << 16 |
355 f->fs.val.ovlan) << tp->vnic_shift;
357 if (tp->tos_shift >= 0 && f->fs.mask.tos)
358 ntuple |= (u64)f->fs.val.tos << tp->tos_shift;
364 * Build a CPL_ABORT_REQ message as payload of a ULP_TX_PKT command.
/* Layout: ulp_txpkt | ulptx_idata(IMM) | cpl_abort_req | ulptx_idata(NOOP).
 * Excerpted listing — the 'tid' parameter line is not visible here.
 */
366 static void mk_abort_req_ulp(struct cpl_abort_req *abort_req,
369 struct ulp_txpkt *txpkt = (struct ulp_txpkt *)abort_req;
370 struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);
372 txpkt->cmd_dest = cpu_to_be32(V_ULPTX_CMD(ULP_TX_PKT) |
373 V_ULP_TXPKT_DEST(0));
/* ULP TX length is in 16-byte units. */
374 txpkt->len = cpu_to_be32(DIV_ROUND_UP(sizeof(*abort_req), 16));
375 sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM));
376 sc->len = cpu_to_be32(sizeof(*abort_req) -
377 sizeof(struct work_request_hdr));
378 OPCODE_TID(abort_req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, tid));
379 abort_req->rsvd0 = cpu_to_be32(0);
380 abort_req->rsvd1 = 0;
/* No TCP RST should be generated for this abort. */
381 abort_req->cmd = CPL_ABORT_NO_RST;
382 sc = (struct ulptx_idata *)(abort_req + 1);
383 sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
384 sc->len = cpu_to_be32(0);
388 * Build a CPL_ABORT_RPL message as payload of a ULP_TX_PKT command.
/* Mirror of mk_abort_req_ulp() for the reply CPL; same ULP framing.
 * Excerpted listing — the 'tid' parameter line is not visible here.
 */
390 static void mk_abort_rpl_ulp(struct cpl_abort_rpl *abort_rpl,
393 struct ulp_txpkt *txpkt = (struct ulp_txpkt *)abort_rpl;
394 struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);
396 txpkt->cmd_dest = cpu_to_be32(V_ULPTX_CMD(ULP_TX_PKT) |
397 V_ULP_TXPKT_DEST(0));
398 txpkt->len = cpu_to_be32(DIV_ROUND_UP(sizeof(*abort_rpl), 16));
399 sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM));
400 sc->len = cpu_to_be32(sizeof(*abort_rpl) -
401 sizeof(struct work_request_hdr));
402 OPCODE_TID(abort_rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
403 abort_rpl->rsvd0 = cpu_to_be32(0);
404 abort_rpl->rsvd1 = 0;
405 abort_rpl->cmd = CPL_ABORT_NO_RST;
406 sc = (struct ulptx_idata *)(abort_rpl + 1);
407 sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
408 sc->len = cpu_to_be32(0);
412 * Delete the specified hash filter.
/* Removes a hash filter by sending one ULP_TX work request carrying a
 * SET_TCB_FIELD (redirect RSS to the fw event queue) plus an ABORT_REQ /
 * ABORT_RPL pair for the tid. Excerpted listing — error returns, the
 * 'valid' check and the ctx attachment are not visible here.
 */
414 static int cxgbe_del_hash_filter(struct rte_eth_dev *dev,
415 unsigned int filter_id,
416 struct filter_ctx *ctx)
418 struct adapter *adapter = ethdev2adap(dev);
419 struct tid_info *t = &adapter->tids;
420 struct filter_entry *f;
421 struct sge_ctrl_txq *ctrlq;
422 unsigned int port_id = ethdev2pinfo(dev)->port_id;
425 if (filter_id > adapter->tids.ntids)
428 f = lookup_tid(t, filter_id);
430 dev_err(adapter, "%s: no filter entry for filter_id = %d\n",
431 __func__, filter_id);
435 ret = writable_filter(f);
441 struct rte_mbuf *mbuf;
442 struct work_request_hdr *wr;
443 struct ulptx_idata *aligner;
444 struct cpl_set_tcb_field *req;
445 struct cpl_abort_req *abort_req;
446 struct cpl_abort_rpl *abort_rpl;
/* All three CPLs travel in one work request; round up to WR granularity. */
451 wrlen = cxgbe_roundup(sizeof(*wr) +
452 (sizeof(*req) + sizeof(*aligner)) +
453 sizeof(*abort_req) + sizeof(*abort_rpl),
456 ctrlq = &adapter->sge.ctrlq[port_id];
457 mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
459 dev_err(adapter, "%s: could not allocate skb ..\n",
464 mbuf->data_len = wrlen;
465 mbuf->pkt_len = mbuf->data_len;
467 req = rte_pktmbuf_mtod(mbuf, struct cpl_set_tcb_field *);
468 INIT_ULPTX_WR(req, wrlen, 0, 0);
469 wr = (struct work_request_hdr *)req;
/* First CPL: point the tid's RSS info at the firmware event queue so the
 * abort completion is delivered there.
 */
471 req = (struct cpl_set_tcb_field *)wr;
472 mk_set_tcb_field_ulp(f, req, W_TCB_RSS_INFO,
473 V_TCB_RSS_INFO(M_TCB_RSS_INFO),
474 V_TCB_RSS_INFO(adapter->sge.fw_evtq.abs_id),
476 aligner = (struct ulptx_idata *)(req + 1);
477 abort_req = (struct cpl_abort_req *)(aligner + 1);
478 mk_abort_req_ulp(abort_req, f->tid);
479 abort_rpl = (struct cpl_abort_rpl *)(abort_req + 1);
480 mk_abort_rpl_ulp(abort_rpl, f->tid);
481 t4_mgmt_tx(ctrlq, mbuf);
490 * Build a ACT_OPEN_REQ6 message for setting IPv6 hash filter.
/* Fills a T6 CPL_ACT_OPEN_REQ6 in the supplied mbuf; qid_filterid packs
 * the fw event queue and the atid. Excerpted listing — the case labels of
 * the chip switch and the INIT_TP_WR are not visible here.
 */
492 static void mk_act_open_req6(struct filter_entry *f, struct rte_mbuf *mbuf,
493 unsigned int qid_filterid, struct adapter *adap)
495 struct cpl_t6_act_open_req6 *req = NULL;
496 u64 local_lo, local_hi, peer_lo, peer_hi;
497 u32 *lip = (u32 *)f->fs.val.lip;
498 u32 *fip = (u32 *)f->fs.val.fip;
500 switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
502 req = rte_pktmbuf_mtod(mbuf, struct cpl_t6_act_open_req6 *);
507 dev_err(adap, "%s: unsupported chip type!\n", __func__);
/* Assemble the 128-bit addresses from the 32-bit words of lip/fip. */
511 local_hi = ((u64)lip[1]) << 32 | lip[0];
512 local_lo = ((u64)lip[3]) << 32 | lip[2];
513 peer_hi = ((u64)fip[1]) << 32 | fip[0];
514 peer_lo = ((u64)fip[3]) << 32 | fip[2];
516 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
518 req->local_port = cpu_to_be16(f->fs.val.lport);
519 req->peer_port = cpu_to_be16(f->fs.val.fport);
520 req->local_ip_hi = local_hi;
521 req->local_ip_lo = local_lo;
522 req->peer_ip_hi = peer_hi;
523 req->peer_ip_lo = peer_lo;
/* opt0/opt2 encode the filter actions (VLAN rewrite, hit counts, L2T
 * index, egress channel, drop/steer/switch behavior) into the TCB.
 */
524 req->opt0 = cpu_to_be64(V_NAGLE(f->fs.newvlan == VLAN_REMOVE ||
525 f->fs.newvlan == VLAN_REWRITE) |
526 V_DELACK(f->fs.hitcnts) |
527 V_L2T_IDX(f->l2t ? f->l2t->idx : 0) |
528 V_SMAC_SEL((cxgbe_port_viid(f->dev) & 0x7F)
530 V_TX_CHAN(f->fs.eport) |
531 V_ULP_MODE(ULP_MODE_NONE) |
532 F_TCAM_BYPASS | F_NON_OFFLOAD);
533 req->params = cpu_to_be64(V_FILTER_TUPLE(hash_filter_ntuple(f)));
534 req->opt2 = cpu_to_be32(F_RSS_QUEUE_VALID |
535 V_RSS_QUEUE(f->fs.iq) |
538 V_SACK_EN(f->fs.swapmac) |
539 V_CONG_CNTRL((f->fs.action == FILTER_DROP) |
540 (f->fs.dirsteer << 1)) |
541 V_CCTRL_ECN(f->fs.action == FILTER_SWITCH));
545 * Build a ACT_OPEN_REQ message for setting IPv4 hash filter.
/* IPv4 sibling of mk_act_open_req6(); same opt0/opt2 encoding, with the
 * addresses packed as single 32-bit little-to-wire assemblies.
 */
547 static void mk_act_open_req(struct filter_entry *f, struct rte_mbuf *mbuf,
548 unsigned int qid_filterid, struct adapter *adap)
550 struct cpl_t6_act_open_req *req = NULL;
552 switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
554 req = rte_pktmbuf_mtod(mbuf, struct cpl_t6_act_open_req *);
559 dev_err(adap, "%s: unsupported chip type!\n", __func__);
563 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
565 req->local_port = cpu_to_be16(f->fs.val.lport);
566 req->peer_port = cpu_to_be16(f->fs.val.fport);
/* lip/fip are byte arrays; rebuild the 32-bit address byte-by-byte. */
567 req->local_ip = f->fs.val.lip[0] | f->fs.val.lip[1] << 8 |
568 f->fs.val.lip[2] << 16 | f->fs.val.lip[3] << 24;
569 req->peer_ip = f->fs.val.fip[0] | f->fs.val.fip[1] << 8 |
570 f->fs.val.fip[2] << 16 | f->fs.val.fip[3] << 24;
571 req->opt0 = cpu_to_be64(V_NAGLE(f->fs.newvlan == VLAN_REMOVE ||
572 f->fs.newvlan == VLAN_REWRITE) |
573 V_DELACK(f->fs.hitcnts) |
574 V_L2T_IDX(f->l2t ? f->l2t->idx : 0) |
575 V_SMAC_SEL((cxgbe_port_viid(f->dev) & 0x7F)
577 V_TX_CHAN(f->fs.eport) |
578 V_ULP_MODE(ULP_MODE_NONE) |
579 F_TCAM_BYPASS | F_NON_OFFLOAD);
580 req->params = cpu_to_be64(V_FILTER_TUPLE(hash_filter_ntuple(f)));
581 req->opt2 = cpu_to_be32(F_RSS_QUEUE_VALID |
582 V_RSS_QUEUE(f->fs.iq) |
585 V_SACK_EN(f->fs.swapmac) |
586 V_CONG_CNTRL((f->fs.action == FILTER_DROP) |
587 (f->fs.dirsteer << 1)) |
588 V_CCTRL_ECN(f->fs.action == FILTER_SWITCH));
592 * Set the specified hash filter.
/* Allocates a filter_entry, acquires MPS TCAM / L2T / SMT / CLIP / atid
 * resources as the spec requires, then sends an ACT_OPEN_REQ[6] to
 * install the hash filter. Completion arrives via cxgbe_hash_filter_rpl().
 * Excerpted listing — error-path cleanup and several returns between the
 * shown lines are not visible here.
 */
594 static int cxgbe_set_hash_filter(struct rte_eth_dev *dev,
595 struct ch_filter_specification *fs,
596 struct filter_ctx *ctx)
598 struct port_info *pi = ethdev2pinfo(dev);
599 struct adapter *adapter = pi->adapter;
600 struct tid_info *t = &adapter->tids;
601 struct filter_entry *f;
602 struct rte_mbuf *mbuf;
603 struct sge_ctrl_txq *ctrlq;
608 ret = cxgbe_validate_filter(adapter, fs);
612 iq = get_filter_steerq(dev, fs);
614 ctrlq = &adapter->sge.ctrlq[pi->port_id];
616 f = t4_os_alloc(sizeof(*f));
625 /* Allocate MPS TCAM entry to match Destination MAC. */
626 if (f->fs.mask.macidx) {
629 idx = cxgbe_mpstcam_alloc(pi, f->fs.val.dmac, f->fs.mask.dmac);
635 f->fs.val.macidx = idx;
639 * If the new filter requires loopback Destination MAC and/or VLAN
640 * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
643 if (f->fs.newdmac || f->fs.newvlan == VLAN_INSERT ||
644 f->fs.newvlan == VLAN_REWRITE) {
645 /* allocate L2T entry for new filter */
646 f->l2t = cxgbe_l2t_alloc_switching(dev, f->fs.vlan,
647 f->fs.eport, f->fs.dmac);
654 /* If the new filter requires Source MAC rewriting then we need to
655 * allocate a SMT entry for the filter
658 f->smt = cxgbe_smt_alloc_switching(f->dev, f->fs.smac);
/* Reserve an active TID to track the pending ACT_OPEN request. */
665 atid = cxgbe_alloc_atid(t, f);
669 if (f->fs.type == FILTER_TYPE_IPV6) {
670 /* IPv6 hash filter */
671 f->clipt = cxgbe_clip_alloc(f->dev, (u32 *)&f->fs.val.lip);
675 size = sizeof(struct cpl_t6_act_open_req6);
676 mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
682 mbuf->data_len = size;
683 mbuf->pkt_len = mbuf->data_len;
/* qid_filterid = (fw event queue abs id << 14) | atid. */
685 mk_act_open_req6(f, mbuf,
686 ((adapter->sge.fw_evtq.abs_id << 14) | atid),
689 /* IPv4 hash filter */
690 size = sizeof(struct cpl_t6_act_open_req);
691 mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
697 mbuf->data_len = size;
698 mbuf->pkt_len = mbuf->data_len;
700 mk_act_open_req(f, mbuf,
701 ((adapter->sge.fw_evtq.abs_id << 14) | atid),
706 t4_mgmt_tx(ctrlq, mbuf);
/* Error unwind: release the atid (further cleanup lines not shown). */
710 cxgbe_free_atid(t, atid);
719 * t4_mk_filtdelwr - create a delete filter WR
720 * @adap: adapter context
721 * @ftid: the filter ID
722 * @wr: the filter work request to populate
723 * @qid: ingress queue to receive the delete notification
725 * Creates a filter work request to delete the supplied filter. If @qid is
726 * negative the delete notification is suppressed.
728 static void t4_mk_filtdelwr(struct adapter *adap, unsigned int ftid,
729 struct fw_filter2_wr *wr, int qid)
731 memset(wr, 0, sizeof(*wr));
/* Use the FILTER2 opcode only when the firmware advertises support. */
732 if (adap->params.filter2_wr_support)
733 wr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER2_WR));
735 wr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
736 wr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*wr) / 16));
737 wr->tid_to_iq = cpu_to_be32(V_FW_FILTER_WR_TID(ftid) |
738 V_FW_FILTER_WR_NOREPLY(qid < 0));
739 wr->del_filter_to_l2tix = cpu_to_be32(F_FW_FILTER_WR_DEL_FILTER);
741 wr->rx_chan_rx_rpl_iq =
742 cpu_to_be16(V_FW_FILTER_WR_RX_RPL_IQ(qid));
746 * Create FW work request to delete the filter at a specified index
/* Sends a delete-filter WR for ftid_tab[fidx]; the reply is steered to
 * the firmware event queue and handled by cxgbe_filter_rpl(). Excerpted
 * listing — the allocation-failure return is not visible here.
 */
748 static int del_filter_wr(struct rte_eth_dev *dev, unsigned int fidx)
750 struct adapter *adapter = ethdev2adap(dev);
751 struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
752 struct rte_mbuf *mbuf;
753 struct fw_filter2_wr *fwr;
754 struct sge_ctrl_txq *ctrlq;
755 unsigned int port_id = ethdev2pinfo(dev)->port_id;
757 ctrlq = &adapter->sge.ctrlq[port_id];
758 mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
762 mbuf->data_len = sizeof(*fwr);
763 mbuf->pkt_len = mbuf->data_len;
765 fwr = rte_pktmbuf_mtod(mbuf, struct fw_filter2_wr *);
766 t4_mk_filtdelwr(adapter, f->tid, fwr, adapter->sge.fw_evtq.abs_id);
769 * Mark the filter as "pending" and ship off the Filter Work Request.
770 * When we get the Work Request Reply we'll clear the pending status.
773 t4_mgmt_tx(ctrlq, mbuf);
/* Build and send the FW_FILTER[2]_WR that programs the LE-TCAM filter at
 * ftid_tab[fidx] into hardware. Reply handled by cxgbe_filter_rpl().
 * Excerpted listing — allocation-failure handling and some field lines
 * between the shown lines are not visible here.
 */
777 static int set_filter_wr(struct rte_eth_dev *dev, unsigned int fidx)
779 struct adapter *adapter = ethdev2adap(dev);
780 struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
781 struct rte_mbuf *mbuf;
782 struct fw_filter2_wr *fwr;
783 struct sge_ctrl_txq *ctrlq;
784 unsigned int port_id = ethdev2pinfo(dev)->port_id;
787 ctrlq = &adapter->sge.ctrlq[port_id];
788 mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
794 mbuf->data_len = sizeof(*fwr);
795 mbuf->pkt_len = mbuf->data_len;
797 fwr = rte_pktmbuf_mtod(mbuf, struct fw_filter2_wr *);
798 memset(fwr, 0, sizeof(*fwr));
801 * Construct the work request to set the filter.
/* FILTER2 WR is only used when the firmware advertises support. */
803 if (adapter->params.filter2_wr_support)
804 fwr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER2_WR));
806 fwr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
807 fwr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*fwr) / 16));
809 cpu_to_be32(V_FW_FILTER_WR_TID(f->tid) |
810 V_FW_FILTER_WR_RQTYPE(f->fs.type) |
811 V_FW_FILTER_WR_NOREPLY(0) |
812 V_FW_FILTER_WR_IQ(f->fs.iq));
/* Encode the filter actions: drop/steer/loopback, MAC/VLAN rewrite,
 * hit counting, egress channel, priority and L2T index.
 */
813 fwr->del_filter_to_l2tix =
814 cpu_to_be32(V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
815 V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
816 V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
817 V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
818 V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
819 V_FW_FILTER_WR_INSVLAN
820 (f->fs.newvlan == VLAN_INSERT ||
821 f->fs.newvlan == VLAN_REWRITE) |
822 V_FW_FILTER_WR_RMVLAN
823 (f->fs.newvlan == VLAN_REMOVE ||
824 f->fs.newvlan == VLAN_REWRITE) |
825 V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
826 V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
827 V_FW_FILTER_WR_PRIO(f->fs.prio) |
828 V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
/* Match-value / match-mask pairs for every tuple field. */
829 fwr->ethtype = cpu_to_be16(f->fs.val.ethtype);
830 fwr->ethtypem = cpu_to_be16(f->fs.mask.ethtype);
831 fwr->frag_to_ovlan_vldm =
832 (V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.ivlan_vld) |
833 V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.ivlan_vld) |
834 V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.ovlan_vld) |
835 V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.ovlan_vld));
836 fwr->smac_sel = f->smt ? f->smt->hw_idx : 0;
837 fwr->rx_chan_rx_rpl_iq =
838 cpu_to_be16(V_FW_FILTER_WR_RX_CHAN(0) |
839 V_FW_FILTER_WR_RX_RPL_IQ(adapter->sge.fw_evtq.abs_id
841 fwr->maci_to_matchtypem =
842 cpu_to_be32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
843 V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
844 V_FW_FILTER_WR_PORT(f->fs.val.iport) |
845 V_FW_FILTER_WR_PORTM(f->fs.mask.iport));
846 fwr->ptcl = f->fs.val.proto;
847 fwr->ptclm = f->fs.mask.proto;
848 fwr->ttyp = f->fs.val.tos;
849 fwr->ttypm = f->fs.mask.tos;
850 fwr->ivlan = cpu_to_be16(f->fs.val.ivlan);
851 fwr->ivlanm = cpu_to_be16(f->fs.mask.ivlan);
852 fwr->ovlan = cpu_to_be16(f->fs.val.ovlan);
853 fwr->ovlanm = cpu_to_be16(f->fs.mask.ovlan);
854 rte_memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
855 rte_memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
856 rte_memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
857 rte_memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
858 fwr->lp = cpu_to_be16(f->fs.val.lport);
859 fwr->lpm = cpu_to_be16(f->fs.mask.lport);
860 fwr->fp = cpu_to_be16(f->fs.val.fport);
861 fwr->fpm = cpu_to_be16(f->fs.mask.fport);
/* FILTER2-only fields: swapmac and NAT rewrite addresses/ports. */
863 if (adapter->params.filter2_wr_support) {
864 fwr->filter_type_swapmac =
865 V_FW_FILTER2_WR_SWAPMAC(f->fs.swapmac);
866 fwr->natmode_to_ulp_type =
867 V_FW_FILTER2_WR_ULP_TYPE(f->fs.nat_mode ?
870 V_FW_FILTER2_WR_NATMODE(f->fs.nat_mode);
871 memcpy(fwr->newlip, f->fs.nat_lip, sizeof(fwr->newlip));
872 memcpy(fwr->newfip, f->fs.nat_fip, sizeof(fwr->newfip));
873 fwr->newlport = cpu_to_be16(f->fs.nat_lport);
874 fwr->newfport = cpu_to_be16(f->fs.nat_fport);
878 * Mark the filter as "pending" and ship off the Filter Work Request.
879 * When we get the Work Request Reply we'll clear the pending status.
882 t4_mgmt_tx(ctrlq, mbuf);
890 * Set the corresponding entries in the bitmap.
/* Marks nentries bits starting at fidx under ftid_lock; fails if the
 * first bit is already taken (error return not visible in this excerpt).
 */
892 static int cxgbe_set_ftid(struct tid_info *t, u32 fidx, u8 nentries)
896 t4_os_lock(&t->ftid_lock);
897 if (rte_bitmap_get(t->ftid_bmap, fidx)) {
898 t4_os_unlock(&t->ftid_lock);
902 for (i = fidx; i < fidx + nentries; i++)
903 rte_bitmap_set(t->ftid_bmap, i);
904 t4_os_unlock(&t->ftid_lock);
909 * Clear the corresponding entries in the bitmap.
/* Releases nentries consecutive ftid bitmap slots starting at fidx. */
911 static void cxgbe_clear_ftid(struct tid_info *t, u32 fidx, u8 nentries)
915 t4_os_lock(&t->ftid_lock);
916 for (i = fidx; i < fidx + nentries; i++)
917 rte_bitmap_clear(t->ftid_bmap, i);
918 t4_os_unlock(&t->ftid_lock);
922 * Check a delete filter request for validity and send it to the hardware.
923 * Return 0 on success, an error number otherwise. We attach any provided
924 * filter operation context to the internal filter specification in order to
925 * facilitate signaling completion of the operation.
/* Excerpted listing — several error returns and the 'valid' branch between
 * the shown lines are not visible here.
 */
927 int cxgbe_del_filter(struct rte_eth_dev *dev, unsigned int filter_id,
928 struct ch_filter_specification *fs,
929 struct filter_ctx *ctx)
931 struct port_info *pi = dev->data->dev_private;
932 struct adapter *adapter = pi->adapter;
933 struct filter_entry *f;
934 unsigned int chip_ver;
/* Hash (cap) filters take a completely different delete path. */
938 if (is_hashfilter(adapter) && fs->cap)
939 return cxgbe_del_hash_filter(dev, filter_id, ctx);
941 if (filter_id >= adapter->tids.nftids)
944 chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
947 * Ensure IPv6 filter id is aligned on the 2 slot boundary for T6,
948 * and 4 slot boundary for cards below T6.
950 if (fs->type == FILTER_TYPE_IPV6) {
951 if (chip_ver < CHELSIO_T6)
957 nentries = cxgbe_filter_slots(adapter, fs->type);
958 ret = cxgbe_is_filter_set(&adapter->tids, filter_id, nentries);
960 dev_warn(adap, "%s: could not find filter entry: %u\n",
961 __func__, filter_id);
965 f = &adapter->tids.ftid_tab[filter_id];
966 ret = writable_filter(f);
/* Release the bitmap slots, then ask firmware to delete the filter. */
972 cxgbe_clear_ftid(&adapter->tids,
973 f->tid - adapter->tids.ftid_base,
975 return del_filter_wr(dev, filter_id);
979 * If the caller has passed in a Completion Context then we need to
980 * mark it as a successful completion so they don't stall waiting
985 t4_complete(&ctx->completion);
992 * Check a Chelsio Filter Request for validity, convert it into our internal
993 * format and send it to the hardware. Return 0 on success, an error number
994 * otherwise. We attach any provided filter operation context to the internal
995 * filter specification in order to facilitate signaling completion of the
/* Excerpted listing — error returns, the busy-entry check, and the spec
 * copy into f->fs between the shown lines are not visible here.
 */
998 int cxgbe_set_filter(struct rte_eth_dev *dev, unsigned int filter_id,
999 struct ch_filter_specification *fs,
1000 struct filter_ctx *ctx)
1002 struct port_info *pi = ethdev2pinfo(dev);
1003 struct adapter *adapter = pi->adapter;
1004 u8 nentries, bitoff[16] = {0};
1005 struct filter_entry *f;
1006 unsigned int chip_ver;
1007 unsigned int fidx, iq;
/* Hash (cap) filters take a completely different insertion path. */
1011 if (is_hashfilter(adapter) && fs->cap)
1012 return cxgbe_set_hash_filter(dev, fs, ctx);
1014 if (filter_id >= adapter->tids.nftids)
1017 chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
1019 ret = cxgbe_validate_filter(adapter, fs);
1024 * IPv6 filters occupy four slots and must be aligned on four-slot
1025 * boundaries for T5. On T6, IPv6 filters occupy two-slots and
1026 * must be aligned on two-slot boundaries.
1028 * IPv4 filters only occupy a single slot and have no alignment
1032 if (fs->type == FILTER_TYPE_IPV6) {
1033 if (chip_ver < CHELSIO_T6)
1039 if (fidx != filter_id)
1042 nentries = cxgbe_filter_slots(adapter, fs->type);
1043 ret = cxgbe_is_filter_set(&adapter->tids, filter_id, nentries);
1047 iq = get_filter_steerq(dev, fs);
1050 * Check to make sure that provided filter index is not
1051 * already in use by someone else
1053 f = &adapter->tids.ftid_tab[filter_id];
/* Reserve the bitmap slots before touching hardware resources. */
1057 fidx = adapter->tids.ftid_base + filter_id;
1058 ret = cxgbe_set_ftid(&adapter->tids, filter_id, nentries);
1063 * Check to make sure the filter requested is writable ...
1065 ret = writable_filter(f);
1067 /* Clear the bits we have set above */
1068 cxgbe_clear_ftid(&adapter->tids, filter_id, nentries);
1073 * Convert the filter specification into our internal format.
1074 * We copy the PF/VF specification into the Outer VLAN field
1075 * here so the rest of the code -- including the interface to
1076 * the firmware -- doesn't have to constantly do these checks.
1082 /* Allocate MPS TCAM entry to match Destination MAC. */
1083 if (f->fs.mask.macidx) {
1086 idx = cxgbe_mpstcam_alloc(pi, f->fs.val.dmac, f->fs.mask.dmac);
1092 f->fs.val.macidx = idx;
1095 /* Allocate a clip table entry only if we have non-zero IPv6 address. */
1096 if (chip_ver > CHELSIO_T5 && f->fs.type &&
1097 memcmp(f->fs.val.lip, bitoff, sizeof(bitoff))) {
1098 f->clipt = cxgbe_clip_alloc(dev, (u32 *)&f->fs.val.lip);
1105 /* If the new filter requires loopback Destination MAC and/or VLAN
1106 * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
1109 if (f->fs.newvlan || f->fs.newdmac) {
1110 f->l2t = cxgbe_l2t_alloc_switching(f->dev, f->fs.vlan,
1111 f->fs.eport, f->fs.dmac);
1118 /* If the new filter requires Source MAC rewriting then we need to
1119 * allocate a SMT entry for the filter
1121 if (f->fs.newsmac) {
1122 f->smt = cxgbe_smt_alloc_switching(f->dev, f->fs.smac);
1129 iconf = adapter->params.tp.ingress_config;
1131 /* Either PFVF or OVLAN can be active, but not both
1132 * So, if PFVF is enabled, then overwrite the OVLAN
1133 * fields with PFVF fields before writing the spec
1136 if (iconf & F_VNIC) {
/* OVLAN carries PF(3 bits at 13) | VF when the VNIC mux is on. */
1137 f->fs.val.ovlan = fs->val.pf << 13 | fs->val.vf;
1138 f->fs.mask.ovlan = fs->mask.pf << 13 | fs->mask.vf;
1139 f->fs.val.ovlan_vld = fs->val.pfvf_vld;
1140 f->fs.mask.ovlan_vld = fs->mask.pfvf_vld;
1144 * Attempt to set the filter. If we don't succeed, we clear
1145 * it and return the failure.
1148 f->tid = fidx; /* Save the actual tid */
1149 ret = set_filter_wr(dev, filter_id);
/* Error unwind: free the reserved bitmap slots. */
1156 cxgbe_clear_ftid(&adapter->tids, filter_id, nentries);
1162 * Handle a Hash filter write reply.
/* Completion handler for the ACT_OPEN_REQ[6] sent by cxgbe_set_hash_filter.
 * On success, promotes the atid to a real tid and finishes TCB setup; on
 * failure, records an error in the caller's context. Excerpted listing —
 * some branches and cleanup lines between the shown lines are not visible.
 */
1164 void cxgbe_hash_filter_rpl(struct adapter *adap,
1165 const struct cpl_act_open_rpl *rpl)
1167 struct tid_info *t = &adap->tids;
1168 struct filter_entry *f;
1169 struct filter_ctx *ctx = NULL;
1170 unsigned int tid = GET_TID(rpl);
/* The atid we allocated at submit time comes back in atid_status. */
1171 unsigned int ftid = G_TID_TID(G_AOPEN_ATID
1172 (be32_to_cpu(rpl->atid_status)));
1173 unsigned int status = G_AOPEN_STATUS(be32_to_cpu(rpl->atid_status));
1175 f = lookup_atid(t, ftid);
1177 dev_warn(adap, "%s: could not find filter entry: %d\n",
1186 case CPL_ERR_NONE: {
1188 f->pending = 0; /* asynchronous setup completed */
/* Swap the temporary atid for the permanent tid. */
1191 cxgbe_insert_tid(t, f, f->tid, 0);
1192 cxgbe_free_atid(t, ftid);
/* Repurpose the TCB timestamp/RTT fields as the filter hit counter. */
1198 set_tcb_field(adap, tid,
1200 V_TCB_TIMESTAMP(M_TCB_TIMESTAMP) |
1201 V_TCB_T_RTT_TS_RECENT_AGE
1202 (M_TCB_T_RTT_TS_RECENT_AGE),
1203 V_TCB_TIMESTAMP(0ULL) |
1204 V_TCB_T_RTT_TS_RECENT_AGE(0ULL),
1207 set_tcb_tflag(adap, tid, S_TF_CCTRL_ECE, 1, 1);
1208 if (f->fs.newvlan == VLAN_INSERT ||
1209 f->fs.newvlan == VLAN_REWRITE)
1210 set_tcb_tflag(adap, tid, S_TF_CCTRL_RFR, 1, 1);
1211 if (f->fs.newsmac) {
1212 set_tcb_tflag(adap, tid, S_TF_CCTRL_CWR, 1, 1);
1213 set_tcb_field(adap, tid, W_TCB_SMAC_SEL,
1214 V_TCB_SMAC_SEL(M_TCB_SMAC_SEL),
1215 V_TCB_SMAC_SEL(f->smt->hw_idx), 1);
1220 dev_warn(adap, "%s: filter creation failed with status = %u\n",
/* TCAM full is retryable (-EAGAIN); everything else is -EINVAL. */
1224 if (status == CPL_ERR_TCAM_FULL)
1225 ctx->result = -EAGAIN;
1227 ctx->result = -EINVAL;
1230 cxgbe_free_atid(t, ftid);
1236 t4_complete(&ctx->completion);
1240 * Handle a LE-TCAM filter write/deletion reply.
/* Completion handler for FW_FILTER[2]_WR set/delete requests. Excerpted
 * listing — some branches (valid flag updates, warning arguments) between
 * the shown lines are not visible here.
 */
1242 void cxgbe_filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
1244 struct filter_entry *f = NULL;
1245 unsigned int tid = GET_TID(rpl);
1246 int idx, max_fidx = adap->tids.nftids;
1248 /* Get the corresponding filter entry for this tid */
1249 if (adap->tids.ftid_tab) {
1250 /* Check this in normal filter region */
1251 idx = tid - adap->tids.ftid_base;
1252 if (idx >= max_fidx)
1255 f = &adap->tids.ftid_tab[idx];
1260 /* We found the filter entry for this tid */
/* The WR cookie carries the firmware's result code. */
1262 unsigned int ret = G_COOKIE(rpl->cookie);
1263 struct filter_ctx *ctx;
1266 * Pull off any filter operation context attached to the
1272 if (ret == FW_FILTER_WR_FLT_ADDED) {
1273 f->pending = 0; /* asynchronous setup completed */
1279 } else if (ret == FW_FILTER_WR_FLT_DELETED) {
1281 * Clear the filter when we get confirmation from the
1282 * hardware that the filter has been deleted.
1289 * Something went wrong. Issue a warning about the
1290 * problem and clear everything out.
1292 dev_warn(adap, "filter %u setup failed with error %u\n",
1296 ctx->result = -EINVAL;
1300 t4_complete(&ctx->completion);
1305 * Retrieve the packet count for the specified filter.
/* Reads the hit/byte counter that lives in the filter's TCB (timestamp /
 * RTT fields) via a memory window. Excerpted listing — error returns and
 * the T5 hash-filter early-exit between the shown lines are not visible.
 */
1307 int cxgbe_get_filter_count(struct adapter *adapter, unsigned int fidx,
1308 u64 *c, int hash, bool get_byte)
1310 struct filter_entry *f;
1311 unsigned int tcb_base, tcbaddr;
1314 tcb_base = t4_read_reg(adapter, A_TP_CMM_TCB_BASE);
1315 if (is_hashfilter(adapter) && hash) {
1316 if (fidx < adapter->tids.ntids) {
1317 f = adapter->tids.tid_tab[fidx];
1321 if (is_t5(adapter->params.chip)) {
/* Hash filter TCB address is tid-indexed into the TCB region. */
1325 tcbaddr = tcb_base + (fidx * TCB_SIZE);
1331 if (fidx >= adapter->tids.nftids)
1334 f = &adapter->tids.ftid_tab[fidx];
1338 tcbaddr = tcb_base + f->tid * TCB_SIZE;
1341 f = &adapter->tids.ftid_tab[fidx];
1346 if (is_t5(adapter->params.chip) || is_t6(adapter->params.chip)) {
1348 * For T5, the Filter Packet Hit Count is maintained as a
1349 * 32-bit Big Endian value in the TCB field {timestamp}.
1350 * Similar to the craziness above, instead of the filter hit
1351 * count showing up at offset 20 ((W_TCB_TIMESTAMP == 5) *
1352 * sizeof(u32)), it actually shows up at offset 24. Whacky.
/* Byte count is a 64-bit BE value at word offset 4 of the TCB. */
1355 unsigned int word_offset = 4;
1356 __be64 be64_byte_count;
1358 t4_os_lock(&adapter->win0_lock);
1359 ret = t4_memory_rw(adapter, MEMWIN_NIC, MEM_EDC0,
1361 (word_offset * sizeof(__be32)),
1362 sizeof(be64_byte_count),
1365 t4_os_unlock(&adapter->win0_lock);
1368 *c = be64_to_cpu(be64_byte_count);
/* Packet count is a 32-bit BE value at word offset 6 of the TCB. */
1370 unsigned int word_offset = 6;
1373 t4_os_lock(&adapter->win0_lock);
1374 ret = t4_memory_rw(adapter, MEMWIN_NIC, MEM_EDC0,
1376 (word_offset * sizeof(__be32)),
1377 sizeof(be32_count), &be32_count,
1379 t4_os_unlock(&adapter->win0_lock);
1382 *c = (u64)be32_to_cpu(be32_count);
1389 * Clear the packet count for the specified filter.
/* Zeroes the TCB fields used as hit/byte counters by issuing two
 * SET_TCB_FIELD messages (timestamp, then RTT/RTSEQ fields). Excerpted
 * listing — the clear_byte conditional and the tcb_word declaration are
 * not visible here.
 */
1391 int cxgbe_clear_filter_count(struct adapter *adapter, unsigned int fidx,
1392 int hash, bool clear_byte)
1394 u64 tcb_mask = 0, tcb_val = 0;
1395 struct filter_entry *f = NULL;
1398 if (is_hashfilter(adapter) && hash) {
1399 if (fidx >= adapter->tids.ntids)
1402 /* No hitcounts supported for T5 hashfilters */
1403 if (is_t5(adapter->params.chip))
1406 f = adapter->tids.tid_tab[fidx];
1408 if (fidx >= adapter->tids.nftids)
1411 f = &adapter->tids.ftid_tab[fidx];
1414 if (!f || !f->valid)
/* Clear the packet-hit counter (TCB timestamp field). */
1417 tcb_word = W_TCB_TIMESTAMP;
1418 tcb_mask = V_TCB_TIMESTAMP(M_TCB_TIMESTAMP);
1419 tcb_val = V_TCB_TIMESTAMP(0ULL);
1421 set_tcb_field(adapter, f->tid, tcb_word, tcb_mask, tcb_val, 1);
/* Clear the byte counter (RTT/RTSEQ fields). */
1424 tcb_word = W_TCB_T_RTT_TS_RECENT_AGE;
1426 V_TCB_T_RTT_TS_RECENT_AGE(M_TCB_T_RTT_TS_RECENT_AGE) |
1427 V_TCB_T_RTSEQ_RECENT(M_TCB_T_RTSEQ_RECENT);
1428 tcb_val = V_TCB_T_RTT_TS_RECENT_AGE(0ULL) |
1429 V_TCB_T_RTSEQ_RECENT(0ULL);
1431 set_tcb_field(adapter, f->tid, tcb_word, tcb_mask, tcb_val, 1);
1438 * Handle a Hash filter delete reply.
1440 void cxgbe_hash_del_filter_rpl(struct adapter *adap,
1441 const struct cpl_abort_rpl_rss *rpl)
1443 struct tid_info *t = &adap->tids;
1444 struct filter_entry *f;
1445 struct filter_ctx *ctx = NULL;
1446 unsigned int tid = GET_TID(rpl);
1448 f = lookup_tid(t, tid);
1450 dev_warn(adap, "%s: could not find filter entry: %u\n",
1458 cxgbe_remove_tid(t, 0, tid, 0);
1463 t4_complete(&ctx->completion);